/*
 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <linux/iio/iio.h>

#include <linux/of_device.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>

#include <linux/mfd/ti_am335x_tscadc.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define DMA_BUFFER_SIZE		SZ_2K

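/*
 * DMA state for the FIFO1 capture path. The DMA_BUFFER_SIZE coherent buffer
 * is split into two periods of equal size; tiadc_dma_rx_complete() pushes one
 * period to the IIO buffer while the hardware fills the other.
 */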
struct tiadc_dma {
	struct dma_slave_config conf;
	struct dma_chan *chan;
	dma_addr_t addr;
	dma_cookie_t cookie;
	u8 *buf;
	int current_period;
	int period_size;
	u8 fifo_thresh;
};

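/* Per-device driver state, stored as the IIO private data (iio_priv()). */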
struct tiadc_device {
	struct ti_tscadc_dev *mfd_tscadc;
	struct tiadc_dma dma;
	struct mutex fifo1_lock; /* to protect fifo access */
	int channels;
	int total_ch_enabled;
	u8 channel_line[8];
	u8 channel_step[8];
	int buffer_en_ch_steps;
	u16 data[8];
	u32 open_delay[8], sample_delay[8], step_avg[8];
};

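/* Register accessors; the register space is shared with the parent TSC/ADC MFD. */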
static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
{
	return readl(adc->mfd_tscadc->tscadc_base + reg);
}

static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
			 unsigned int val)
{
	writel(val, adc->mfd_tscadc->tscadc_base + reg);
}

static u32 get_adc_step_mask(struct tiadc_device *adc_dev)
{
	u32 step_en;

	step_en = ((1 << adc_dev->channels) - 1);
	step_en <<= TOTAL_STEPS - adc_dev->channels + 1;

	return step_en;
}

static u32 get_adc_chan_step_mask(struct tiadc_device *adc_dev,
				  struct iio_chan_spec const *chan)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adc_dev->channel_step); i++) {
		if (chan->channel == adc_dev->channel_line[i]) {
			u32 step;

			step = adc_dev->channel_step[i];
			/* +1 for the charger */
			return 1 << (step + 1);
		}
	}

	return 0;
}

static u32 get_adc_step_bit(struct tiadc_device *adc_dev, int chan)
{
	return 1 << adc_dev->channel_step[chan];
}

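/*
 * Program one ADC step per enabled channel: averaging and open/sample delays
 * (clamped to the hardware limits with a warning) plus the input line, with
 * results routed to FIFO1.
 */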
static void tiadc_step_config(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct device *dev = adc_dev->mfd_tscadc->dev;
	unsigned int stepconfig;
	int i, steps = 0;

	/*
	 * There are 16 configurable steps and 8 analog input
	 * lines available which are shared between Touchscreen and ADC.
	 *
	 * Steps forwards i.e. from 0 towards 16 are used by ADC
	 * depending on number of input lines needed.
	 * Channel would represent which analog input
	 * needs to be given to ADC to digitalize data.
	 */
	for (i = 0; i < adc_dev->channels; i++) {
		int chan;

		chan = adc_dev->channel_line[i];

		if (adc_dev->step_avg[i] > STEPCONFIG_AVG_16) {
			dev_warn(dev, "chan %d step_avg truncating to %d\n",
				 chan, STEPCONFIG_AVG_16);
			adc_dev->step_avg[i] = STEPCONFIG_AVG_16;
		}

		if (adc_dev->step_avg[i])
			stepconfig =
			STEPCONFIG_AVG(ffs(adc_dev->step_avg[i]) - 1) |
			STEPCONFIG_FIFO1;
		else
			stepconfig = STEPCONFIG_FIFO1;

		if (iio_buffer_enabled(indio_dev))
			stepconfig |= STEPCONFIG_MODE_SWCNT;

		tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
			     stepconfig | STEPCONFIG_INP(chan));

		if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) {
			dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n",
				 chan);
			adc_dev->open_delay[i] = STEPDELAY_OPEN_MASK;
		}

		if (adc_dev->sample_delay[i] > 0xFF) {
			dev_warn(dev, "chan %d sample delay truncating to 0xFF\n",
				 chan);
			adc_dev->sample_delay[i] = 0xFF;
		}

		tiadc_writel(adc_dev, REG_STEPDELAY(steps),
			     STEPDELAY_OPEN(adc_dev->open_delay[i]) |
			     STEPDELAY_SAMPLE(adc_dev->sample_delay[i]));

		adc_dev->channel_step[i] = steps;
		steps++;
	}
}

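/*
 * Hard IRQ handler. The IRQ line is shared with the touchscreen; only FIFO1
 * events belong to the ADC. An overrun is recovered inline, a threshold
 * event wakes the threaded handler.
 */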
static irqreturn_t tiadc_irq_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int status, config, adc_fsm;
	unsigned short count = 0;

	status = tiadc_readl(adc_dev, REG_IRQSTATUS);

	/*
	 * ADC and touchscreen share the IRQ line.
	 * FIFO0 interrupts are used by TSC. Handle FIFO1 IRQs here only.
	 */
	if (status & IRQENB_FIFO1OVRRUN) {
		/* FIFO Overrun. Clear flag. Disable/Enable ADC to recover */
		config = tiadc_readl(adc_dev, REG_CTRL);
		config &= ~(CNTRLREG_TSCSSENB);
		tiadc_writel(adc_dev, REG_CTRL, config);
		tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
				| IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);

		/*
		 * Wait for idle state.
		 * ADC needs to finish the current conversion
		 * before disabling the module.
		 */
		do {
			adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
		} while (adc_fsm != 0x10 && count++ < 100);

		tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
		return IRQ_HANDLED;
	} else if (status & IRQENB_FIFO1THRES) {
		/* Disable irq and wake worker thread */
		tiadc_writel(adc_dev, REG_IRQCLR, IRQENB_FIFO1THRES);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

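/*
 * Threaded IRQ handler: drain FIFO1 one scan at a time into the IIO buffer,
 * then acknowledge and re-enable the FIFO1 threshold interrupt.
 */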
static irqreturn_t tiadc_worker_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, k, fifo1count, read;
	u16 *data = adc_dev->data;

	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (k = 0; k < fifo1count; k = k + i) {
		for (i = 0; i < (indio_dev->scan_bytes) / 2; i++) {
			read = tiadc_readl(adc_dev, REG_FIFO1);
			data[i] = read & FIFOREAD_DATA_MASK;
		}

		iio_push_to_buffers(indio_dev, (u8 *)data);
	}

	tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES);
	tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);

	return IRQ_HANDLED;
}

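/*
 * Cyclic DMA completion callback: push the period that just finished to the
 * IIO buffer and flip to the other half of the buffer.
 */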
static void tiadc_dma_rx_complete(void *param)
{
	struct iio_dev *indio_dev = param;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u8 *data;
	int i;

	data = dma->buf + dma->current_period * dma->period_size;
	dma->current_period = 1 - dma->current_period; /* swap the buffer ID */

	for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
		iio_push_to_buffers(indio_dev, data);
		data += indio_dev->scan_bytes;
	}
}

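/*
 * Configure and start cyclic DMA on FIFO1. The FIFO threshold and the DMA
 * period length are both kept a multiple of the number of enabled channels
 * so that only complete scans are handed to iio_push_to_buffers().
 */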
static int tiadc_start_dma(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	struct dma_async_tx_descriptor *desc;

	dma->current_period = 0; /* We start to fill period 0 */

	/*
	 * Make the fifo thresh as the multiple of total number of
	 * channels enabled, so make sure that cyclic DMA period
	 * length is also a multiple of total number of channels
	 * enabled. This ensures that no invalid data is reported
	 * to the stack via iio_push_to_buffers().
	 */
	dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
				     adc_dev->total_ch_enabled) - 1;
	/* Make sure that period length is multiple of fifo thresh level */
	dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
				     (dma->fifo_thresh + 1) * sizeof(u16));

	dma->conf.src_maxburst = dma->fifo_thresh + 1;
	dmaengine_slave_config(dma->chan, &dma->conf);

	desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
					 dma->period_size * 2,
					 dma->period_size, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = tiadc_dma_rx_complete;
	desc->callback_param = indio_dev;

	dma->cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->chan);

	tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);

	return 0;
}

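/*
 * Buffered-capture setup_ops: flush FIFO1 before enabling, program the steps
 * for the active scan mask on enable (DMA if a channel was obtained,
 * interrupt-driven otherwise), and tear everything down again on disable.
 */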
static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, fifo1count, read;

	tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
				IRQENB_FIFO1OVRRUN |
				IRQENB_FIFO1UNDRFLW));

	/* Flush FIFO. Needed in corner cases in simultaneous tsc/adc use */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		read = tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}

static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	unsigned int irq_enable;
	unsigned int enb = 0;
	u8 bit;

	tiadc_step_config(indio_dev);
	for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
		enb |= (get_adc_step_bit(adc_dev, bit) << 1);
		adc_dev->total_ch_enabled++;
	}
	adc_dev->buffer_en_ch_steps = enb;

	if (dma->chan)
		tiadc_start_dma(indio_dev);

	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);

	tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES
				| IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);

	irq_enable = IRQENB_FIFO1OVRRUN;
	if (!dma->chan)
		irq_enable |= IRQENB_FIFO1THRES;
	tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable);

	return 0;
}

static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	int fifo1count, i, read;

	tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
				IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
	adc_dev->buffer_en_ch_steps = 0;
	adc_dev->total_ch_enabled = 0;
	if (dma->chan) {
		tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
		dmaengine_terminate_async(dma->chan);
	}

	/* Flush FIFO of leftover data in the time it takes to disable adc */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		read = tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}

static int tiadc_buffer_postdisable(struct iio_dev *indio_dev)
{
	tiadc_step_config(indio_dev);

	return 0;
}

static const struct iio_buffer_setup_ops tiadc_buffer_setup_ops = {
	.preenable = &tiadc_buffer_preenable,
	.postenable = &tiadc_buffer_postenable,
	.predisable = &tiadc_buffer_predisable,
	.postdisable = &tiadc_buffer_postdisable,
};

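/*
 * Attach a kfifo buffer to the IIO device and request the shared threaded
 * IRQ; on failure the kfifo is freed again.
 */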
static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
	irqreturn_t (*pollfunc_bh)(int irq, void *p),
	irqreturn_t (*pollfunc_th)(int irq, void *p),
	int irq,
	unsigned long flags,
	const struct iio_buffer_setup_ops *setup_ops)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_kfifo_allocate();
	if (!buffer)
		return -ENOMEM;

	iio_device_attach_buffer(indio_dev, buffer);

	ret = request_threaded_irq(irq, pollfunc_th, pollfunc_bh,
				   flags, indio_dev->name, indio_dev);
	if (ret)
		goto error_kfifo_free;

	indio_dev->setup_ops = setup_ops;
	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;

	return 0;

error_kfifo_free:
	iio_kfifo_free(indio_dev->buffer);
	return ret;
}

static void tiadc_iio_buffered_hardware_remove(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);

	free_irq(adc_dev->mfd_tscadc->irq, indio_dev);
	iio_kfifo_free(indio_dev->buffer);
}

static const char * const chan_name_ain[] = {
	"AIN0", "AIN1", "AIN2", "AIN3",
	"AIN4", "AIN5", "AIN6", "AIN7",
};

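/*
 * Build the iio_chan_spec array: one 12-bit unsigned voltage channel per
 * input line listed in the device tree.
 */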
static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct iio_chan_spec *chan_array;
	struct iio_chan_spec *chan;
	int i;

	indio_dev->num_channels = channels;
	chan_array = kcalloc(channels, sizeof(*chan_array), GFP_KERNEL);
	if (chan_array == NULL)
		return -ENOMEM;

	chan = chan_array;
	for (i = 0; i < channels; i++, chan++) {
		chan->type = IIO_VOLTAGE;
		chan->channel = adc_dev->channel_line[i];
		chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
		chan->datasheet_name = chan_name_ain[chan->channel];
		chan->scan_index = i;
		chan->scan_type.sign = 'u';
		chan->scan_type.realbits = 12;
		chan->scan_type.storagebits = 16;
	}

	indio_dev->channels = chan_array;

	return 0;
}

static void tiadc_channels_remove(struct iio_dev *indio_dev)
{
	kfree(indio_dev->channels);
}

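/*
 * Single-shot read: trigger the channel's step once, poll FIFO1 with a
 * timeout and return the latest sample that matches the requested step.
 * Not allowed while the buffer is enabled.
 */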
static int tiadc_read_raw(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  int *val, int *val2, long mask)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int ret = IIO_VAL_INT;
	int i, map_val;
	unsigned int fifo1count, read, stepid;
	bool found = false;
	u32 step_en;
	unsigned long timeout;

	if (iio_buffer_enabled(indio_dev))
		return -EBUSY;

	step_en = get_adc_chan_step_mask(adc_dev, chan);
	if (!step_en)
		return -EINVAL;

	mutex_lock(&adc_dev->fifo1_lock);
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	while (fifo1count--)
		tiadc_readl(adc_dev, REG_FIFO1);

	am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);

	timeout = jiffies + msecs_to_jiffies
				(IDLE_TIMEOUT * adc_dev->channels);
	/* Wait for Fifo threshold interrupt */
	while (1) {
		fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
		if (fifo1count)
			break;

		if (time_after(jiffies, timeout)) {
			am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
			ret = -EAGAIN;
			goto err_unlock;
		}
	}
	map_val = adc_dev->channel_step[chan->scan_index];

	/*
	 * We check the complete FIFO. We programmed just one entry but in case
	 * something went wrong we left empty handed (-EAGAIN previously) and
	 * then the value appeared somehow in the FIFO we would have two
	 * entries. Therefore we read every item and keep only the latest
	 * version of the requested channel.
	 */
	for (i = 0; i < fifo1count; i++) {
		read = tiadc_readl(adc_dev, REG_FIFO1);
		stepid = read & FIFOREAD_CHNLID_MASK;
		stepid = stepid >> 0x10;

		if (stepid == map_val) {
			read = read & FIFOREAD_DATA_MASK;
			found = true;
			*val = (u16)read;
		}
	}
	am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);

	if (!found)
		ret = -EBUSY;

err_unlock:
	mutex_unlock(&adc_dev->fifo1_lock);
	return ret;
}

static const struct iio_info tiadc_info = {
	.read_raw = &tiadc_read_raw,
};

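/*
 * Request the "fifo1" DMA channel and a coherent RX buffer. DMA is optional:
 * if no channel is available the driver falls back to interrupt-driven FIFO
 * reads.
 */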
static int tiadc_request_dma(struct platform_device *pdev,
			     struct tiadc_device *adc_dev)
{
	struct tiadc_dma *dma = &adc_dev->dma;
	dma_cap_mask_t mask;

	/* Default slave configuration parameters */
	dma->conf.direction = DMA_DEV_TO_MEM;
	dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	/* Get a channel for RX */
	dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
	if (IS_ERR(dma->chan)) {
		int ret = PTR_ERR(dma->chan);

		dma->chan = NULL;
		return ret;
	}

	/* RX buffer */
	dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				      &dma->addr, GFP_KERNEL);
	if (!dma->buf)
		goto err;

	return 0;

err:
	dma_release_channel(dma->chan);
	return -ENOMEM;
}

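/*
 * Read the channel list and the optional per-step parameters from the device
 * tree. Illustrative fragment only (property names as used below, node name
 * and values made up):
 *
 *	adc {
 *		ti,adc-channels = <4 5 6 7>;
 *		ti,chan-step-avg = <16 16 16 16>;
 *	};
 */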
static int tiadc_parse_dt(struct platform_device *pdev,
			  struct tiadc_device *adc_dev)
{
	struct device_node *node = pdev->dev.of_node;
	struct property *prop;
	const __be32 *cur;
	int channels = 0;
	u32 val;

	of_property_for_each_u32(node, "ti,adc-channels", prop, cur, val) {
		adc_dev->channel_line[channels] = val;

		/* Set Default values for optional DT parameters */
		adc_dev->open_delay[channels] = STEPCONFIG_OPENDLY;
		adc_dev->sample_delay[channels] = STEPCONFIG_SAMPLEDLY;
		adc_dev->step_avg[channels] = 16;

		channels++;
	}

	of_property_read_u32_array(node, "ti,chan-step-avg",
				   adc_dev->step_avg, channels);
	of_property_read_u32_array(node, "ti,chan-step-opendelay",
				   adc_dev->open_delay, channels);
	of_property_read_u32_array(node, "ti,chan-step-sampledelay",
				   adc_dev->sample_delay, channels);

	adc_dev->channels = channels;

	return 0;
}

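/*
 * Probe: parse the DT, program the initial step configuration, register the
 * IIO device with a kfifo buffer and the shared IRQ, and finally try to set
 * up DMA (only -EPROBE_DEFER from the DMA request is treated as fatal).
 */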
static int tiadc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct tiadc_device *adc_dev;
	struct device_node *node = pdev->dev.of_node;
	int err;

	if (!node) {
		dev_err(&pdev->dev, "Could not find valid DT data.\n");
		return -EINVAL;
	}

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
	if (indio_dev == NULL) {
		dev_err(&pdev->dev, "failed to allocate iio device\n");
		return -ENOMEM;
	}
	adc_dev = iio_priv(indio_dev);

	adc_dev->mfd_tscadc = ti_tscadc_dev_get(pdev);
	tiadc_parse_dt(pdev, adc_dev);

	indio_dev->dev.parent = &pdev->dev;
	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &tiadc_info;

	tiadc_step_config(indio_dev);
	tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
	mutex_init(&adc_dev->fifo1_lock);

	err = tiadc_channel_init(indio_dev, adc_dev->channels);
	if (err < 0)
		return err;

	err = tiadc_iio_buffered_hardware_setup(indio_dev,
			&tiadc_worker_h,
			&tiadc_irq_h,
			adc_dev->mfd_tscadc->irq,
			IRQF_SHARED,
			&tiadc_buffer_setup_ops);
	if (err)
		goto err_free_channels;

	err = iio_device_register(indio_dev);
	if (err)
		goto err_buffer_unregister;

	platform_set_drvdata(pdev, indio_dev);

	err = tiadc_request_dma(pdev, adc_dev);
	if (err && err == -EPROBE_DEFER)
		goto err_dma;

	return 0;

err_dma:
	iio_device_unregister(indio_dev);
err_buffer_unregister:
	tiadc_iio_buffered_hardware_remove(indio_dev);
err_free_channels:
	tiadc_channels_remove(indio_dev);
	return err;
}

static int tiadc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u32 step_en;

	if (dma->chan) {
		dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				  dma->buf, dma->addr);
		dma_release_channel(dma->chan);
	}
	iio_device_unregister(indio_dev);
	tiadc_iio_buffered_hardware_remove(indio_dev);
	tiadc_channels_remove(indio_dev);

	step_en = get_adc_step_mask(adc_dev);
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, step_en);

	return 0;
}

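/*
 * System PM: power the ADC down on suspend (unless the device may wake the
 * system) and restore the step configuration and cached steps on resume.
 */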
static int __maybe_unused tiadc_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct ti_tscadc_dev *tscadc_dev;
	unsigned int idle;

	tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
	if (!device_may_wakeup(tscadc_dev->dev)) {
		idle = tiadc_readl(adc_dev, REG_CTRL);
		idle &= ~(CNTRLREG_TSCSSENB);
		tiadc_writel(adc_dev, REG_CTRL, (idle |
				CNTRLREG_POWERDOWN));
	}

	return 0;
}

static int __maybe_unused tiadc_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int restore;

	/* Make sure ADC is powered up */
	restore = tiadc_readl(adc_dev, REG_CTRL);
	restore &= ~(CNTRLREG_POWERDOWN);
	tiadc_writel(adc_dev, REG_CTRL, restore);

	tiadc_step_config(indio_dev);
	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc,
			adc_dev->buffer_en_ch_steps);

	return 0;
}

static SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);

static const struct of_device_id ti_adc_dt_ids[] = {
	{ .compatible = "ti,am3359-adc", },
	{ }
};
MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);

static struct platform_driver tiadc_driver = {
	.driver = {
		.name = "TI-am335x-adc",
		.pm = &tiadc_pm_ops,
		.of_match_table = ti_adc_dt_ids,
	},
	.probe = tiadc_probe,
	.remove = tiadc_remove,
};
module_platform_driver(tiadc_driver);

MODULE_DESCRIPTION("TI ADC controller driver");
MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
MODULE_LICENSE("GPL");