1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
8 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/interrupt.h>
13 #include <linux/platform_device.h>
15 #include <linux/iio/iio.h>
17 #include <linux/iio/machine.h>
18 #include <linux/iio/driver.h>
19 #include <linux/iopoll.h>
21 #include <linux/mfd/ti_am335x_tscadc.h>
22 #include <linux/iio/buffer.h>
23 #include <linux/iio/kfifo_buf.h>
25 #include <linux/dmaengine.h>
26 #include <linux/dma-mapping.h>
28 #define DMA_BUFFER_SIZE SZ_2K
31 struct dma_slave_config conf
;
32 struct dma_chan
*chan
;
42 struct ti_tscadc_dev
*mfd_tscadc
;
44 struct mutex fifo1_lock
; /* to protect fifo access */
49 int buffer_en_ch_steps
;
51 u32 open_delay
[8], sample_delay
[8], step_avg
[8];
54 static unsigned int tiadc_readl(struct tiadc_device
*adc
, unsigned int reg
)
56 return readl(adc
->mfd_tscadc
->tscadc_base
+ reg
);
59 static void tiadc_writel(struct tiadc_device
*adc
, unsigned int reg
,
62 writel(val
, adc
->mfd_tscadc
->tscadc_base
+ reg
);
65 static u32
get_adc_step_mask(struct tiadc_device
*adc_dev
)
69 step_en
= ((1 << adc_dev
->channels
) - 1);
70 step_en
<<= TOTAL_STEPS
- adc_dev
->channels
+ 1;
74 static u32
get_adc_chan_step_mask(struct tiadc_device
*adc_dev
,
75 struct iio_chan_spec
const *chan
)
79 for (i
= 0; i
< ARRAY_SIZE(adc_dev
->channel_step
); i
++) {
80 if (chan
->channel
== adc_dev
->channel_line
[i
]) {
83 step
= adc_dev
->channel_step
[i
];
84 /* +1 for the charger */
85 return 1 << (step
+ 1);
92 static u32
get_adc_step_bit(struct tiadc_device
*adc_dev
, int chan
)
94 return 1 << adc_dev
->channel_step
[chan
];
97 static int tiadc_wait_idle(struct tiadc_device
*adc_dev
)
101 return readl_poll_timeout(adc_dev
->mfd_tscadc
->tscadc_base
+ REG_ADCFSM
,
102 val
, !(val
& SEQ_STATUS
), 10,
103 IDLE_TIMEOUT_MS
* 1000 * adc_dev
->channels
);
106 static void tiadc_step_config(struct iio_dev
*indio_dev
)
108 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
109 unsigned int stepconfig
;
113 * There are 16 configurable steps and 8 analog input
114 * lines available which are shared between Touchscreen and ADC.
116 * Steps forwards i.e. from 0 towards 16 are used by ADC
117 * depending on number of input lines needed.
118 * Channel would represent which analog input
119 * needs to be given to ADC to digitalize data.
121 for (i
= 0; i
< adc_dev
->channels
; i
++) {
124 chan
= adc_dev
->channel_line
[i
];
126 if (adc_dev
->step_avg
[i
])
127 stepconfig
= STEPCONFIG_AVG(ffs(adc_dev
->step_avg
[i
]) - 1) |
130 stepconfig
= STEPCONFIG_FIFO1
;
132 if (iio_buffer_enabled(indio_dev
))
133 stepconfig
|= STEPCONFIG_MODE_SWCNT
;
135 tiadc_writel(adc_dev
, REG_STEPCONFIG(steps
),
136 stepconfig
| STEPCONFIG_INP(chan
) |
137 STEPCONFIG_INM_ADCREFM
| STEPCONFIG_RFP_VREFP
|
138 STEPCONFIG_RFM_VREFN
);
140 tiadc_writel(adc_dev
, REG_STEPDELAY(steps
),
141 STEPDELAY_OPEN(adc_dev
->open_delay
[i
]) |
142 STEPDELAY_SAMPLE(adc_dev
->sample_delay
[i
]));
144 adc_dev
->channel_step
[i
] = steps
;
149 static irqreturn_t
tiadc_irq_h(int irq
, void *private)
151 struct iio_dev
*indio_dev
= private;
152 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
153 unsigned int status
, config
, adc_fsm
;
154 unsigned short count
= 0;
156 status
= tiadc_readl(adc_dev
, REG_IRQSTATUS
);
159 * ADC and touchscreen share the IRQ line.
160 * FIFO0 interrupts are used by TSC. Handle FIFO1 IRQs here only
162 if (status
& IRQENB_FIFO1OVRRUN
) {
163 /* FIFO Overrun. Clear flag. Disable/Enable ADC to recover */
164 config
= tiadc_readl(adc_dev
, REG_CTRL
);
165 config
&= ~(CNTRLREG_SSENB
);
166 tiadc_writel(adc_dev
, REG_CTRL
, config
);
167 tiadc_writel(adc_dev
, REG_IRQSTATUS
,
168 IRQENB_FIFO1OVRRUN
| IRQENB_FIFO1UNDRFLW
|
172 * Wait for the idle state.
173 * ADC needs to finish the current conversion
174 * before disabling the module
177 adc_fsm
= tiadc_readl(adc_dev
, REG_ADCFSM
);
178 } while (adc_fsm
!= 0x10 && count
++ < 100);
180 tiadc_writel(adc_dev
, REG_CTRL
, (config
| CNTRLREG_SSENB
));
182 } else if (status
& IRQENB_FIFO1THRES
) {
183 /* Disable irq and wake worker thread */
184 tiadc_writel(adc_dev
, REG_IRQCLR
, IRQENB_FIFO1THRES
);
185 return IRQ_WAKE_THREAD
;
191 static irqreturn_t
tiadc_worker_h(int irq
, void *private)
193 struct iio_dev
*indio_dev
= private;
194 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
195 int i
, k
, fifo1count
, read
;
196 u16
*data
= adc_dev
->data
;
198 fifo1count
= tiadc_readl(adc_dev
, REG_FIFO1CNT
);
199 for (k
= 0; k
< fifo1count
; k
= k
+ i
) {
200 for (i
= 0; i
< indio_dev
->scan_bytes
/ 2; i
++) {
201 read
= tiadc_readl(adc_dev
, REG_FIFO1
);
202 data
[i
] = read
& FIFOREAD_DATA_MASK
;
204 iio_push_to_buffers(indio_dev
, (u8
*)data
);
207 tiadc_writel(adc_dev
, REG_IRQSTATUS
, IRQENB_FIFO1THRES
);
208 tiadc_writel(adc_dev
, REG_IRQENABLE
, IRQENB_FIFO1THRES
);
213 static void tiadc_dma_rx_complete(void *param
)
215 struct iio_dev
*indio_dev
= param
;
216 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
217 struct tiadc_dma
*dma
= &adc_dev
->dma
;
221 data
= dma
->buf
+ dma
->current_period
* dma
->period_size
;
222 dma
->current_period
= 1 - dma
->current_period
; /* swap the buffer ID */
224 for (i
= 0; i
< dma
->period_size
; i
+= indio_dev
->scan_bytes
) {
225 iio_push_to_buffers(indio_dev
, data
);
226 data
+= indio_dev
->scan_bytes
;
230 static int tiadc_start_dma(struct iio_dev
*indio_dev
)
232 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
233 struct tiadc_dma
*dma
= &adc_dev
->dma
;
234 struct dma_async_tx_descriptor
*desc
;
236 dma
->current_period
= 0; /* We start to fill period 0 */
239 * Make the fifo thresh as the multiple of total number of
240 * channels enabled, so make sure that cyclic DMA period
241 * length is also a multiple of total number of channels
242 * enabled. This ensures that no invalid data is reported
243 * to the stack via iio_push_to_buffers().
245 dma
->fifo_thresh
= rounddown(FIFO1_THRESHOLD
+ 1,
246 adc_dev
->total_ch_enabled
) - 1;
248 /* Make sure that period length is multiple of fifo thresh level */
249 dma
->period_size
= rounddown(DMA_BUFFER_SIZE
/ 2,
250 (dma
->fifo_thresh
+ 1) * sizeof(u16
));
252 dma
->conf
.src_maxburst
= dma
->fifo_thresh
+ 1;
253 dmaengine_slave_config(dma
->chan
, &dma
->conf
);
255 desc
= dmaengine_prep_dma_cyclic(dma
->chan
, dma
->addr
,
256 dma
->period_size
* 2,
257 dma
->period_size
, DMA_DEV_TO_MEM
,
262 desc
->callback
= tiadc_dma_rx_complete
;
263 desc
->callback_param
= indio_dev
;
265 dma
->cookie
= dmaengine_submit(desc
);
267 dma_async_issue_pending(dma
->chan
);
269 tiadc_writel(adc_dev
, REG_FIFO1THR
, dma
->fifo_thresh
);
270 tiadc_writel(adc_dev
, REG_DMA1REQ
, dma
->fifo_thresh
);
271 tiadc_writel(adc_dev
, REG_DMAENABLE_SET
, DMA_FIFO1
);
276 static int tiadc_buffer_preenable(struct iio_dev
*indio_dev
)
278 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
282 ret
= tiadc_wait_idle(adc_dev
);
286 tiadc_writel(adc_dev
, REG_IRQCLR
,
287 IRQENB_FIFO1THRES
| IRQENB_FIFO1OVRRUN
|
288 IRQENB_FIFO1UNDRFLW
);
290 /* Flush FIFO. Needed in corner cases in simultaneous tsc/adc use */
291 fifo1count
= tiadc_readl(adc_dev
, REG_FIFO1CNT
);
292 for (i
= 0; i
< fifo1count
; i
++)
293 tiadc_readl(adc_dev
, REG_FIFO1
);
298 static int tiadc_buffer_postenable(struct iio_dev
*indio_dev
)
300 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
301 struct tiadc_dma
*dma
= &adc_dev
->dma
;
302 unsigned int irq_enable
;
303 unsigned int enb
= 0;
306 tiadc_step_config(indio_dev
);
307 for_each_set_bit(bit
, indio_dev
->active_scan_mask
, adc_dev
->channels
) {
308 enb
|= (get_adc_step_bit(adc_dev
, bit
) << 1);
309 adc_dev
->total_ch_enabled
++;
311 adc_dev
->buffer_en_ch_steps
= enb
;
314 tiadc_start_dma(indio_dev
);
316 am335x_tsc_se_set_cache(adc_dev
->mfd_tscadc
, enb
);
318 tiadc_writel(adc_dev
, REG_IRQSTATUS
,
319 IRQENB_FIFO1THRES
| IRQENB_FIFO1OVRRUN
|
320 IRQENB_FIFO1UNDRFLW
);
322 irq_enable
= IRQENB_FIFO1OVRRUN
;
324 irq_enable
|= IRQENB_FIFO1THRES
;
325 tiadc_writel(adc_dev
, REG_IRQENABLE
, irq_enable
);
330 static int tiadc_buffer_predisable(struct iio_dev
*indio_dev
)
332 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
333 struct tiadc_dma
*dma
= &adc_dev
->dma
;
336 tiadc_writel(adc_dev
, REG_IRQCLR
,
337 IRQENB_FIFO1THRES
| IRQENB_FIFO1OVRRUN
|
338 IRQENB_FIFO1UNDRFLW
);
339 am335x_tsc_se_clr(adc_dev
->mfd_tscadc
, adc_dev
->buffer_en_ch_steps
);
340 adc_dev
->buffer_en_ch_steps
= 0;
341 adc_dev
->total_ch_enabled
= 0;
343 tiadc_writel(adc_dev
, REG_DMAENABLE_CLEAR
, 0x2);
344 dmaengine_terminate_async(dma
->chan
);
347 /* Flush FIFO of leftover data in the time it takes to disable adc */
348 fifo1count
= tiadc_readl(adc_dev
, REG_FIFO1CNT
);
349 for (i
= 0; i
< fifo1count
; i
++)
350 tiadc_readl(adc_dev
, REG_FIFO1
);
/* Buffer post-disable: reprogram steps for one-shot (non-buffered) reads. */
static int tiadc_buffer_postdisable(struct iio_dev *indio_dev)
{
	tiadc_step_config(indio_dev);

	return 0;
}
362 static const struct iio_buffer_setup_ops tiadc_buffer_setup_ops
= {
363 .preenable
= &tiadc_buffer_preenable
,
364 .postenable
= &tiadc_buffer_postenable
,
365 .predisable
= &tiadc_buffer_predisable
,
366 .postdisable
= &tiadc_buffer_postdisable
,
369 static int tiadc_iio_buffered_hardware_setup(struct device
*dev
,
370 struct iio_dev
*indio_dev
,
371 irqreturn_t (*pollfunc_bh
)(int irq
, void *p
),
372 irqreturn_t (*pollfunc_th
)(int irq
, void *p
),
373 int irq
, unsigned long flags
,
374 const struct iio_buffer_setup_ops
*setup_ops
)
378 ret
= devm_iio_kfifo_buffer_setup(dev
, indio_dev
, setup_ops
);
382 return devm_request_threaded_irq(dev
, irq
, pollfunc_th
, pollfunc_bh
,
383 flags
, indio_dev
->name
, indio_dev
);
/*
 * Datasheet names of the 8 analog input lines. NOTE(review): the element
 * list was lost in the garbled source; reconstructed as AIN0..AIN7 since
 * channel_line[] indexes up to 8 lines — confirm against upstream.
 */
static const char * const chan_name_ain[] = {
	"AIN0", "AIN1", "AIN2", "AIN3",
	"AIN4", "AIN5", "AIN6", "AIN7",
};
397 static int tiadc_channel_init(struct device
*dev
, struct iio_dev
*indio_dev
,
400 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
401 struct iio_chan_spec
*chan_array
;
402 struct iio_chan_spec
*chan
;
405 indio_dev
->num_channels
= channels
;
406 chan_array
= devm_kcalloc(dev
, channels
, sizeof(*chan_array
),
412 for (i
= 0; i
< channels
; i
++, chan
++) {
413 chan
->type
= IIO_VOLTAGE
;
415 chan
->channel
= adc_dev
->channel_line
[i
];
416 chan
->info_mask_separate
= BIT(IIO_CHAN_INFO_RAW
);
417 chan
->info_mask_shared_by_type
= BIT(IIO_CHAN_INFO_SCALE
);
418 chan
->datasheet_name
= chan_name_ain
[chan
->channel
];
419 chan
->scan_index
= i
;
420 chan
->scan_type
.sign
= 'u';
421 chan
->scan_type
.realbits
= 12;
422 chan
->scan_type
.storagebits
= 16;
425 indio_dev
->channels
= chan_array
;
430 static int tiadc_read_raw(struct iio_dev
*indio_dev
,
431 struct iio_chan_spec
const *chan
, int *val
, int *val2
,
434 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
436 unsigned int fifo1count
, read
, stepid
;
439 unsigned long timeout
;
443 case IIO_CHAN_INFO_RAW
:
445 case IIO_CHAN_INFO_SCALE
:
446 switch (chan
->type
) {
449 *val2
= chan
->scan_type
.realbits
;
450 return IIO_VAL_FRACTIONAL_LOG2
;
459 if (iio_buffer_enabled(indio_dev
))
462 step_en
= get_adc_chan_step_mask(adc_dev
, chan
);
466 mutex_lock(&adc_dev
->fifo1_lock
);
468 ret
= tiadc_wait_idle(adc_dev
);
472 fifo1count
= tiadc_readl(adc_dev
, REG_FIFO1CNT
);
474 tiadc_readl(adc_dev
, REG_FIFO1
);
476 am335x_tsc_se_set_once(adc_dev
->mfd_tscadc
, step_en
);
478 /* Wait for Fifo threshold interrupt */
479 timeout
= jiffies
+ msecs_to_jiffies(IDLE_TIMEOUT_MS
* adc_dev
->channels
);
481 fifo1count
= tiadc_readl(adc_dev
, REG_FIFO1CNT
);
485 if (time_after(jiffies
, timeout
)) {
486 am335x_tsc_se_adc_done(adc_dev
->mfd_tscadc
);
492 map_val
= adc_dev
->channel_step
[chan
->scan_index
];
495 * We check the complete FIFO. We programmed just one entry but in case
496 * something went wrong we left empty handed (-EAGAIN previously) and
497 * then the value appeared somehow in the FIFO we would have two entries.
498 * Therefore we read every item and keep only the latest version of the
501 for (i
= 0; i
< fifo1count
; i
++) {
502 read
= tiadc_readl(adc_dev
, REG_FIFO1
);
503 stepid
= read
& FIFOREAD_CHNLID_MASK
;
504 stepid
= stepid
>> 0x10;
506 if (stepid
== map_val
) {
507 read
= read
& FIFOREAD_DATA_MASK
;
513 am335x_tsc_se_adc_done(adc_dev
->mfd_tscadc
);
519 mutex_unlock(&adc_dev
->fifo1_lock
);
520 return ret
? ret
: IIO_VAL_INT
;
523 static const struct iio_info tiadc_info
= {
524 .read_raw
= &tiadc_read_raw
,
527 static int tiadc_request_dma(struct platform_device
*pdev
,
528 struct tiadc_device
*adc_dev
)
530 struct tiadc_dma
*dma
= &adc_dev
->dma
;
533 /* Default slave configuration parameters */
534 dma
->conf
.direction
= DMA_DEV_TO_MEM
;
535 dma
->conf
.src_addr_width
= DMA_SLAVE_BUSWIDTH_2_BYTES
;
536 dma
->conf
.src_addr
= adc_dev
->mfd_tscadc
->tscadc_phys_base
+ REG_FIFO1
;
539 dma_cap_set(DMA_CYCLIC
, mask
);
541 /* Get a channel for RX */
542 dma
->chan
= dma_request_chan(adc_dev
->mfd_tscadc
->dev
, "fifo1");
543 if (IS_ERR(dma
->chan
)) {
544 int ret
= PTR_ERR(dma
->chan
);
551 dma
->buf
= dma_alloc_coherent(dma
->chan
->device
->dev
, DMA_BUFFER_SIZE
,
552 &dma
->addr
, GFP_KERNEL
);
559 dma_release_channel(dma
->chan
);
563 static int tiadc_parse_dt(struct platform_device
*pdev
,
564 struct tiadc_device
*adc_dev
)
566 struct device_node
*node
= pdev
->dev
.of_node
;
571 of_property_for_each_u32(node
, "ti,adc-channels", val
) {
572 adc_dev
->channel_line
[channels
] = val
;
574 /* Set Default values for optional DT parameters */
575 adc_dev
->open_delay
[channels
] = STEPCONFIG_OPENDLY
;
576 adc_dev
->sample_delay
[channels
] = STEPCONFIG_SAMPLEDLY
;
577 adc_dev
->step_avg
[channels
] = 16;
582 adc_dev
->channels
= channels
;
584 of_property_read_u32_array(node
, "ti,chan-step-avg",
585 adc_dev
->step_avg
, channels
);
586 of_property_read_u32_array(node
, "ti,chan-step-opendelay",
587 adc_dev
->open_delay
, channels
);
588 of_property_read_u32_array(node
, "ti,chan-step-sampledelay",
589 adc_dev
->sample_delay
, channels
);
591 for (i
= 0; i
< adc_dev
->channels
; i
++) {
594 chan
= adc_dev
->channel_line
[i
];
596 if (adc_dev
->step_avg
[i
] > STEPCONFIG_AVG_16
) {
598 "chan %d: wrong step avg, truncated to %ld\n",
599 chan
, STEPCONFIG_AVG_16
);
600 adc_dev
->step_avg
[i
] = STEPCONFIG_AVG_16
;
603 if (adc_dev
->open_delay
[i
] > STEPCONFIG_MAX_OPENDLY
) {
605 "chan %d: wrong open delay, truncated to 0x%lX\n",
606 chan
, STEPCONFIG_MAX_OPENDLY
);
607 adc_dev
->open_delay
[i
] = STEPCONFIG_MAX_OPENDLY
;
610 if (adc_dev
->sample_delay
[i
] > STEPCONFIG_MAX_SAMPLE
) {
612 "chan %d: wrong sample delay, truncated to 0x%lX\n",
613 chan
, STEPCONFIG_MAX_SAMPLE
);
614 adc_dev
->sample_delay
[i
] = STEPCONFIG_MAX_SAMPLE
;
621 static int tiadc_probe(struct platform_device
*pdev
)
623 struct iio_dev
*indio_dev
;
624 struct tiadc_device
*adc_dev
;
625 struct device_node
*node
= pdev
->dev
.of_node
;
629 dev_err(&pdev
->dev
, "Could not find valid DT data.\n");
633 indio_dev
= devm_iio_device_alloc(&pdev
->dev
, sizeof(*adc_dev
));
635 dev_err(&pdev
->dev
, "failed to allocate iio device\n");
638 adc_dev
= iio_priv(indio_dev
);
640 adc_dev
->mfd_tscadc
= ti_tscadc_dev_get(pdev
);
641 tiadc_parse_dt(pdev
, adc_dev
);
643 indio_dev
->name
= dev_name(&pdev
->dev
);
644 indio_dev
->modes
= INDIO_DIRECT_MODE
;
645 indio_dev
->info
= &tiadc_info
;
647 tiadc_step_config(indio_dev
);
648 tiadc_writel(adc_dev
, REG_FIFO1THR
, FIFO1_THRESHOLD
);
649 mutex_init(&adc_dev
->fifo1_lock
);
651 err
= tiadc_channel_init(&pdev
->dev
, indio_dev
, adc_dev
->channels
);
655 err
= tiadc_iio_buffered_hardware_setup(&pdev
->dev
, indio_dev
,
658 adc_dev
->mfd_tscadc
->irq
,
660 &tiadc_buffer_setup_ops
);
664 err
= iio_device_register(indio_dev
);
668 platform_set_drvdata(pdev
, indio_dev
);
670 err
= tiadc_request_dma(pdev
, adc_dev
);
671 if (err
&& err
!= -ENODEV
) {
672 dev_err_probe(&pdev
->dev
, err
, "DMA request failed\n");
679 iio_device_unregister(indio_dev
);
684 static void tiadc_remove(struct platform_device
*pdev
)
686 struct iio_dev
*indio_dev
= platform_get_drvdata(pdev
);
687 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
688 struct tiadc_dma
*dma
= &adc_dev
->dma
;
692 dma_free_coherent(dma
->chan
->device
->dev
, DMA_BUFFER_SIZE
,
693 dma
->buf
, dma
->addr
);
694 dma_release_channel(dma
->chan
);
696 iio_device_unregister(indio_dev
);
698 step_en
= get_adc_step_mask(adc_dev
);
699 am335x_tsc_se_clr(adc_dev
->mfd_tscadc
, step_en
);
702 static int tiadc_suspend(struct device
*dev
)
704 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
705 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
708 idle
= tiadc_readl(adc_dev
, REG_CTRL
);
709 idle
&= ~(CNTRLREG_SSENB
);
710 tiadc_writel(adc_dev
, REG_CTRL
, idle
| CNTRLREG_POWERDOWN
);
715 static int tiadc_resume(struct device
*dev
)
717 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
718 struct tiadc_device
*adc_dev
= iio_priv(indio_dev
);
719 unsigned int restore
;
721 /* Make sure ADC is powered up */
722 restore
= tiadc_readl(adc_dev
, REG_CTRL
);
723 restore
&= ~CNTRLREG_POWERDOWN
;
724 tiadc_writel(adc_dev
, REG_CTRL
, restore
);
726 tiadc_step_config(indio_dev
);
727 am335x_tsc_se_set_cache(adc_dev
->mfd_tscadc
,
728 adc_dev
->buffer_en_ch_steps
);
732 static DEFINE_SIMPLE_DEV_PM_OPS(tiadc_pm_ops
, tiadc_suspend
, tiadc_resume
);
734 static const struct of_device_id ti_adc_dt_ids
[] = {
735 { .compatible
= "ti,am3359-adc", },
736 { .compatible
= "ti,am4372-adc", },
739 MODULE_DEVICE_TABLE(of
, ti_adc_dt_ids
);
741 static struct platform_driver tiadc_driver
= {
743 .name
= "TI-am335x-adc",
744 .pm
= pm_sleep_ptr(&tiadc_pm_ops
),
745 .of_match_table
= ti_adc_dt_ids
,
747 .probe
= tiadc_probe
,
748 .remove
= tiadc_remove
,
750 module_platform_driver(tiadc_driver
);
752 MODULE_DESCRIPTION("TI ADC controller driver");
753 MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
754 MODULE_LICENSE("GPL");