// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx XADC driver
 *
 * Copyright 2013-2014 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Documentation for the parts can be found at:
 *  - XADC hardmacro: Xilinx UG480
 *  - ZYNQ XADC interface: Xilinx UG585
 *  - AXI XADC interface: Xilinx PG019
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

#include "xilinx-xadc.h"
static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
/* ZYNQ register definitions */
#define XADC_ZYNQ_REG_CFG	0x00
#define XADC_ZYNQ_REG_INTSTS	0x04
#define XADC_ZYNQ_REG_INTMSK	0x08
#define XADC_ZYNQ_REG_STATUS	0x0c
#define XADC_ZYNQ_REG_CFIFO	0x10
#define XADC_ZYNQ_REG_DFIFO	0x14
#define XADC_ZYNQ_REG_CTL	0x18

#define XADC_ZYNQ_CFG_ENABLE		BIT(31)
#define XADC_ZYNQ_CFG_CFIFOTH_MASK	(0xf << 20)
#define XADC_ZYNQ_CFG_CFIFOTH_OFFSET	20
#define XADC_ZYNQ_CFG_DFIFOTH_MASK	(0xf << 16)
#define XADC_ZYNQ_CFG_DFIFOTH_OFFSET	16
#define XADC_ZYNQ_CFG_WEDGE		BIT(13)
#define XADC_ZYNQ_CFG_REDGE		BIT(12)
#define XADC_ZYNQ_CFG_TCKRATE_MASK	(0x3 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV2	(0x0 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV4	(0x1 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV8	(0x2 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV16	(0x3 << 8)
#define XADC_ZYNQ_CFG_IGAP_MASK		0x1f
#define XADC_ZYNQ_CFG_IGAP(x)		(x)

#define XADC_ZYNQ_INT_CFIFO_LTH		BIT(9)
#define XADC_ZYNQ_INT_DFIFO_GTH		BIT(8)
#define XADC_ZYNQ_INT_ALARM_MASK	0xff
#define XADC_ZYNQ_INT_ALARM_OFFSET	0

#define XADC_ZYNQ_STATUS_CFIFO_LVL_MASK		(0xf << 16)
#define XADC_ZYNQ_STATUS_CFIFO_LVL_OFFSET	16
#define XADC_ZYNQ_STATUS_DFIFO_LVL_MASK		(0xf << 12)
#define XADC_ZYNQ_STATUS_DFIFO_LVL_OFFSET	12
#define XADC_ZYNQ_STATUS_CFIFOF		BIT(11)
#define XADC_ZYNQ_STATUS_CFIFOE		BIT(10)
#define XADC_ZYNQ_STATUS_DFIFOF		BIT(9)
#define XADC_ZYNQ_STATUS_DFIFOE		BIT(8)
#define XADC_ZYNQ_STATUS_OT		BIT(7)
#define XADC_ZYNQ_STATUS_ALM(x)		BIT(x)

#define XADC_ZYNQ_CTL_RESET		BIT(4)

#define XADC_ZYNQ_CMD_NOP		0x00
#define XADC_ZYNQ_CMD_READ		0x01
#define XADC_ZYNQ_CMD_WRITE		0x02

#define XADC_ZYNQ_CMD(cmd, addr, data) (((cmd) << 26) | ((addr) << 16) | (data))
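
/*
 * Layout of a CFIFO command word as built by XADC_ZYNQ_CMD: the command code
 * sits at bit 26 and above, the XADC register address in bits [25:16] and the
 * write data in bits [15:0]. For example, a write of 0x1234 to register 0x10
 * is encoded as XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_WRITE, 0x10, 0x1234), which
 * evaluates to 0x08101234.
 */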
/* AXI register definitions */
#define XADC_AXI_REG_RESET		0x00
#define XADC_AXI_REG_STATUS		0x04
#define XADC_AXI_REG_ALARM_STATUS	0x08
#define XADC_AXI_REG_CONVST		0x0c
#define XADC_AXI_REG_XADC_RESET		0x10
#define XADC_AXI_REG_GIER		0x5c
#define XADC_AXI_REG_IPISR		0x60
#define XADC_AXI_REG_IPIER		0x68
#define XADC_AXI_ADC_REG_OFFSET		0x200

#define XADC_AXI_RESET_MAGIC		0xa
#define XADC_AXI_GIER_ENABLE		BIT(31)

#define XADC_AXI_INT_EOS		BIT(4)
#define XADC_AXI_INT_ALARM_MASK		0x3c0f

#define XADC_FLAGS_BUFFERED		BIT(0)
static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
	uint32_t val)
{
	writel(val, xadc->base + reg);
}
static void xadc_read_reg(struct xadc *xadc, unsigned int reg,
	uint32_t *val)
{
	*val = readl(xadc->base + reg);
}
/*
 * The ZYNQ interface uses two asynchronous FIFOs for communication with the
 * XADC. Reads and writes to the XADC registers are performed by submitting a
 * request to the command FIFO (CFIFO); once the request has been completed the
 * result can be read from the data FIFO (DFIFO). The method currently used in
 * this driver is to submit the request for a read/write operation, then go to
 * sleep and wait for an interrupt that signals that a response is available in
 * the data FIFO.
 */
static void xadc_zynq_write_fifo(struct xadc *xadc, uint32_t *cmd,
	unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xadc_write_reg(xadc, XADC_ZYNQ_REG_CFIFO, cmd[i]);
}
static void xadc_zynq_drain_fifo(struct xadc *xadc)
{
	uint32_t status, tmp;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);

	while (!(status & XADC_ZYNQ_STATUS_DFIFOE)) {
		xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
		xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
	}
}
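
/*
 * Update the cached interrupt mask and write the union of the disabled
 * interrupts and the temporarily masked alarms to the hardware. All callers
 * hold xadc->lock while calling this.
 */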
static void xadc_zynq_update_intmsk(struct xadc *xadc, unsigned int mask,
	unsigned int val)
{
	xadc->zynq_intmask &= ~mask;
	xadc->zynq_intmask |= val;

	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK,
		xadc->zynq_intmask | xadc->zynq_masked_alarm);
}
static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t val)
{
	uint32_t cmd[1];
	uint32_t tmp;
	int ret;

	spin_lock_irq(&xadc->lock);
	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
			XADC_ZYNQ_INT_DFIFO_GTH);

	reinit_completion(&xadc->completion);

	cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_WRITE, reg, val);
	xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
	xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
	tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
	tmp |= 0 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);

	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
	spin_unlock_irq(&xadc->lock);

	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
	if (ret == 0)
		ret = -EIO;
	else
		ret = 0;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);

	return ret;
}
static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t *val)
{
	uint32_t cmd[2];
	uint32_t resp, tmp;
	int ret;

	cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, reg, 0);
	cmd[1] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_NOP, 0, 0);

	spin_lock_irq(&xadc->lock);
	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
			XADC_ZYNQ_INT_DFIFO_GTH);
	xadc_zynq_drain_fifo(xadc);
	reinit_completion(&xadc->completion);

	xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
	xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
	tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
	tmp |= 1 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);

	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
	spin_unlock_irq(&xadc->lock);
	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
	if (ret == 0)
		ret = -EIO;
	if (ret < 0)
		return ret;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
	xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);

	*val = resp & 0xffff;

	return 0;
}
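
/*
 * The ZYNQ interrupt status keeps the over-temperature alarm at bit 7, while
 * xadc_handle_events() expects the layout of the XADC alarm enable register
 * with OT at bit 3. Shuffle the bits accordingly: bit 7 moves down to bit 3,
 * bits 6:3 move up to bits 7:4 and bits 2:0 stay in place.
 */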
static unsigned int xadc_zynq_transform_alarm(unsigned int alarm)
{
	return ((alarm & 0x80) >> 4) |
		((alarm & 0x78) << 1) |
		(alarm & 0x07);
}
/*
 * The ZYNQ threshold interrupts are level sensitive. Since we can't make the
 * threshold condition go away from within the interrupt handler, this means as
 * soon as a threshold condition is present we would enter the interrupt handler
 * again and again. To work around this we mask all active threshold interrupts
 * in the interrupt handler and start a timer. In this timer we poll the
 * interrupt status and unmask an interrupt again only once it has become
 * inactive.
 */
static void xadc_zynq_unmask_worker(struct work_struct *work)
{
	struct xadc *xadc = container_of(work, struct xadc,
		zynq_unmask_work.work);
	unsigned int misc_sts, unmask;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &misc_sts);

	misc_sts &= XADC_ZYNQ_INT_ALARM_MASK;

	spin_lock_irq(&xadc->lock);

	/* Clear those bits which are not active anymore */
	unmask = (xadc->zynq_masked_alarm ^ misc_sts) & xadc->zynq_masked_alarm;
	xadc->zynq_masked_alarm &= misc_sts;

	/* Also clear those which are masked out anyway */
	xadc->zynq_masked_alarm &= ~xadc->zynq_intmask;

	/* Clear the interrupts before we unmask them */
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, unmask);

	xadc_zynq_update_intmsk(xadc, 0, 0);

	spin_unlock_irq(&xadc->lock);

	/* If some alarms are still pending, re-trigger the timer */
	if (xadc->zynq_masked_alarm) {
		schedule_delayed_work(&xadc->zynq_unmask_work,
			msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
	}
}
static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
{
	struct iio_dev *indio_dev = devid;
	struct xadc *xadc = iio_priv(indio_dev);
	uint32_t status;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);

	status &= ~(xadc->zynq_intmask | xadc->zynq_masked_alarm);

	if (!status)
		return IRQ_NONE;

	spin_lock(&xadc->lock);

	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);

	if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
		xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
			XADC_ZYNQ_INT_DFIFO_GTH);
		complete(&xadc->completion);
	}

	status &= XADC_ZYNQ_INT_ALARM_MASK;
	if (status) {
		xadc->zynq_masked_alarm |= status;
		/*
		 * Mask the current event interrupt and unmask it again once
		 * the interrupt is no longer active.
		 */
		xadc_zynq_update_intmsk(xadc, 0, 0);

		xadc_handle_events(indio_dev,
			xadc_zynq_transform_alarm(status));

		/* Unmask the required interrupts from the timer. */
		schedule_delayed_work(&xadc->zynq_unmask_work,
			msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
	}
	spin_unlock(&xadc->lock);

	return IRQ_HANDLED;
}
#define XADC_ZYNQ_TCK_RATE_MAX		50000000
#define XADC_ZYNQ_IGAP_DEFAULT		20
#define XADC_ZYNQ_PCAP_RATE_MAX		200000000
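
/*
 * The TCK divider can only be programmed as a power of two between 2 and 16,
 * so xadc_zynq_setup() picks the smallest setting that keeps TCK at or below
 * XADC_ZYNQ_TCK_RATE_MAX. E.g. with a 200 MHz PCAP clock and the 50 MHz TCK
 * limit the exact divider is 4, which maps to XADC_ZYNQ_CFG_TCKRATE_DIV4.
 */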
static int xadc_zynq_setup(struct platform_device *pdev,
	struct iio_dev *indio_dev, int irq)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long pcap_rate;
	unsigned int tck_div;
	unsigned int div;
	unsigned int igap;
	unsigned int tck_rate;
	int ret;

	/* TODO: Figure out how to make igap and tck_rate configurable */
	igap = XADC_ZYNQ_IGAP_DEFAULT;
	tck_rate = XADC_ZYNQ_TCK_RATE_MAX;

	xadc->zynq_intmask = ~0;

	pcap_rate = clk_get_rate(xadc->clk);
	if (!pcap_rate)
		return -EINVAL;

	if (pcap_rate > XADC_ZYNQ_PCAP_RATE_MAX) {
		ret = clk_set_rate(xadc->clk,
			(unsigned long)XADC_ZYNQ_PCAP_RATE_MAX);
		if (ret)
			return ret;
	}

	if (tck_rate > pcap_rate / 2) {
		div = 2;
	} else {
		div = pcap_rate / tck_rate;
		if (pcap_rate / div > XADC_ZYNQ_TCK_RATE_MAX)
			div++;
	}

	if (div <= 3)
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV2;
	else if (div <= 7)
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV4;
	else if (div <= 15)
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV8;
	else
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV16;

	xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, XADC_ZYNQ_CTL_RESET);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, 0);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, ~0);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK, xadc->zynq_intmask);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, XADC_ZYNQ_CFG_ENABLE |
			XADC_ZYNQ_CFG_REDGE | XADC_ZYNQ_CFG_WEDGE |
			tck_div | XADC_ZYNQ_CFG_IGAP(igap));

	if (pcap_rate > XADC_ZYNQ_PCAP_RATE_MAX) {
		ret = clk_set_rate(xadc->clk, pcap_rate);
		if (ret)
			return ret;
	}

	return 0;
}
static unsigned long xadc_zynq_get_dclk_rate(struct xadc *xadc)
{
	unsigned int div;
	uint32_t val;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &val);

	switch (val & XADC_ZYNQ_CFG_TCKRATE_MASK) {
	case XADC_ZYNQ_CFG_TCKRATE_DIV4:
		div = 4;
		break;
	case XADC_ZYNQ_CFG_TCKRATE_DIV8:
		div = 8;
		break;
	case XADC_ZYNQ_CFG_TCKRATE_DIV16:
		div = 16;
		break;
	default:
		div = 2;
		break;
	}

	return clk_get_rate(xadc->clk) / div;
}
static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
{
	unsigned long flags;
	uint32_t status;

	/* Move OT to bit 7 */
	alarm = ((alarm & 0x08) << 4) | ((alarm & 0xf0) >> 1) | (alarm & 0x07);

	spin_lock_irqsave(&xadc->lock, flags);

	/* Clear previous interrupts if any. */
	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status & alarm);

	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_ALARM_MASK,
		~alarm & XADC_ZYNQ_INT_ALARM_MASK);

	spin_unlock_irqrestore(&xadc->lock, flags);
}
static const struct xadc_ops xadc_zynq_ops = {
	.read = xadc_zynq_read_adc_reg,
	.write = xadc_zynq_write_adc_reg,
	.setup = xadc_zynq_setup,
	.get_dclk_rate = xadc_zynq_get_dclk_rate,
	.interrupt_handler = xadc_zynq_interrupt_handler,
	.update_alarm = xadc_zynq_update_alarm,
};
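
/*
 * The AXI-XADC core maps the internal XADC register file straight into its
 * MMIO space: XADC register N lives at AXI offset XADC_AXI_ADC_REG_OFFSET +
 * 4 * N, so a single readl()/writel() replaces the FIFO round-trip that the
 * ZYNQ interface requires.
 */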
static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t *val)
{
	uint32_t val32;

	xadc_read_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, &val32);
	*val = val32 & 0xffff;

	return 0;
}
static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t val)
{
	xadc_write_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, val);

	return 0;
}
static int xadc_axi_setup(struct platform_device *pdev,
	struct iio_dev *indio_dev, int irq)
{
	struct xadc *xadc = iio_priv(indio_dev);

	xadc_write_reg(xadc, XADC_AXI_REG_RESET, XADC_AXI_RESET_MAGIC);
	xadc_write_reg(xadc, XADC_AXI_REG_GIER, XADC_AXI_GIER_ENABLE);

	return 0;
}
static irqreturn_t xadc_axi_interrupt_handler(int irq, void *devid)
{
	struct iio_dev *indio_dev = devid;
	struct xadc *xadc = iio_priv(indio_dev);
	uint32_t status, mask;
	unsigned int events;

	xadc_read_reg(xadc, XADC_AXI_REG_IPISR, &status);
	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &mask);
	status &= mask;

	if (!status)
		return IRQ_NONE;

	if ((status & XADC_AXI_INT_EOS) && xadc->trigger)
		iio_trigger_poll(xadc->trigger);

	if (status & XADC_AXI_INT_ALARM_MASK) {
		/*
		 * The order of the bits in the AXI-XADC status register does
		 * not match the order of the bits in the XADC alarm enable
		 * register. xadc_handle_events() expects the events to be in
		 * the same order as the XADC alarm enable register.
		 */
		events = (status & 0x000e) >> 1;
		events |= (status & 0x0001) << 3;
		events |= (status & 0x3c00) >> 6;
		xadc_handle_events(indio_dev, events);
	}

	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, status);

	return IRQ_HANDLED;
}
static void xadc_axi_update_alarm(struct xadc *xadc, unsigned int alarm)
{
	uint32_t val;
	unsigned long flags;

	/*
	 * The order of the bits in the AXI-XADC status register does not match
	 * the order of the bits in the XADC alarm enable register. We get
	 * passed the alarm mask in the same order as in the XADC alarm enable
	 * register.
	 */
	alarm = ((alarm & 0x07) << 1) | ((alarm & 0x08) >> 3) |
			((alarm & 0xf0) << 6);

	spin_lock_irqsave(&xadc->lock, flags);
	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
	val &= ~XADC_AXI_INT_ALARM_MASK;
	val |= alarm;
	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
	spin_unlock_irqrestore(&xadc->lock, flags);
}
static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
{
	return clk_get_rate(xadc->clk);
}
static const struct xadc_ops xadc_axi_ops = {
	.read = xadc_axi_read_adc_reg,
	.write = xadc_axi_write_adc_reg,
	.setup = xadc_axi_setup,
	.get_dclk_rate = xadc_axi_get_dclk,
	.update_alarm = xadc_axi_update_alarm,
	.interrupt_handler = xadc_axi_interrupt_handler,
	.flags = XADC_FLAGS_BUFFERED,
};
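
/*
 * Helpers prefixed with an underscore expect the caller to hold xadc->mutex;
 * the unprefixed wrappers take the mutex themselves. The update helpers below
 * implement a read-modify-write of a 16-bit XADC register.
 */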
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t mask, uint16_t val)
{
	uint16_t tmp;
	int ret;

	ret = _xadc_read_adc_reg(xadc, reg, &tmp);
	if (ret)
		return ret;

	return _xadc_write_adc_reg(xadc, reg, (tmp & ~mask) | val);
}
static int xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t mask, uint16_t val)
{
	int ret;

	mutex_lock(&xadc->mutex);
	ret = _xadc_update_adc_reg(xadc, reg, mask, val);
	mutex_unlock(&xadc->mutex);

	return ret;
}
static unsigned long xadc_get_dclk_rate(struct xadc *xadc)
{
	return xadc->ops->get_dclk_rate(xadc);
}
static int xadc_update_scan_mode(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned int n;

	n = bitmap_weight(mask, indio_dev->masklength);

	kfree(xadc->data);
	xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL);
	if (!xadc->data)
		return -ENOMEM;

	return 0;
}
static unsigned int xadc_scan_index_to_channel(unsigned int scan_index)
{
	switch (scan_index) {
	case 5:
		return XADC_REG_VCCPINT;
	case 6:
		return XADC_REG_VCCPAUX;
	case 7:
		return XADC_REG_VCCO_DDR;
	case 8:
		return XADC_REG_TEMP;
	case 9:
		return XADC_REG_VCCINT;
	case 10:
		return XADC_REG_VCCAUX;
	case 11:
		return XADC_REG_VPVN;
	case 12:
		return XADC_REG_VREFP;
	case 13:
		return XADC_REG_VREFN;
	case 14:
		return XADC_REG_VCCBRAM;
	default:
		return XADC_REG_VAUX(scan_index - 16);
	}
}
static irqreturn_t xadc_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned int chan;
	int i, j;

	if (!xadc->data)
		goto out;

	j = 0;
	for_each_set_bit(i, indio_dev->active_scan_mask,
		indio_dev->masklength) {
		chan = xadc_scan_index_to_channel(i);
		xadc_read_adc_reg(xadc, chan, &xadc->data[j]);
		j++;
	}

	iio_push_to_buffers(indio_dev, xadc->data);

out:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
{
	struct xadc *xadc = iio_trigger_get_drvdata(trigger);
	unsigned long flags;
	unsigned int convst;
	unsigned int val;
	int ret = 0;

	mutex_lock(&xadc->mutex);

	if (state) {
		/* Only one of the two triggers can be active at a time. */
		if (xadc->trigger != NULL) {
			ret = -EBUSY;
			goto err_out;
		} else {
			xadc->trigger = trigger;
			if (trigger == xadc->convst_trigger)
				convst = XADC_CONF0_EC;
			else
				convst = 0;
		}
		ret = _xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF0_EC,
					convst);
		if (ret)
			goto err_out;
	} else {
		xadc->trigger = NULL;
	}

	spin_lock_irqsave(&xadc->lock, flags);
	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
	if (state)
		val |= XADC_AXI_INT_EOS;
	else
		val &= ~XADC_AXI_INT_EOS;
	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
	spin_unlock_irqrestore(&xadc->lock, flags);

err_out:
	mutex_unlock(&xadc->mutex);

	return ret;
}
static const struct iio_trigger_ops xadc_trigger_ops = {
	.set_trigger_state = &xadc_trigger_set_state,
};
static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
	const char *name)
{
	struct iio_trigger *trig;
	int ret;

	trig = iio_trigger_alloc("%s%d-%s", indio_dev->name,
				indio_dev->id, name);
	if (trig == NULL)
		return ERR_PTR(-ENOMEM);

	trig->dev.parent = indio_dev->dev.parent;
	trig->ops = &xadc_trigger_ops;
	iio_trigger_set_drvdata(trig, iio_priv(indio_dev));

	ret = iio_trigger_register(trig);
	if (ret)
		goto error_free_trig;

	return trig;

error_free_trig:
	iio_trigger_free(trig);
	return ERR_PTR(ret);
}
static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
{
	unsigned int val;

	switch (seq_mode) {
	case XADC_CONF1_SEQ_SIMULTANEOUS:
	case XADC_CONF1_SEQ_INDEPENDENT:
		val = XADC_CONF2_PD_ADC_B;
		break;
	default:
		val = 0;
		break;
	}

	return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_PD_MASK,
		val);
}
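
/*
 * Sequencer mode selection: if the enabled VAUX channels all sit in one half
 * of the range (VAUX[0-7] or VAUX[8-15]) a plain continuous sequence is
 * sufficient; if both halves are in use, or an external dual mux is
 * connected, simultaneous sampling mode is selected instead.
 */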
static int xadc_get_seq_mode(struct xadc *xadc, unsigned long scan_mode)
{
	unsigned int aux_scan_mode = scan_mode >> 16;

	if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_DUAL)
		return XADC_CONF1_SEQ_SIMULTANEOUS;

	if ((aux_scan_mode & 0xff00) == 0 ||
		(aux_scan_mode & 0x00ff) == 0)
		return XADC_CONF1_SEQ_CONTINUOUS;

	return XADC_CONF1_SEQ_SIMULTANEOUS;
}
static int xadc_postdisable(struct iio_dev *indio_dev)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long scan_mask;
	int ret;
	int i;

	scan_mask = 1; /* Run calibration as part of the sequence */
	for (i = 0; i < indio_dev->num_channels; i++)
		scan_mask |= BIT(indio_dev->channels[i].scan_index);

	/* Enable all channels and calibration */
	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
	if (ret)
		return ret;

	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
	if (ret)
		return ret;

	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
		XADC_CONF1_SEQ_CONTINUOUS);
	if (ret)
		return ret;

	return xadc_power_adc_b(xadc, XADC_CONF1_SEQ_CONTINUOUS);
}
static int xadc_preenable(struct iio_dev *indio_dev)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long scan_mask;
	int seq_mode;
	int ret;

	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
		XADC_CONF1_SEQ_DEFAULT);
	if (ret)
		goto err;

	scan_mask = *indio_dev->active_scan_mask;
	seq_mode = xadc_get_seq_mode(xadc, scan_mask);

	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
	if (ret)
		goto err;

	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
	if (ret)
		goto err;

	ret = xadc_power_adc_b(xadc, seq_mode);
	if (ret)
		goto err;

	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
		seq_mode);
	if (ret)
		goto err;

	return 0;
err:
	xadc_postdisable(indio_dev);
	return ret;
}
static const struct iio_buffer_setup_ops xadc_buffer_ops = {
	.preenable = &xadc_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
	.postdisable = &xadc_postdisable,
};
static int xadc_read_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned int div;
	uint16_t val16;
	int ret;

	switch (info) {
	case IIO_CHAN_INFO_RAW:
		if (iio_buffer_enabled(indio_dev))
			return -EBUSY;
		ret = xadc_read_adc_reg(xadc, chan->address, &val16);
		if (ret < 0)
			return ret;

		val16 >>= 4;
		if (chan->scan_type.sign == 'u')
			*val = val16;
		else
			*val = sign_extend32(val16, 11);

		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			/* V = (val * 3.0) / 4096 */
			switch (chan->address) {
			case XADC_REG_VCCINT:
			case XADC_REG_VCCAUX:
			case XADC_REG_VREFP:
			case XADC_REG_VREFN:
			case XADC_REG_VCCBRAM:
			case XADC_REG_VCCPINT:
			case XADC_REG_VCCPAUX:
			case XADC_REG_VCCO_DDR:
				*val = 3000;
				break;
			default:
				*val = 1000;
				break;
			}
			*val2 = 12;
			return IIO_VAL_FRACTIONAL_LOG2;
		case IIO_TEMP:
			/* Temp in C = (val * 503.975) / 4096 - 273.15 */
			*val = 503975;
			*val2 = 12;
			return IIO_VAL_FRACTIONAL_LOG2;
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_OFFSET:
		/* Only the temperature channel has an offset */
		*val = -((273150 << 12) / 503975);
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SAMP_FREQ:
		ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
		if (ret)
			return ret;

		div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
		if (div < 2)
			div = 2;

		*val = xadc_get_dclk_rate(xadc) / div / 26;

		return IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}
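
/*
 * Worked example for the temperature conversion above: the offset evaluates
 * to -((273150 << 12) / 503975) = -2219 LSB, so reported values follow
 * Temp = (raw + offset) * 503.975 / 4096, which is equivalent to
 * Temp = raw * 503.975 / 4096 - 273.15 degrees Celsius.
 */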
static int xadc_write_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan, int val, int val2, long info)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long clk_rate = xadc_get_dclk_rate(xadc);
	unsigned int div;

	if (info != IIO_CHAN_INFO_SAMP_FREQ)
		return -EINVAL;

	if (val <= 0)
		return -EINVAL;

	/* Max. 150 kSPS */
	if (val > 150000)
		val = 150000;

	val *= 26;

	/* Min 1MHz */
	if (val < 1000000)
		val = 1000000;

	/*
	 * We want to round down, but only if we do not exceed the 150 kSPS
	 * limit.
	 */
	div = clk_rate / val;
	if (clk_rate / div / 26 > 150000)
		div++;
	if (div < 2)
		div = 2;
	else if (div > 0xff)
		div = 0xff;

	return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_DIV_MASK,
		div << XADC_CONF2_DIV_OFFSET);
}
static const struct iio_event_spec xadc_temp_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE) |
				BIT(IIO_EV_INFO_VALUE) |
				BIT(IIO_EV_INFO_HYSTERESIS),
	},
};

/* Separate values for upper and lower thresholds, but only a shared enable */
static const struct iio_event_spec xadc_voltage_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_FALLING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_EITHER,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
	},
};
#define XADC_CHAN_TEMP(_chan, _scan_index, _addr) { \
	.type = IIO_TEMP, \
	.indexed = 1, \
	.channel = (_chan), \
	.address = (_addr), \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		BIT(IIO_CHAN_INFO_SCALE) | \
		BIT(IIO_CHAN_INFO_OFFSET), \
	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
	.event_spec = xadc_temp_events, \
	.num_event_specs = ARRAY_SIZE(xadc_temp_events), \
	.scan_index = (_scan_index), \
	.scan_type = { \
		.sign = 'u', \
		.realbits = 12, \
		.storagebits = 16, \
		.shift = 4, \
		.endianness = IIO_CPU, \
	}, \
}
#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) { \
	.type = IIO_VOLTAGE, \
	.indexed = 1, \
	.channel = (_chan), \
	.address = (_addr), \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		BIT(IIO_CHAN_INFO_SCALE), \
	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
	.event_spec = (_alarm) ? xadc_voltage_events : NULL, \
	.num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
	.scan_index = (_scan_index), \
	.scan_type = { \
		.sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
		.realbits = 12, \
		.storagebits = 16, \
		.shift = 4, \
		.endianness = IIO_CPU, \
	}, \
	.extend_name = _ext, \
}
static const struct iio_chan_spec xadc_channels[] = {
	XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
	XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
	XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
	XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
	XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
	XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
	XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
	XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
	XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
	XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
	XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
	XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
	XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
	XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
	XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
	XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
	XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
	XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
	XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
	XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
	XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
	XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
	XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
	XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
	XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
	XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
};
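
/*
 * Note that the scan_index of each channel matches its bit position in the
 * XADC channel sequencer registers, which is what allows xadc_preenable() to
 * write the active_scan_mask directly into XADC_REG_SEQ(0) and
 * XADC_REG_SEQ(1).
 */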
static const struct iio_info xadc_info = {
	.read_raw = &xadc_read_raw,
	.write_raw = &xadc_write_raw,
	.read_event_config = &xadc_read_event_config,
	.write_event_config = &xadc_write_event_config,
	.read_event_value = &xadc_read_event_value,
	.write_event_value = &xadc_write_event_value,
	.update_scan_mode = &xadc_update_scan_mode,
};
static const struct of_device_id xadc_of_match_table[] = {
	{ .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
	{ .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
	{ },
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
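
/*
 * xadc_parse_dt() below consumes a devicetree node of roughly the following
 * shape (illustrative example only; the unit address and phandles are
 * hypothetical, the properties are the ones the code reads):
 *
 *	xadc@f8007100 {
 *		compatible = "xlnx,zynq-xadc-1.00.a";
 *		reg = <0xf8007100 0x20>;
 *		interrupts = <0 7 4>;
 *		clocks = <&clkc 12>;
 *		xlnx,external-mux = "single";
 *		xlnx,external-mux-channel = <0>;
 *		xlnx,channels {
 *			channel@0 {
 *				reg = <0>;
 *				xlnx,bipolar;
 *			};
 *		};
 *	};
 */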
static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
	unsigned int *conf)
{
	struct xadc *xadc = iio_priv(indio_dev);
	struct iio_chan_spec *channels, *chan;
	struct device_node *chan_node, *child;
	unsigned int num_channels;
	const char *external_mux;
	u32 ext_mux_chan;
	u32 reg;
	int ret;

	*conf = 0;

	ret = of_property_read_string(np, "xlnx,external-mux", &external_mux);
	if (ret < 0 || strcasecmp(external_mux, "none") == 0)
		xadc->external_mux_mode = XADC_EXTERNAL_MUX_NONE;
	else if (strcasecmp(external_mux, "single") == 0)
		xadc->external_mux_mode = XADC_EXTERNAL_MUX_SINGLE;
	else if (strcasecmp(external_mux, "dual") == 0)
		xadc->external_mux_mode = XADC_EXTERNAL_MUX_DUAL;
	else
		return -EINVAL;

	if (xadc->external_mux_mode != XADC_EXTERNAL_MUX_NONE) {
		ret = of_property_read_u32(np, "xlnx,external-mux-channel",
					&ext_mux_chan);
		if (ret < 0)
			return ret;

		if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_SINGLE) {
			if (ext_mux_chan == 0)
				ext_mux_chan = XADC_REG_VPVN;
			else if (ext_mux_chan <= 16)
				ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
			else
				return -EINVAL;
		} else {
			if (ext_mux_chan > 0 && ext_mux_chan <= 8)
				ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
			else
				return -EINVAL;
		}

		*conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
	}

	channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	num_channels = 9;
	chan = &channels[9];

	chan_node = of_get_child_by_name(np, "xlnx,channels");
	if (chan_node) {
		for_each_child_of_node(chan_node, child) {
			if (num_channels >= ARRAY_SIZE(xadc_channels)) {
				of_node_put(child);
				break;
			}

			ret = of_property_read_u32(child, "reg", &reg);
			if (ret || reg > 16)
				continue;

			if (of_property_read_bool(child, "xlnx,bipolar"))
				chan->scan_type.sign = 's';

			if (reg == 0) {
				chan->scan_index = 11;
				chan->address = XADC_REG_VPVN;
			} else {
				chan->scan_index = 15 + reg;
				chan->address = XADC_REG_VAUX(reg - 1);
			}
			num_channels++;
			chan++;
		}
	}
	of_node_put(chan_node);

	indio_dev->num_channels = num_channels;
	indio_dev->channels = krealloc(channels, sizeof(*channels) *
					num_channels, GFP_KERNEL);
	/* If we can't resize the channels array, just use the original */
	if (!indio_dev->channels)
		indio_dev->channels = channels;

	return 0;
}
static int xadc_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	struct iio_dev *indio_dev;
	unsigned int bipolar_mask;
	struct resource *mem;
	unsigned int conf0;
	struct xadc *xadc;
	int ret;
	int irq;
	int i;

	if (!pdev->dev.of_node)
		return -ENODEV;

	id = of_match_node(xadc_of_match_table, pdev->dev.of_node);
	if (!id)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -ENXIO;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*xadc));
	if (!indio_dev)
		return -ENOMEM;

	xadc = iio_priv(indio_dev);
	xadc->ops = id->data;
	xadc->irq = irq;
	init_completion(&xadc->completion);
	mutex_init(&xadc->mutex);
	spin_lock_init(&xadc->lock);
	INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xadc->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(xadc->base))
		return PTR_ERR(xadc->base);

	indio_dev->dev.parent = &pdev->dev;
	indio_dev->dev.of_node = pdev->dev.of_node;
	indio_dev->name = "xadc";
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &xadc_info;

	ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
	if (ret)
		goto err_device_free;

	if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
		ret = iio_triggered_buffer_setup(indio_dev,
			&iio_pollfunc_store_time, &xadc_trigger_handler,
			&xadc_buffer_ops);
		if (ret)
			goto err_device_free;

		xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
		if (IS_ERR(xadc->convst_trigger)) {
			ret = PTR_ERR(xadc->convst_trigger);
			goto err_triggered_buffer_cleanup;
		}

		xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
			"samplerate");
		if (IS_ERR(xadc->samplerate_trigger)) {
			ret = PTR_ERR(xadc->samplerate_trigger);
			goto err_free_convst_trigger;
		}
	}

	xadc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(xadc->clk)) {
		ret = PTR_ERR(xadc->clk);
		goto err_free_samplerate_trigger;
	}

	ret = clk_prepare_enable(xadc->clk);
	if (ret)
		goto err_free_samplerate_trigger;

	ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
			dev_name(&pdev->dev), indio_dev);
	if (ret)
		goto err_clk_disable_unprepare;

	ret = xadc->ops->setup(pdev, indio_dev, xadc->irq);
	if (ret)
		goto err_free_irq;

	for (i = 0; i < 16; i++)
		xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
			&xadc->threshold[i]);

	ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
	if (ret)
		goto err_free_irq;

	bipolar_mask = 0;
	for (i = 0; i < indio_dev->num_channels; i++) {
		if (indio_dev->channels[i].scan_type.sign == 's')
			bipolar_mask |= BIT(indio_dev->channels[i].scan_index);
	}

	ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
	if (ret)
		goto err_free_irq;
	ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
		bipolar_mask >> 16);
	if (ret)
		goto err_free_irq;

	/* Disable all alarms */
	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
		XADC_CONF1_ALARM_MASK);
	if (ret)
		goto err_free_irq;

	/* Set thresholds to min/max */
	for (i = 0; i < 16; i++) {
		/*
		 * Set max voltage threshold and both temperature thresholds to
		 * 0xffff, min voltage threshold to 0.
		 */
		if (i % 8 < 4 || i == 7)
			xadc->threshold[i] = 0xffff;
		else
			xadc->threshold[i] = 0;
		ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
			xadc->threshold[i]);
		if (ret)
			goto err_free_irq;
	}

	/* Go to non-buffered mode */
	xadc_postdisable(indio_dev);

	ret = iio_device_register(indio_dev);
	if (ret)
		goto err_free_irq;

	platform_set_drvdata(pdev, indio_dev);

	return 0;

err_free_irq:
	free_irq(xadc->irq, indio_dev);
	cancel_delayed_work_sync(&xadc->zynq_unmask_work);
err_clk_disable_unprepare:
	clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
		iio_trigger_free(xadc->samplerate_trigger);
err_free_convst_trigger:
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
		iio_trigger_free(xadc->convst_trigger);
err_triggered_buffer_cleanup:
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
		iio_triggered_buffer_cleanup(indio_dev);
err_device_free:
	kfree(indio_dev->channels);

	return ret;
}
static int xadc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct xadc *xadc = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
		iio_trigger_free(xadc->samplerate_trigger);
		iio_trigger_free(xadc->convst_trigger);
		iio_triggered_buffer_cleanup(indio_dev);
	}
	free_irq(xadc->irq, indio_dev);
	cancel_delayed_work_sync(&xadc->zynq_unmask_work);
	clk_disable_unprepare(xadc->clk);
	kfree(xadc->data);
	kfree(indio_dev->channels);

	return 0;
}
static struct platform_driver xadc_driver = {
	.probe = xadc_probe,
	.remove = xadc_remove,
	.driver = {
		.name = "xadc",
		.of_match_table = xadc_of_match_table,
	},
};
module_platform_driver(xadc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Xilinx XADC IIO driver");