/*
 * Xilinx XADC driver
 *
 * Copyright 2013-2014 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 *
 * Documentation for the parts can be found at:
 *  - XADC hardmacro: Xilinx UG480
 *  - ZYNQ XADC interface: Xilinx UG585
 *  - AXI XADC interface: Xilinx PG019
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

#include "xilinx-xadc.h"
static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
/* ZYNQ register definitions */
#define XADC_ZYNQ_REG_CFG	0x00
#define XADC_ZYNQ_REG_INTSTS	0x04
#define XADC_ZYNQ_REG_INTMSK	0x08
#define XADC_ZYNQ_REG_STATUS	0x0c
#define XADC_ZYNQ_REG_CFIFO	0x10
#define XADC_ZYNQ_REG_DFIFO	0x14
#define XADC_ZYNQ_REG_CTL	0x18

#define XADC_ZYNQ_CFG_ENABLE		BIT(31)
#define XADC_ZYNQ_CFG_CFIFOTH_MASK	(0xf << 20)
#define XADC_ZYNQ_CFG_CFIFOTH_OFFSET	20
#define XADC_ZYNQ_CFG_DFIFOTH_MASK	(0xf << 16)
#define XADC_ZYNQ_CFG_DFIFOTH_OFFSET	16
#define XADC_ZYNQ_CFG_WEDGE		BIT(13)
#define XADC_ZYNQ_CFG_REDGE		BIT(12)
#define XADC_ZYNQ_CFG_TCKRATE_MASK	(0x3 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV2	(0x0 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV4	(0x1 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV8	(0x2 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV16	(0x3 << 8)
#define XADC_ZYNQ_CFG_IGAP_MASK		0x1f
#define XADC_ZYNQ_CFG_IGAP(x)		(x)

#define XADC_ZYNQ_INT_CFIFO_LTH		BIT(9)
#define XADC_ZYNQ_INT_DFIFO_GTH		BIT(8)
#define XADC_ZYNQ_INT_ALARM_MASK	0xff
#define XADC_ZYNQ_INT_ALARM_OFFSET	0

#define XADC_ZYNQ_STATUS_CFIFO_LVL_MASK		(0xf << 16)
#define XADC_ZYNQ_STATUS_CFIFO_LVL_OFFSET	16
#define XADC_ZYNQ_STATUS_DFIFO_LVL_MASK		(0xf << 12)
#define XADC_ZYNQ_STATUS_DFIFO_LVL_OFFSET	12
#define XADC_ZYNQ_STATUS_CFIFOF		BIT(11)
#define XADC_ZYNQ_STATUS_CFIFOE		BIT(10)
#define XADC_ZYNQ_STATUS_DFIFOF		BIT(9)
#define XADC_ZYNQ_STATUS_DFIFOE		BIT(8)
#define XADC_ZYNQ_STATUS_OT		BIT(7)
#define XADC_ZYNQ_STATUS_ALM(x)		BIT(x)

#define XADC_ZYNQ_CTL_RESET		BIT(4)

#define XADC_ZYNQ_CMD_NOP		0x00
#define XADC_ZYNQ_CMD_READ		0x01
#define XADC_ZYNQ_CMD_WRITE		0x02

#define XADC_ZYNQ_CMD(cmd, addr, data) (((cmd) << 26) | ((addr) << 16) | (data))
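
/*
 * A CFIFO command word packs the opcode at bit offset 26, the register
 * address at bit offset 16 and the write data into the low 16 bits, e.g.
 * XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, XADC_REG_TEMP, 0) requests a read of
 * the temperature register.
 */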
/* AXI register definitions */
#define XADC_AXI_REG_RESET		0x00
#define XADC_AXI_REG_STATUS		0x04
#define XADC_AXI_REG_ALARM_STATUS	0x08
#define XADC_AXI_REG_CONVST		0x0c
#define XADC_AXI_REG_XADC_RESET		0x10
#define XADC_AXI_REG_GIER		0x5c
#define XADC_AXI_REG_IPISR		0x60
#define XADC_AXI_REG_IPIER		0x68
#define XADC_AXI_ADC_REG_OFFSET		0x200

#define XADC_AXI_RESET_MAGIC		0xa
#define XADC_AXI_GIER_ENABLE		BIT(31)

#define XADC_AXI_INT_EOS		BIT(4)
#define XADC_AXI_INT_ALARM_MASK		0x3c0f

#define XADC_FLAGS_BUFFERED		BIT(0)
static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
	uint32_t val)
{
	writel(val, xadc->base + reg);
}

static void xadc_read_reg(struct xadc *xadc, unsigned int reg,
	uint32_t *val)
{
	*val = readl(xadc->base + reg);
}
/*
 * The ZYNQ interface uses two asynchronous FIFOs for communication with the
 * XADC. Reads and writes to the XADC register are performed by submitting a
 * request to the command FIFO (CFIFO); once the request has been completed,
 * the result can be read from the data FIFO (DFIFO). The method currently
 * used in this driver is to submit the request for a read/write operation,
 * then go to sleep and wait for an interrupt that signals that a response is
 * available in the data FIFO.
 */
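
/*
 * Note that a register read therefore submits two command words (the READ
 * itself plus a NOP that pushes the response through the DFIFO), which is
 * what xadc_zynq_read_adc_reg() below does.
 */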
static void xadc_zynq_write_fifo(struct xadc *xadc, uint32_t *cmd,
	unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xadc_write_reg(xadc, XADC_ZYNQ_REG_CFIFO, cmd[i]);
}
static void xadc_zynq_drain_fifo(struct xadc *xadc)
{
	uint32_t status, tmp;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);

	while (!(status & XADC_ZYNQ_STATUS_DFIFOE)) {
		xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
		xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
	}
}
static void xadc_zynq_update_intmsk(struct xadc *xadc, unsigned int mask,
	unsigned int val)
{
	xadc->zynq_intmask &= ~mask;
	xadc->zynq_intmask |= val;

	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK,
		xadc->zynq_intmask | xadc->zynq_masked_alarm);
}
static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t val)
{
	uint32_t cmd[1];
	uint32_t tmp;
	int ret;

	spin_lock_irq(&xadc->lock);
	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
		XADC_ZYNQ_INT_DFIFO_GTH);

	reinit_completion(&xadc->completion);

	cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_WRITE, reg, val);
	xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
	xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
	tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
	tmp |= 0 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);

	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
	spin_unlock_irq(&xadc->lock);

	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
	if (ret == 0)
		ret = -EIO;
	else
		ret = 0;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);

	return ret;
}
static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t *val)
{
	uint32_t cmd[2];
	uint32_t resp, tmp;
	int ret;

	cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, reg, 0);
	cmd[1] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_NOP, 0, 0);

	spin_lock_irq(&xadc->lock);
	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
		XADC_ZYNQ_INT_DFIFO_GTH);
	xadc_zynq_drain_fifo(xadc);
	reinit_completion(&xadc->completion);

	xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
	xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
	tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
	tmp |= 1 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);

	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
	spin_unlock_irq(&xadc->lock);
	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
	if (ret == 0)
		ret = -EIO;
	if (ret < 0)
		return ret;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
	xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);

	*val = resp & 0xffff;

	return 0;
}
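
/*
 * The ZYNQ interrupt status register keeps the OT alarm in bit 7 (see
 * XADC_ZYNQ_STATUS_OT above), while xadc_handle_events() expects the XADC
 * CFR1 alarm order with OT in bit 3. Shuffle the bits accordingly.
 */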
static unsigned int xadc_zynq_transform_alarm(unsigned int alarm)
{
	return ((alarm & 0x80) >> 4) |
		((alarm & 0x78) << 1) |
		(alarm & 0x07);
}
/*
 * The ZYNQ threshold interrupts are level sensitive. Since we can't make the
 * threshold condition go away from within the interrupt handler, we would
 * enter the interrupt handler again and again as soon as a threshold
 * condition is present. To work around this we mask all active threshold
 * interrupts in the interrupt handler and start a timer. In this timer we
 * poll the interrupt status and only unmask the interrupt again once it is
 * inactive.
 */
static void xadc_zynq_unmask_worker(struct work_struct *work)
{
	struct xadc *xadc = container_of(work, struct xadc,
		zynq_unmask_work.work);
	unsigned int misc_sts, unmask;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &misc_sts);

	misc_sts &= XADC_ZYNQ_INT_ALARM_MASK;

	spin_lock_irq(&xadc->lock);

	/* Clear those bits which are not active anymore */
	unmask = (xadc->zynq_masked_alarm ^ misc_sts) & xadc->zynq_masked_alarm;
	xadc->zynq_masked_alarm &= misc_sts;

	/* Also clear those which are masked out anyway */
	xadc->zynq_masked_alarm &= ~xadc->zynq_intmask;

	/* Clear the interrupts before we unmask them */
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, unmask);

	xadc_zynq_update_intmsk(xadc, 0, 0);

	spin_unlock_irq(&xadc->lock);

	/* If some alarms are still pending, re-trigger the timer */
	if (xadc->zynq_masked_alarm) {
		schedule_delayed_work(&xadc->zynq_unmask_work,
			msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
	}
}
static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
{
	struct iio_dev *indio_dev = devid;
	struct xadc *xadc = iio_priv(indio_dev);
	uint32_t status;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);

	status &= ~(xadc->zynq_intmask | xadc->zynq_masked_alarm);

	if (!status)
		return IRQ_NONE;

	spin_lock(&xadc->lock);

	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);

	if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
		xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
			XADC_ZYNQ_INT_DFIFO_GTH);
		complete(&xadc->completion);
	}

	status &= XADC_ZYNQ_INT_ALARM_MASK;
	if (status) {
		xadc->zynq_masked_alarm |= status;
		/*
		 * Mask the current event interrupt and unmask it again once
		 * the interrupt is no longer active.
		 */
		xadc_zynq_update_intmsk(xadc, 0, 0);

		xadc_handle_events(indio_dev,
			xadc_zynq_transform_alarm(status));

		/* Unmask the required interrupts from the delayed work. */
		schedule_delayed_work(&xadc->zynq_unmask_work,
			msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
	}
	spin_unlock(&xadc->lock);

	return IRQ_HANDLED;
}
#define XADC_ZYNQ_TCK_RATE_MAX		50000000
#define XADC_ZYNQ_IGAP_DEFAULT		20
static int xadc_zynq_setup(struct platform_device *pdev,
	struct iio_dev *indio_dev, int irq)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long pcap_rate;
	unsigned int tck_div;
	unsigned int div;
	unsigned int igap;
	unsigned int tck_rate;

	/* TODO: Figure out how to make igap and tck_rate configurable */
	igap = XADC_ZYNQ_IGAP_DEFAULT;
	tck_rate = XADC_ZYNQ_TCK_RATE_MAX;

	xadc->zynq_intmask = ~0;

	pcap_rate = clk_get_rate(xadc->clk);

	if (tck_rate > XADC_ZYNQ_TCK_RATE_MAX)
		tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
	if (tck_rate > pcap_rate / 2) {
		div = 2;
	} else {
		div = pcap_rate / tck_rate;
		if (pcap_rate / div > XADC_ZYNQ_TCK_RATE_MAX)
			div++;
	}

	if (div <= 3)
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV2;
	else if (div <= 7)
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV4;
	else if (div <= 15)
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV8;
	else
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV16;

	xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, XADC_ZYNQ_CTL_RESET);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, 0);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, ~0);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK, xadc->zynq_intmask);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, XADC_ZYNQ_CFG_ENABLE |
			XADC_ZYNQ_CFG_REDGE | XADC_ZYNQ_CFG_WEDGE |
			tck_div | XADC_ZYNQ_CFG_IGAP(igap));

	return 0;
}
static unsigned long xadc_zynq_get_dclk_rate(struct xadc *xadc)
{
	unsigned int div;
	uint32_t val;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &val);

	switch (val & XADC_ZYNQ_CFG_TCKRATE_MASK) {
	case XADC_ZYNQ_CFG_TCKRATE_DIV4:
		div = 4;
		break;
	case XADC_ZYNQ_CFG_TCKRATE_DIV8:
		div = 8;
		break;
	case XADC_ZYNQ_CFG_TCKRATE_DIV16:
		div = 16;
		break;
	default:
		div = 2;
		break;
	}

	return clk_get_rate(xadc->clk) / div;
}
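
/*
 * xadc_zynq_update_alarm() receives the alarm mask in XADC CFR1 bit order
 * (OT in bit 3) and converts it to the ZYNQ INTSTS/INTMSK layout (OT in
 * bit 7), i.e. the inverse of xadc_zynq_transform_alarm() above.
 */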
static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
{
	unsigned long flags;
	uint32_t status;

	/* Move OT to bit 7 */
	alarm = ((alarm & 0x08) << 4) | ((alarm & 0xf0) >> 1) | (alarm & 0x07);

	spin_lock_irqsave(&xadc->lock, flags);

	/* Clear previous interrupts if any. */
	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status & alarm);

	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_ALARM_MASK,
		~alarm & XADC_ZYNQ_INT_ALARM_MASK);

	spin_unlock_irqrestore(&xadc->lock, flags);
}
static const struct xadc_ops xadc_zynq_ops = {
	.read = xadc_zynq_read_adc_reg,
	.write = xadc_zynq_write_adc_reg,
	.setup = xadc_zynq_setup,
	.get_dclk_rate = xadc_zynq_get_dclk_rate,
	.interrupt_handler = xadc_zynq_interrupt_handler,
	.update_alarm = xadc_zynq_update_alarm,
};
static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t *val)
{
	uint32_t val32;

	xadc_read_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, &val32);
	*val = val32 & 0xffff;

	return 0;
}

static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t val)
{
	xadc_write_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, val);

	return 0;
}
static int xadc_axi_setup(struct platform_device *pdev,
	struct iio_dev *indio_dev, int irq)
{
	struct xadc *xadc = iio_priv(indio_dev);

	xadc_write_reg(xadc, XADC_AXI_REG_RESET, XADC_AXI_RESET_MAGIC);
	xadc_write_reg(xadc, XADC_AXI_REG_GIER, XADC_AXI_GIER_ENABLE);

	return 0;
}
static irqreturn_t xadc_axi_interrupt_handler(int irq, void *devid)
{
	struct iio_dev *indio_dev = devid;
	struct xadc *xadc = iio_priv(indio_dev);
	uint32_t status, mask;
	unsigned int events;

	xadc_read_reg(xadc, XADC_AXI_REG_IPISR, &status);
	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &mask);
	status &= mask;

	if (!status)
		return IRQ_NONE;

	if ((status & XADC_AXI_INT_EOS) && xadc->trigger)
		iio_trigger_poll(xadc->trigger);

	if (status & XADC_AXI_INT_ALARM_MASK) {
		/*
		 * The order of the bits in the AXI-XADC status register does
		 * not match the order of the bits in the XADC alarm enable
		 * register. xadc_handle_events() expects the events to be in
		 * the same order as the XADC alarm enable register.
		 */
		events = (status & 0x000e) >> 1;
		events |= (status & 0x0001) << 3;
		events |= (status & 0x3c00) >> 6;
		xadc_handle_events(indio_dev, events);
	}

	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, status);

	return IRQ_HANDLED;
}
static void xadc_axi_update_alarm(struct xadc *xadc, unsigned int alarm)
{
	uint32_t val;
	unsigned long flags;

	/*
	 * The order of the bits in the AXI-XADC status register does not match
	 * the order of the bits in the XADC alarm enable register. We get
	 * passed the alarm mask in the same order as in the XADC alarm enable
	 * register.
	 */
	alarm = ((alarm & 0x07) << 1) | ((alarm & 0x08) >> 3) |
			((alarm & 0xf0) << 6);

	spin_lock_irqsave(&xadc->lock, flags);
	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
	val &= ~XADC_AXI_INT_ALARM_MASK;
	val |= alarm;
	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
	spin_unlock_irqrestore(&xadc->lock, flags);
}
static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
{
	return clk_get_rate(xadc->clk);
}
static const struct xadc_ops xadc_axi_ops = {
	.read = xadc_axi_read_adc_reg,
	.write = xadc_axi_write_adc_reg,
	.setup = xadc_axi_setup,
	.get_dclk_rate = xadc_axi_get_dclk,
	.update_alarm = xadc_axi_update_alarm,
	.interrupt_handler = xadc_axi_interrupt_handler,
	.flags = XADC_FLAGS_BUFFERED,
};
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t mask, uint16_t val)
{
	uint16_t tmp;
	int ret;

	ret = _xadc_read_adc_reg(xadc, reg, &tmp);
	if (ret)
		return ret;

	return _xadc_write_adc_reg(xadc, reg, (tmp & ~mask) | val);
}
static int xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t mask, uint16_t val)
{
	int ret;

	mutex_lock(&xadc->mutex);
	ret = _xadc_update_adc_reg(xadc, reg, mask, val);
	mutex_unlock(&xadc->mutex);

	return ret;
}
static unsigned long xadc_get_dclk_rate(struct xadc *xadc)
{
	return xadc->ops->get_dclk_rate(xadc);
}
static int xadc_update_scan_mode(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned int n;

	n = bitmap_weight(mask, indio_dev->masklength);

	kfree(xadc->data);
	xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL);
	if (!xadc->data)
		return -ENOMEM;

	return 0;
}
static unsigned int xadc_scan_index_to_channel(unsigned int scan_index)
{
	switch (scan_index) {
	case 5:
		return XADC_REG_VCCPINT;
	case 6:
		return XADC_REG_VCCPAUX;
	case 7:
		return XADC_REG_VCCO_DDR;
	case 8:
		return XADC_REG_TEMP;
	case 9:
		return XADC_REG_VCCINT;
	case 10:
		return XADC_REG_VCCAUX;
	case 11:
		return XADC_REG_VPVN;
	case 12:
		return XADC_REG_VREFP;
	case 13:
		return XADC_REG_VREFN;
	case 14:
		return XADC_REG_VCCBRAM;
	default:
		return XADC_REG_VAUX(scan_index - 16);
	}
}
static irqreturn_t xadc_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned int chan;
	int i, j;

	if (!xadc->data)
		goto out;

	j = 0;
	for_each_set_bit(i, indio_dev->active_scan_mask,
		indio_dev->masklength) {
		chan = xadc_scan_index_to_channel(i);
		xadc_read_adc_reg(xadc, chan, &xadc->data[j]);
		j++;
	}

	iio_push_to_buffers(indio_dev, xadc->data);

out:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
{
	struct xadc *xadc = iio_trigger_get_drvdata(trigger);
	unsigned long flags;
	unsigned int convst;
	unsigned int val;
	int ret = 0;

	mutex_lock(&xadc->mutex);

	if (state) {
		/* Only one of the two triggers can be active at a time. */
		if (xadc->trigger != NULL) {
			ret = -EBUSY;
			goto err_out;
		} else {
			xadc->trigger = trigger;
			if (trigger == xadc->convst_trigger)
				convst = XADC_CONF0_EC;
			else
				convst = 0;
		}
		ret = _xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF0_EC,
					convst);
		if (ret)
			goto err_out;
	} else {
		xadc->trigger = NULL;
	}

	spin_lock_irqsave(&xadc->lock, flags);
	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
	if (state)
		val |= XADC_AXI_INT_EOS;
	else
		val &= ~XADC_AXI_INT_EOS;
	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
	spin_unlock_irqrestore(&xadc->lock, flags);

err_out:
	mutex_unlock(&xadc->mutex);

	return ret;
}
static const struct iio_trigger_ops xadc_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &xadc_trigger_set_state,
};
static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
	const char *name)
{
	struct iio_trigger *trig;
	int ret;

	trig = iio_trigger_alloc("%s%d-%s", indio_dev->name,
		indio_dev->id, name);
	if (trig == NULL)
		return ERR_PTR(-ENOMEM);

	trig->dev.parent = indio_dev->dev.parent;
	trig->ops = &xadc_trigger_ops;
	iio_trigger_set_drvdata(trig, iio_priv(indio_dev));

	ret = iio_trigger_register(trig);
	if (ret)
		goto error_free_trig;

	return trig;

error_free_trig:
	iio_trigger_free(trig);
	return ERR_PTR(ret);
}
static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
{
	unsigned int val;

	switch (seq_mode) {
	case XADC_CONF1_SEQ_SIMULTANEOUS:
	case XADC_CONF1_SEQ_INDEPENDENT:
		val = XADC_CONF2_PD_ADC_B;
		break;
	default:
		val = 0;
		break;
	}

	return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_PD_MASK,
		val);
}
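
/*
 * In simultaneous sampling mode the sequencer converts the aux channels in
 * pairs (VAUX[i] together with VAUX[i+8], per UG480), which is what a dual
 * external mux requires. Plain continuous mode is only possible when all
 * active aux channels fit into one half of the range, hence the byte-mask
 * checks below.
 */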
static int xadc_get_seq_mode(struct xadc *xadc, unsigned long scan_mode)
{
	unsigned int aux_scan_mode = scan_mode >> 16;

	if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_DUAL)
		return XADC_CONF1_SEQ_SIMULTANEOUS;

	if ((aux_scan_mode & 0xff00) == 0 ||
		(aux_scan_mode & 0x00ff) == 0)
		return XADC_CONF1_SEQ_CONTINUOUS;

	return XADC_CONF1_SEQ_SIMULTANEOUS;
}
static int xadc_postdisable(struct iio_dev *indio_dev)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long scan_mask;
	int ret;
	int i;

	scan_mask = 1; /* Run calibration as part of the sequence */
	for (i = 0; i < indio_dev->num_channels; i++)
		scan_mask |= BIT(indio_dev->channels[i].scan_index);

	/* Enable all channels and calibration */
	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
	if (ret)
		return ret;

	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
	if (ret)
		return ret;

	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
		XADC_CONF1_SEQ_CONTINUOUS);
	if (ret)
		return ret;

	return xadc_power_adc_b(xadc, XADC_CONF1_SEQ_CONTINUOUS);
}
static int xadc_preenable(struct iio_dev *indio_dev)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long scan_mask;
	int seq_mode;
	int ret;

	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
		XADC_CONF1_SEQ_DEFAULT);
	if (ret)
		goto err;

	scan_mask = *indio_dev->active_scan_mask;
	seq_mode = xadc_get_seq_mode(xadc, scan_mask);

	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
	if (ret)
		goto err;

	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
	if (ret)
		goto err;

	ret = xadc_power_adc_b(xadc, seq_mode);
	if (ret)
		goto err;

	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
		seq_mode);
	if (ret)
		goto err;

	return 0;
err:
	xadc_postdisable(indio_dev);
	return ret;
}
static const struct iio_buffer_setup_ops xadc_buffer_ops = {
	.preenable = &xadc_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
	.postdisable = &xadc_postdisable,
};
static int xadc_read_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned int div;
	uint16_t val16;
	int ret;

	switch (info) {
	case IIO_CHAN_INFO_RAW:
		if (iio_buffer_enabled(indio_dev))
			return -EBUSY;
		ret = xadc_read_adc_reg(xadc, chan->address, &val16);
		if (ret < 0)
			return ret;

		val16 >>= 4;
		if (chan->scan_type.sign == 'u')
			*val = val16;
		else
			*val = sign_extend32(val16, 11);

		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			/* V = (val * 3.0) / 4096 */
			switch (chan->address) {
			case XADC_REG_VCCINT:
			case XADC_REG_VCCAUX:
			case XADC_REG_VREFP:
			case XADC_REG_VREFN:
			case XADC_REG_VCCBRAM:
			case XADC_REG_VCCPINT:
			case XADC_REG_VCCPAUX:
			case XADC_REG_VCCO_DDR:
				*val = 3000;
				break;
			default:
				*val = 1000;
				break;
			}
			*val2 = 12;
			return IIO_VAL_FRACTIONAL_LOG2;
		case IIO_TEMP:
			/* Temp in C = (val * 503.975) / 4096 - 273.15 */
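			/*
			 * e.g. a (shifted) raw value of 2457 gives
			 * 2457 * 503.975 / 4096 - 273.15, roughly 29 C.
			 */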
			*val = 503975;
			*val2 = 12;
			return IIO_VAL_FRACTIONAL_LOG2;
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_OFFSET:
		/* Only the temperature channel has an offset */
		*val = -((273150 << 12) / 503975);
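		/*
		 * -((273150 << 12) / 503975) evaluates to -2219; combined
		 * with the scale above, (raw - 2219) * 503975 / 2^12 yields
		 * the temperature in milli-degrees Celsius.
		 */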
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SAMP_FREQ:
		ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
		if (ret)
			return ret;

		div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
		if (div < 2)
			div = 2;

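		/*
		 * One conversion takes 26 ADCCLK cycles and ADCCLK is the
		 * DCLK divided by div, hence fs = dclk / div / 26.
		 */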
		*val = xadc_get_dclk_rate(xadc) / div / 26;

		return IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}
static int xadc_write_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan, int val, int val2, long info)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long clk_rate = xadc_get_dclk_rate(xadc);
	unsigned int div;

	if (info != IIO_CHAN_INFO_SAMP_FREQ)
		return -EINVAL;

	if (val <= 0)
		return -EINVAL;

	/* Max. 150 kSPS */
	if (val > 150000)
		val = 150000;

	val *= 26;

	/* Min 1MHz */
	if (val < 1000000)
		val = 1000000;

	/*
	 * We want to round down, but only if we do not exceed the 150 kSPS
	 * limit.
	 */
	div = clk_rate / val;
	if (clk_rate / div / 26 > 150000)
		div++;
	if (div < 2)
		div = 2;
	else if (div > 0xff)
		div = 0xff;
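	/*
	 * e.g. with a (hypothetical) 104 MHz DCLK and a requested 150 kSPS:
	 * val = 3.9 MHz and div = 26, but 104 MHz / 26 / 26 is about
	 * 153.8 kSPS, so div is bumped to 27 for roughly 148.1 kSPS.
	 */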

	return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_DIV_MASK,
		div << XADC_CONF2_DIV_OFFSET);
}
static const struct iio_event_spec xadc_temp_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE) |
				BIT(IIO_EV_INFO_VALUE) |
				BIT(IIO_EV_INFO_HYSTERESIS),
	},
};
/* Separate values for upper and lower thresholds, but only a shared enable */
static const struct iio_event_spec xadc_voltage_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_FALLING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_EITHER,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
	},
};
#define XADC_CHAN_TEMP(_chan, _scan_index, _addr) { \
	.type = IIO_TEMP, \
	.indexed = 1, \
	.channel = (_chan), \
	.address = (_addr), \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		BIT(IIO_CHAN_INFO_SCALE) | \
		BIT(IIO_CHAN_INFO_OFFSET), \
	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
	.event_spec = xadc_temp_events, \
	.num_event_specs = ARRAY_SIZE(xadc_temp_events), \
	.scan_index = (_scan_index), \
	.scan_type = { \
		.sign = 'u', \
		.realbits = 12, \
		.storagebits = 16, \
		.shift = 4, \
		.endianness = IIO_CPU, \
	}, \
}
#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) { \
	.type = IIO_VOLTAGE, \
	.indexed = 1, \
	.channel = (_chan), \
	.address = (_addr), \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		BIT(IIO_CHAN_INFO_SCALE), \
	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
	.event_spec = (_alarm) ? xadc_voltage_events : NULL, \
	.num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
	.scan_index = (_scan_index), \
	.scan_type = { \
		.sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
		.realbits = 12, \
		.storagebits = 16, \
		.shift = 4, \
		.endianness = IIO_CPU, \
	}, \
	.extend_name = _ext, \
}
static const struct iio_chan_spec xadc_channels[] = {
	XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
	XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
	XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
	XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
	XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
	XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
	XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
	XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
	XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
	XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
	XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
	XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
	XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
	XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
	XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
	XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
	XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
	XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
	XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
	XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
	XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
	XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
	XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
	XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
	XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
	XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
};
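
/*
 * With the extend_name set above, these channels show up in sysfs as e.g.
 * in_temp0_raw and in_voltage0_vccint_raw; the scan_index values mirror
 * the mapping in xadc_scan_index_to_channel().
 */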
static const struct iio_info xadc_info = {
	.read_raw = &xadc_read_raw,
	.write_raw = &xadc_write_raw,
	.read_event_config = &xadc_read_event_config,
	.write_event_config = &xadc_write_event_config,
	.read_event_value = &xadc_read_event_value,
	.write_event_value = &xadc_write_event_value,
	.update_scan_mode = &xadc_update_scan_mode,
	.driver_module = THIS_MODULE,
};
static const struct of_device_id xadc_of_match_table[] = {
	{ .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
	{ .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
	{ },
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
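
/*
 * Sketch of a matching devicetree node (the addresses, interrupt and clock
 * specifiers below are illustrative only; see the xadc devicetree binding
 * document for the authoritative format):
 *
 *	adc@f8007100 {
 *		compatible = "xlnx,zynq-xadc-1.00.a";
 *		reg = <0xf8007100 0x20>;
 *		interrupts = <0 7 4>;
 *		clocks = <&clkc 12>;
 *
 *		xlnx,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *			channel@0 {
 *				reg = <0>;
 *				xlnx,bipolar;
 *			};
 *		};
 *	};
 */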
static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
	unsigned int *conf)
{
	struct xadc *xadc = iio_priv(indio_dev);
	struct iio_chan_spec *channels, *chan;
	struct device_node *chan_node, *child;
	unsigned int num_channels;
	const char *external_mux;
	u32 ext_mux_chan;
	u32 reg;
	int ret;

	*conf = 0;

	ret = of_property_read_string(np, "xlnx,external-mux", &external_mux);
	if (ret < 0 || strcasecmp(external_mux, "none") == 0)
		xadc->external_mux_mode = XADC_EXTERNAL_MUX_NONE;
	else if (strcasecmp(external_mux, "single") == 0)
		xadc->external_mux_mode = XADC_EXTERNAL_MUX_SINGLE;
	else if (strcasecmp(external_mux, "dual") == 0)
		xadc->external_mux_mode = XADC_EXTERNAL_MUX_DUAL;
	else
		return -EINVAL;

	if (xadc->external_mux_mode != XADC_EXTERNAL_MUX_NONE) {
		ret = of_property_read_u32(np, "xlnx,external-mux-channel",
			&ext_mux_chan);
		if (ret < 0)
			return ret;

		if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_SINGLE) {
			if (ext_mux_chan == 0)
				ext_mux_chan = XADC_REG_VPVN;
			else if (ext_mux_chan <= 16)
				ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
			else
				return -EINVAL;
		} else {
			if (ext_mux_chan > 0 && ext_mux_chan <= 8)
				ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
			else
				return -EINVAL;
		}

		*conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
	}

	channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	num_channels = 9;
	chan = &channels[9];

	chan_node = of_get_child_by_name(np, "xlnx,channels");
	if (chan_node) {
		for_each_child_of_node(chan_node, child) {
			if (num_channels >= ARRAY_SIZE(xadc_channels)) {
				of_node_put(child);
				break;
			}

			ret = of_property_read_u32(child, "reg", &reg);
			if (ret || reg > 16)
				continue;

			if (of_property_read_bool(child, "xlnx,bipolar"))
				chan->scan_type.sign = 's';

			if (reg == 0) {
				chan->scan_index = 11;
				chan->address = XADC_REG_VPVN;
			} else {
				chan->scan_index = 15 + reg;
				chan->address = XADC_REG_VAUX(reg - 1);
			}
			num_channels++;
			chan++;
		}
	}
	of_node_put(chan_node);

	indio_dev->num_channels = num_channels;
	indio_dev->channels = krealloc(channels, sizeof(*channels) *
		num_channels, GFP_KERNEL);
	/* If we can't resize the channels array, just use the original */
	if (!indio_dev->channels)
		indio_dev->channels = channels;

	return 0;
}
static int xadc_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	struct iio_dev *indio_dev;
	unsigned int bipolar_mask;
	struct resource *mem;
	unsigned int conf0;
	struct xadc *xadc;
	int ret;
	int irq;
	int i;

	if (!pdev->dev.of_node)
		return -ENODEV;

	id = of_match_node(xadc_of_match_table, pdev->dev.of_node);
	if (!id)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -ENXIO;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*xadc));
	if (!indio_dev)
		return -ENOMEM;

	xadc = iio_priv(indio_dev);
	xadc->ops = id->data;
	init_completion(&xadc->completion);
	mutex_init(&xadc->mutex);
	spin_lock_init(&xadc->lock);
	INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xadc->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(xadc->base))
		return PTR_ERR(xadc->base);

	indio_dev->dev.parent = &pdev->dev;
	indio_dev->dev.of_node = pdev->dev.of_node;
	indio_dev->name = "xadc";
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &xadc_info;

	ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
	if (ret)
		goto err_device_free;

	if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
		ret = iio_triggered_buffer_setup(indio_dev,
			&iio_pollfunc_store_time, &xadc_trigger_handler,
			&xadc_buffer_ops);
		if (ret)
			goto err_device_free;

		xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
		if (IS_ERR(xadc->convst_trigger)) {
			ret = PTR_ERR(xadc->convst_trigger);
			goto err_triggered_buffer_cleanup;
		}
		xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
			"samplerate");
		if (IS_ERR(xadc->samplerate_trigger)) {
			ret = PTR_ERR(xadc->samplerate_trigger);
			goto err_free_convst_trigger;
		}
	}

	xadc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(xadc->clk)) {
		ret = PTR_ERR(xadc->clk);
		goto err_free_samplerate_trigger;
	}
	clk_prepare_enable(xadc->clk);

	ret = xadc->ops->setup(pdev, indio_dev, irq);
	if (ret)
		goto err_free_samplerate_trigger;

	ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
			dev_name(&pdev->dev), indio_dev);
	if (ret)
		goto err_clk_disable_unprepare;

	for (i = 0; i < 16; i++)
		xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
			&xadc->threshold[i]);

	ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
	if (ret)
		goto err_free_irq;

	bipolar_mask = 0;
	for (i = 0; i < indio_dev->num_channels; i++) {
		if (indio_dev->channels[i].scan_type.sign == 's')
			bipolar_mask |= BIT(indio_dev->channels[i].scan_index);
	}

	ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
	if (ret)
		goto err_free_irq;
	ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
		bipolar_mask >> 16);
	if (ret)
		goto err_free_irq;

	/* Disable all alarms */
	xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
		XADC_CONF1_ALARM_MASK);

	/* Set thresholds to min/max */
	for (i = 0; i < 16; i++) {
		/*
		 * Set max voltage threshold and both temperature thresholds to
		 * 0xffff, min voltage threshold to 0.
		 */
		if (i % 8 < 4 || i == 7)
			xadc->threshold[i] = 0xffff;
		else
			xadc->threshold[i] = 0;
		xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
			xadc->threshold[i]);
	}

	/* Go to non-buffered mode */
	xadc_postdisable(indio_dev);

	ret = iio_device_register(indio_dev);
	if (ret)
		goto err_free_irq;

	platform_set_drvdata(pdev, indio_dev);

	return 0;

err_free_irq:
	free_irq(irq, indio_dev);
err_free_samplerate_trigger:
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
		iio_trigger_free(xadc->samplerate_trigger);
err_free_convst_trigger:
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
		iio_trigger_free(xadc->convst_trigger);
err_triggered_buffer_cleanup:
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
		iio_triggered_buffer_cleanup(indio_dev);
err_clk_disable_unprepare:
	clk_disable_unprepare(xadc->clk);
err_device_free:
	kfree(indio_dev->channels);

	return ret;
}
static int xadc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct xadc *xadc = iio_priv(indio_dev);
	int irq = platform_get_irq(pdev, 0);

	iio_device_unregister(indio_dev);
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
		iio_trigger_free(xadc->samplerate_trigger);
		iio_trigger_free(xadc->convst_trigger);
		iio_triggered_buffer_cleanup(indio_dev);
	}
	free_irq(irq, indio_dev);
	clk_disable_unprepare(xadc->clk);
	cancel_delayed_work(&xadc->zynq_unmask_work);
	kfree(xadc->data);
	kfree(indio_dev->channels);

	return 0;
}
static struct platform_driver xadc_driver = {
	.probe = xadc_probe,
	.remove = xadc_remove,
	.driver = {
		.name = "xadc",
		.of_match_table = xadc_of_match_table,
	},
};
module_platform_driver(xadc_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Xilinx XADC IIO driver");