 * Copyright 2013-2014 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 *
 * Documentation for the parts can be found at:
 *  - XADC hardmacro: Xilinx UG480
 *  - ZYNQ XADC interface: Xilinx UG585
 *  - AXI XADC interface: Xilinx PG019
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

#include "xilinx-xadc.h"

static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;

/* ZYNQ register definitions */
#define XADC_ZYNQ_REG_CFG	0x00
#define XADC_ZYNQ_REG_INTSTS	0x04
#define XADC_ZYNQ_REG_INTMSK	0x08
#define XADC_ZYNQ_REG_STATUS	0x0c
#define XADC_ZYNQ_REG_CFIFO	0x10
#define XADC_ZYNQ_REG_DFIFO	0x14
#define XADC_ZYNQ_REG_CTL	0x18

#define XADC_ZYNQ_CFG_ENABLE		BIT(31)
#define XADC_ZYNQ_CFG_CFIFOTH_MASK	(0xf << 20)
#define XADC_ZYNQ_CFG_CFIFOTH_OFFSET	20
#define XADC_ZYNQ_CFG_DFIFOTH_MASK	(0xf << 16)
#define XADC_ZYNQ_CFG_DFIFOTH_OFFSET	16
#define XADC_ZYNQ_CFG_WEDGE		BIT(13)
#define XADC_ZYNQ_CFG_REDGE		BIT(12)
#define XADC_ZYNQ_CFG_TCKRATE_MASK	(0x3 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV2	(0x0 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV4	(0x1 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV8	(0x2 << 8)
#define XADC_ZYNQ_CFG_TCKRATE_DIV16	(0x3 << 8)
#define XADC_ZYNQ_CFG_IGAP_MASK		0x1f
#define XADC_ZYNQ_CFG_IGAP(x)		(x)

#define XADC_ZYNQ_INT_CFIFO_LTH		BIT(9)
#define XADC_ZYNQ_INT_DFIFO_GTH		BIT(8)
#define XADC_ZYNQ_INT_ALARM_MASK	0xff
#define XADC_ZYNQ_INT_ALARM_OFFSET	0

#define XADC_ZYNQ_STATUS_CFIFO_LVL_MASK		(0xf << 16)
#define XADC_ZYNQ_STATUS_CFIFO_LVL_OFFSET	16
#define XADC_ZYNQ_STATUS_DFIFO_LVL_MASK		(0xf << 12)
#define XADC_ZYNQ_STATUS_DFIFO_LVL_OFFSET	12
#define XADC_ZYNQ_STATUS_CFIFOF		BIT(11)
#define XADC_ZYNQ_STATUS_CFIFOE		BIT(10)
#define XADC_ZYNQ_STATUS_DFIFOF		BIT(9)
#define XADC_ZYNQ_STATUS_DFIFOE		BIT(8)
#define XADC_ZYNQ_STATUS_OT		BIT(7)
#define XADC_ZYNQ_STATUS_ALM(x)		BIT(x)

#define XADC_ZYNQ_CTL_RESET		BIT(4)

#define XADC_ZYNQ_CMD_NOP		0x00
#define XADC_ZYNQ_CMD_READ		0x01
#define XADC_ZYNQ_CMD_WRITE		0x02

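/*
 * CFIFO commands are single 32-bit words: the opcode is placed at bit 26,
 * the XADC register address at bit 16 and the write data (ignored for
 * reads) in the low 16 bits.
 */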
#define XADC_ZYNQ_CMD(cmd, addr, data) (((cmd) << 26) | ((addr) << 16) | (data))

/* AXI register definitions */
#define XADC_AXI_REG_RESET		0x00
#define XADC_AXI_REG_STATUS		0x04
#define XADC_AXI_REG_ALARM_STATUS	0x08
#define XADC_AXI_REG_CONVST		0x0c
#define XADC_AXI_REG_XADC_RESET		0x10
#define XADC_AXI_REG_GIER		0x5c
#define XADC_AXI_REG_IPISR		0x60
#define XADC_AXI_REG_IPIER		0x68
#define XADC_AXI_ADC_REG_OFFSET		0x200

#define XADC_AXI_RESET_MAGIC		0xa
#define XADC_AXI_GIER_ENABLE		BIT(31)

#define XADC_AXI_INT_EOS		BIT(4)
#define XADC_AXI_INT_ALARM_MASK		0x3c0f

#define XADC_FLAGS_BUFFERED		BIT(0)

static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
	uint32_t val)
{
	writel(val, xadc->base + reg);
}

static void xadc_read_reg(struct xadc *xadc, unsigned int reg,
	uint32_t *val)
{
	*val = readl(xadc->base + reg);
}

/*
 * The ZYNQ interface uses two asynchronous FIFOs for communication with the
 * XADC. Reads and writes to the XADC registers are performed by submitting a
 * request to the command FIFO (CFIFO); once the request has been completed the
 * result can be read from the data FIFO (DFIFO). The method currently used in
 * this driver is to submit the request for a read/write operation, then go to
 * sleep and wait for an interrupt that signals that a response is available in
 * the data FIFO.
 */

static void xadc_zynq_write_fifo(struct xadc *xadc, uint32_t *cmd,
	unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xadc_write_reg(xadc, XADC_ZYNQ_REG_CFIFO, cmd[i]);
}

static void xadc_zynq_drain_fifo(struct xadc *xadc)
{
	uint32_t status, tmp;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);

	while (!(status & XADC_ZYNQ_STATUS_DFIFOE)) {
		xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
		xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
	}
}

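/*
 * Update the interrupt mask register. Alarm interrupts that are temporarily
 * masked because they are currently asserted (zynq_masked_alarm) stay masked
 * in hardware on top of whatever the caller requests. Must be called with
 * xadc->lock held.
 */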
static void xadc_zynq_update_intmsk(struct xadc *xadc, unsigned int mask,
	unsigned int val)
{
	xadc->zynq_intmask &= ~mask;
	xadc->zynq_intmask |= val;

	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK,
		xadc->zynq_intmask | xadc->zynq_masked_alarm);
}

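/*
 * A write submits a single WRITE command to the CFIFO. The DFIFO threshold
 * is set to 0 so the DFIFO_GTH interrupt fires as soon as the single
 * response word arrives; the response carries no data and is simply drained.
 */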
static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t val)
{
	uint32_t cmd[1];
	uint32_t tmp;
	int ret;

	spin_lock_irq(&xadc->lock);
	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
			XADC_ZYNQ_INT_DFIFO_GTH);

	reinit_completion(&xadc->completion);

	cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_WRITE, reg, val);
	xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
	xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
	tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
	tmp |= 0 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);

	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
	spin_unlock_irq(&xadc->lock);

	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
	if (ret == 0)
		ret = -EIO;
	else
		ret = 0;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);

	return ret;
}

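/*
 * A read submits a READ command followed by a NOP. The result of the READ is
 * shifted out while the NOP is being processed, so the value arrives in the
 * second response word; a DFIFO threshold of 1 delays the interrupt until
 * both responses are available.
 */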
static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t *val)
{
	uint32_t cmd[2];
	uint32_t resp, tmp;
	int ret;

	cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, reg, 0);
	cmd[1] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_NOP, 0, 0);

	spin_lock_irq(&xadc->lock);
	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
			XADC_ZYNQ_INT_DFIFO_GTH);
	xadc_zynq_drain_fifo(xadc);
	reinit_completion(&xadc->completion);

	xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
	xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
	tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
	tmp |= 1 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);

	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
	spin_unlock_irq(&xadc->lock);
	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
	if (ret == 0)
		ret = -EIO;
	if (ret < 0)
		return ret;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
	xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);

	*val = resp & 0xffff;

	return 0;
}

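/*
 * Convert the alarm bits from their position in the ZYNQ interrupt status
 * register to the bit layout of the XADC alarm enable register that
 * xadc_handle_events() expects: OT moves from bit 7 down to bit 3 and
 * alarms 3-6 move up by one bit.
 */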
static unsigned int xadc_zynq_transform_alarm(unsigned int alarm)
{
	return ((alarm & 0x80) >> 4) |
		((alarm & 0x78) << 1) |
		(alarm & 0x07);
}

/*
 * The ZYNQ threshold interrupts are level sensitive. Since we can't make the
 * threshold condition go away from within the interrupt handler, this means as
 * soon as a threshold condition is present we would enter the interrupt handler
 * again and again. To work around this we mask all active threshold interrupts
 * in the interrupt handler and start a timer. In this timer we poll the
 * interrupt status and only unmask the interrupt again once it is inactive.
 */

static void xadc_zynq_unmask_worker(struct work_struct *work)
{
	struct xadc *xadc = container_of(work, struct xadc,
			zynq_unmask_work.work);
	unsigned int misc_sts, unmask;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &misc_sts);

	misc_sts &= XADC_ZYNQ_INT_ALARM_MASK;

	spin_lock_irq(&xadc->lock);

	/* Clear those bits which are not active anymore */
	unmask = (xadc->zynq_masked_alarm ^ misc_sts) & xadc->zynq_masked_alarm;
	xadc->zynq_masked_alarm &= misc_sts;

	/* Also clear those which are masked out anyway */
	xadc->zynq_masked_alarm &= ~xadc->zynq_intmask;

	/* Clear the interrupts before we unmask them */
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, unmask);

	xadc_zynq_update_intmsk(xadc, 0, 0);

	spin_unlock_irq(&xadc->lock);

	/* if some alarms are still pending, re-trigger the timer */
	if (xadc->zynq_masked_alarm) {
		schedule_delayed_work(&xadc->zynq_unmask_work,
				msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
	}
}

static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
{
	struct iio_dev *indio_dev = devid;
	struct xadc *xadc = iio_priv(indio_dev);
	uint32_t status;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);

	status &= ~(xadc->zynq_intmask | xadc->zynq_masked_alarm);

	if (!status)
		return IRQ_NONE;

	spin_lock(&xadc->lock);

	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);

	if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
		xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
			XADC_ZYNQ_INT_DFIFO_GTH);
		complete(&xadc->completion);
	}

	status &= XADC_ZYNQ_INT_ALARM_MASK;
	if (status) {
		xadc->zynq_masked_alarm |= status;
		/*
		 * mask the current event interrupt,
		 * unmask it when the interrupt is no longer active.
		 */
		xadc_zynq_update_intmsk(xadc, 0, 0);

		xadc_handle_events(indio_dev,
				xadc_zynq_transform_alarm(status));

		/* unmask the required interrupts from the timer */
		schedule_delayed_work(&xadc->zynq_unmask_work,
				msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
	}
	spin_unlock(&xadc->lock);

	return IRQ_HANDLED;
}

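/*
 * 50 MHz is the maximum TCK rate the XADC interface supports (see UG585);
 * IGAP sets the idle gap inserted between successive CFIFO commands.
 */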
#define XADC_ZYNQ_TCK_RATE_MAX		50000000
#define XADC_ZYNQ_IGAP_DEFAULT		20

static int xadc_zynq_setup(struct platform_device *pdev,
	struct iio_dev *indio_dev, int irq)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long pcap_rate;
	unsigned int tck_div;
	unsigned int div;
	unsigned int igap;
	unsigned int tck_rate;

	/* TODO: Figure out how to make igap and tck_rate configurable */
	igap = XADC_ZYNQ_IGAP_DEFAULT;
	tck_rate = XADC_ZYNQ_TCK_RATE_MAX;

	xadc->zynq_intmask = ~0;

	pcap_rate = clk_get_rate(xadc->clk);

	if (tck_rate > XADC_ZYNQ_TCK_RATE_MAX)
		tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
	if (tck_rate > pcap_rate / 2) {
		div = 2;
	} else {
		div = pcap_rate / tck_rate;
		if (pcap_rate / div > XADC_ZYNQ_TCK_RATE_MAX)
			div++;
	}

	if (div <= 3)
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV2;
	else if (div <= 7)
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV4;
	else if (div <= 15)
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV8;
	else
		tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV16;

	xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, XADC_ZYNQ_CTL_RESET);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, 0);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, ~0);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK, xadc->zynq_intmask);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, XADC_ZYNQ_CFG_ENABLE |
			XADC_ZYNQ_CFG_REDGE | XADC_ZYNQ_CFG_WEDGE |
			tck_div | XADC_ZYNQ_CFG_IGAP(igap));

	return 0;
}

static unsigned long xadc_zynq_get_dclk_rate(struct xadc *xadc)
{
	unsigned int div;
	uint32_t val;

	xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &val);

	switch (val & XADC_ZYNQ_CFG_TCKRATE_MASK) {
	case XADC_ZYNQ_CFG_TCKRATE_DIV4:
		div = 4;
		break;
	case XADC_ZYNQ_CFG_TCKRATE_DIV8:
		div = 8;
		break;
	case XADC_ZYNQ_CFG_TCKRATE_DIV16:
		div = 16;
		break;
	default:
		div = 2;
		break;
	}

	return clk_get_rate(xadc->clk) / div;
}

static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
{
	unsigned long flags;
	uint32_t status;

	/* Move OT to bit 7 */
	alarm = ((alarm & 0x08) << 4) | ((alarm & 0xf0) >> 1) | (alarm & 0x07);

	spin_lock_irqsave(&xadc->lock, flags);

	/* Clear previous interrupts if any. */
	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status & alarm);

	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_ALARM_MASK,
		~alarm & XADC_ZYNQ_INT_ALARM_MASK);

	spin_unlock_irqrestore(&xadc->lock, flags);
}

static const struct xadc_ops xadc_zynq_ops = {
	.read = xadc_zynq_read_adc_reg,
	.write = xadc_zynq_write_adc_reg,
	.setup = xadc_zynq_setup,
	.get_dclk_rate = xadc_zynq_get_dclk_rate,
	.interrupt_handler = xadc_zynq_interrupt_handler,
	.update_alarm = xadc_zynq_update_alarm,
};

static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t *val)
{
	uint32_t val32;

	xadc_read_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, &val32);
	*val = val32 & 0xffff;

	return 0;
}

static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t val)
{
	xadc_write_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, val);

	return 0;
}

static int xadc_axi_setup(struct platform_device *pdev,
	struct iio_dev *indio_dev, int irq)
{
	struct xadc *xadc = iio_priv(indio_dev);

	xadc_write_reg(xadc, XADC_AXI_REG_RESET, XADC_AXI_RESET_MAGIC);
	xadc_write_reg(xadc, XADC_AXI_REG_GIER, XADC_AXI_GIER_ENABLE);

	return 0;
}

static irqreturn_t xadc_axi_interrupt_handler(int irq, void *devid)
{
	struct iio_dev *indio_dev = devid;
	struct xadc *xadc = iio_priv(indio_dev);
	uint32_t status, mask;
	unsigned int events;

	xadc_read_reg(xadc, XADC_AXI_REG_IPISR, &status);
	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &mask);
	status &= mask;

	if (!status)
		return IRQ_NONE;

	if ((status & XADC_AXI_INT_EOS) && xadc->trigger)
		iio_trigger_poll(xadc->trigger);

	if (status & XADC_AXI_INT_ALARM_MASK) {
		/*
		 * The order of the bits in the AXI-XADC status register does
		 * not match the order of the bits in the XADC alarm enable
		 * register. xadc_handle_events() expects the events to be in
		 * the same order as the XADC alarm enable register.
		 */
		events = (status & 0x000e) >> 1;
		events |= (status & 0x0001) << 3;
		events |= (status & 0x3c00) >> 6;
		xadc_handle_events(indio_dev, events);
	}

	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, status);

	return IRQ_HANDLED;
}

static void xadc_axi_update_alarm(struct xadc *xadc, unsigned int alarm)
{
	uint32_t val;
	unsigned long flags;

	/*
	 * The order of the bits in the AXI-XADC status register does not match
	 * the order of the bits in the XADC alarm enable register. We get
	 * passed the alarm mask in the same order as in the XADC alarm enable
	 * register.
	 */
	alarm = ((alarm & 0x07) << 1) | ((alarm & 0x08) >> 3) |
			((alarm & 0xf0) << 6);

	spin_lock_irqsave(&xadc->lock, flags);
	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
	val &= ~XADC_AXI_INT_ALARM_MASK;
	val |= alarm;
	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
	spin_unlock_irqrestore(&xadc->lock, flags);
}

static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
{
	return clk_get_rate(xadc->clk);
}

static const struct xadc_ops xadc_axi_ops = {
	.read = xadc_axi_read_adc_reg,
	.write = xadc_axi_write_adc_reg,
	.setup = xadc_axi_setup,
	.get_dclk_rate = xadc_axi_get_dclk,
	.update_alarm = xadc_axi_update_alarm,
	.interrupt_handler = xadc_axi_interrupt_handler,
	.flags = XADC_FLAGS_BUFFERED,
};

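/*
 * Read-modify-write helper for the 16-bit XADC registers. The underscore
 * variant expects the caller to already hold xadc->mutex.
 */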
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t mask, uint16_t val)
{
	uint16_t tmp;
	int ret;

	ret = _xadc_read_adc_reg(xadc, reg, &tmp);
	if (ret)
		return ret;

	return _xadc_write_adc_reg(xadc, reg, (tmp & ~mask) | val);
}

static int xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
	uint16_t mask, uint16_t val)
{
	int ret;

	mutex_lock(&xadc->mutex);
	ret = _xadc_update_adc_reg(xadc, reg, mask, val);
	mutex_unlock(&xadc->mutex);

	return ret;
}

static unsigned long xadc_get_dclk_rate(struct xadc *xadc)
{
	return xadc->ops->get_dclk_rate(xadc);
}

static int xadc_update_scan_mode(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned int n;

	n = bitmap_weight(mask, indio_dev->masklength);

	kfree(xadc->data);
	xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL);
	if (!xadc->data)
		return -ENOMEM;

	return 0;
}

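/*
 * The scan indices correspond to bit positions in the XADC channel sequencer
 * registers; map a scan index back to the data register of the channel that
 * occupies that sequencer slot.
 */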
static unsigned int xadc_scan_index_to_channel(unsigned int scan_index)
{
	switch (scan_index) {
	case 5:
		return XADC_REG_VCCPINT;
	case 6:
		return XADC_REG_VCCPAUX;
	case 7:
		return XADC_REG_VCCO_DDR;
	case 8:
		return XADC_REG_TEMP;
	case 9:
		return XADC_REG_VCCINT;
	case 10:
		return XADC_REG_VCCAUX;
	case 11:
		return XADC_REG_VPVN;
	case 12:
		return XADC_REG_VREFP;
	case 13:
		return XADC_REG_VREFN;
	case 14:
		return XADC_REG_VCCBRAM;
	default:
		return XADC_REG_VAUX(scan_index - 16);
	}
}

static irqreturn_t xadc_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned int chan;
	int i, j = 0;

	if (!xadc->data)
		goto out;

	for_each_set_bit(i, indio_dev->active_scan_mask,
		indio_dev->masklength) {
		chan = xadc_scan_index_to_channel(i);
		xadc_read_adc_reg(xadc, chan, &xadc->data[j]);
		j++;
	}

	iio_push_to_buffers(indio_dev, xadc->data);

out:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

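/*
 * Two triggers are exposed: "convst" switches the XADC into event-driven
 * sampling (XADC_CONF0_EC), where conversions are started by the CONVST
 * signal, while "samplerate" uses continuous sampling. Only one of them can
 * own the end-of-sequence interrupt at a time.
 */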
static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
{
	struct xadc *xadc = iio_trigger_get_drvdata(trigger);
	unsigned long flags;
	unsigned int convst;
	unsigned int val;
	int ret = 0;

	mutex_lock(&xadc->mutex);

	if (state) {
		/* Only one of the two triggers can be active at a time. */
		if (xadc->trigger != NULL) {
			ret = -EBUSY;
			goto err_out;
		} else {
			xadc->trigger = trigger;
			if (trigger == xadc->convst_trigger)
				convst = XADC_CONF0_EC;
			else
				convst = 0;
		}
		ret = _xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF0_EC,
					convst);
		if (ret)
			goto err_out;
	} else {
		xadc->trigger = NULL;
	}

	spin_lock_irqsave(&xadc->lock, flags);
	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
	if (state)
		val |= XADC_AXI_INT_EOS;
	else
		val &= ~XADC_AXI_INT_EOS;
	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
	spin_unlock_irqrestore(&xadc->lock, flags);

err_out:
	mutex_unlock(&xadc->mutex);

	return ret;
}

static const struct iio_trigger_ops xadc_trigger_ops = {
	.set_trigger_state = &xadc_trigger_set_state,
};

static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
	const char *name)
{
	struct iio_trigger *trig;
	int ret;

	trig = iio_trigger_alloc("%s%d-%s", indio_dev->name,
				indio_dev->id, name);
	if (trig == NULL)
		return ERR_PTR(-ENOMEM);

	trig->dev.parent = indio_dev->dev.parent;
	trig->ops = &xadc_trigger_ops;
	iio_trigger_set_drvdata(trig, iio_priv(indio_dev));

	ret = iio_trigger_register(trig);
	if (ret)
		goto error_free_trig;

	return trig;

error_free_trig:
	iio_trigger_free(trig);
	return ERR_PTR(ret);
}

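/*
 * ADC B is only needed for the simultaneous and independent sequencer
 * modes; keep it powered down in all other modes to save power.
 */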
static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
{
	unsigned int val;

	switch (seq_mode) {
	case XADC_CONF1_SEQ_SIMULTANEOUS:
	case XADC_CONF1_SEQ_INDEPENDENT:
		val = XADC_CONF2_PD_ADC_B;
		break;
	default:
		val = 0;
		break;
	}

	return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_PD_MASK,
		val);
}

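/*
 * Pick a sequencer mode that covers the active scan mask: with an external
 * dual mux, or when both halves of the aux channel range are in use,
 * simultaneous sampling is required; otherwise the continuous sequence mode
 * is sufficient.
 */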
static int xadc_get_seq_mode(struct xadc *xadc, unsigned long scan_mode)
{
	unsigned int aux_scan_mode = scan_mode >> 16;

	if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_DUAL)
		return XADC_CONF1_SEQ_SIMULTANEOUS;

	if ((aux_scan_mode & 0xff00) == 0 ||
		(aux_scan_mode & 0x00ff) == 0)
		return XADC_CONF1_SEQ_CONTINUOUS;

	return XADC_CONF1_SEQ_SIMULTANEOUS;
}

static int xadc_postdisable(struct iio_dev *indio_dev)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long scan_mask;
	int ret;
	int i;

	scan_mask = 1; /* Run calibration as part of the sequence */
	for (i = 0; i < indio_dev->num_channels; i++)
		scan_mask |= BIT(indio_dev->channels[i].scan_index);

	/* Enable all channels and calibration */
	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
	if (ret)
		return ret;

	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
	if (ret)
		return ret;

	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
		XADC_CONF1_SEQ_CONTINUOUS);
	if (ret)
		return ret;

	return xadc_power_adc_b(xadc, XADC_CONF1_SEQ_CONTINUOUS);
}

static int xadc_preenable(struct iio_dev *indio_dev)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long scan_mask;
	int seq_mode;
	int ret;

	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
		XADC_CONF1_SEQ_DEFAULT);
	if (ret)
		goto err;

	scan_mask = *indio_dev->active_scan_mask;
	seq_mode = xadc_get_seq_mode(xadc, scan_mask);

	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
	if (ret)
		goto err;

	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
	if (ret)
		goto err;

	ret = xadc_power_adc_b(xadc, seq_mode);
	if (ret)
		goto err;

	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
		seq_mode);
	if (ret)
		goto err;

	return 0;
err:
	xadc_postdisable(indio_dev);
	return ret;
}

static const struct iio_buffer_setup_ops xadc_buffer_ops = {
	.preenable = &xadc_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
	.postdisable = &xadc_postdisable,
};

static int xadc_read_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned int div;
	uint16_t val16;
	int ret;

	switch (info) {
	case IIO_CHAN_INFO_RAW:
		if (iio_buffer_enabled(indio_dev))
			return -EBUSY;
		ret = xadc_read_adc_reg(xadc, chan->address, &val16);
		if (ret < 0)
			return ret;

		val16 >>= 4;
		if (chan->scan_type.sign == 'u')
			*val = val16;
		else
			*val = sign_extend32(val16, 11);

		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			/* V = (val * 3.0) / 4096 */
			switch (chan->address) {
			case XADC_REG_VCCINT:
			case XADC_REG_VCCAUX:
			case XADC_REG_VREFP:
			case XADC_REG_VREFN:
			case XADC_REG_VCCBRAM:
			case XADC_REG_VCCPINT:
			case XADC_REG_VCCPAUX:
			case XADC_REG_VCCO_DDR:
				*val = 3000;
				break;
			default:
				*val = 1000;
				break;
			}
			*val2 = 12;
			return IIO_VAL_FRACTIONAL_LOG2;
		case IIO_TEMP:
			/* Temp in C = (val * 503.975) / 4096 - 273.15 */
			*val = 503975;
			*val2 = 12;
			return IIO_VAL_FRACTIONAL_LOG2;
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_OFFSET:
		/* Only the temperature channel has an offset */
		*val = -((273150 << 12) / 503975);
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SAMP_FREQ:
		ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
		if (ret)
			return ret;

		div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
		if (div < 2)
			div = 2;

		*val = xadc_get_dclk_rate(xadc) / div / 26;

		return IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}

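/*
 * A conversion takes 26 ADC clock cycles, so the sample rate works out to
 * dclk_rate / (div * 26), with div clamped to the 2-255 range the hardware
 * supports.
 */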
static int xadc_write_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan, int val, int val2, long info)
{
	struct xadc *xadc = iio_priv(indio_dev);
	unsigned long clk_rate = xadc_get_dclk_rate(xadc);
	unsigned int div;

	if (info != IIO_CHAN_INFO_SAMP_FREQ)
		return -EINVAL;

	if (val <= 0)
		return -EINVAL;

	/* Max. 150 kSPS */
	if (val > 150000)
		val = 150000;

	val *= 26;

	/* Min 1MHz */
	if (val < 1000000)
		val = 1000000;

	/*
	 * We want to round down, but only if we do not exceed the 150 kSPS
	 * limit.
	 */
	div = clk_rate / val;
	if (clk_rate / div / 26 > 150000)
		div++;
	if (div < 2)
		div = 2;
	else if (div > 0xff)
		div = 0xff;

	return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_DIV_MASK,
		div << XADC_CONF2_DIV_OFFSET);
}

static const struct iio_event_spec xadc_temp_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE) |
				BIT(IIO_EV_INFO_VALUE) |
				BIT(IIO_EV_INFO_HYSTERESIS),
	},
};

/* Separate values for upper and lower thresholds, but only a shared enable */
static const struct iio_event_spec xadc_voltage_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_FALLING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_EITHER,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
	},
};

#define XADC_CHAN_TEMP(_chan, _scan_index, _addr) { \
	.type = IIO_TEMP, \
	.indexed = 1, \
	.channel = (_chan), \
	.address = (_addr), \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		BIT(IIO_CHAN_INFO_SCALE) | \
		BIT(IIO_CHAN_INFO_OFFSET), \
	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
	.event_spec = xadc_temp_events, \
	.num_event_specs = ARRAY_SIZE(xadc_temp_events), \
	.scan_index = (_scan_index), \
	.scan_type = { \
		.sign = 'u', \
		.realbits = 12, \
		.storagebits = 16, \
		.shift = 4, \
		.endianness = IIO_CPU, \
	}, \
}

#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) { \
	.type = IIO_VOLTAGE, \
	.indexed = 1, \
	.channel = (_chan), \
	.address = (_addr), \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		BIT(IIO_CHAN_INFO_SCALE), \
	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
	.event_spec = (_alarm) ? xadc_voltage_events : NULL, \
	.num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
	.scan_index = (_scan_index), \
	.scan_type = { \
		.sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
		.realbits = 12, \
		.storagebits = 16, \
		.shift = 4, \
		.endianness = IIO_CPU, \
	}, \
	.extend_name = _ext, \
}

static const struct iio_chan_spec xadc_channels[] = {
	XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
	XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
	XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
	XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
	XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
	XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
	XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
	XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
	XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
	XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
	XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
	XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
	XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
	XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
	XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
	XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
	XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
	XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
	XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
	XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
	XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
	XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
	XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
	XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
	XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
	XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
};

static const struct iio_info xadc_info = {
	.read_raw = &xadc_read_raw,
	.write_raw = &xadc_write_raw,
	.read_event_config = &xadc_read_event_config,
	.write_event_config = &xadc_write_event_config,
	.read_event_value = &xadc_read_event_value,
	.write_event_value = &xadc_write_event_value,
	.update_scan_mode = &xadc_update_scan_mode,
};

static const struct of_device_id xadc_of_match_table[] = {
	{ .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
	{ .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
	{ },
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);

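/*
 * Illustrative devicetree fragment for a ZYNQ XADC node (an assumed example;
 * see the dt-bindings documentation for the authoritative format):
 *
 *	xadc@f8007100 {
 *		compatible = "xlnx,zynq-xadc-1.00.a";
 *		reg = <0xf8007100 0x20>;
 *		interrupts = <0 7 4>;
 *		clocks = <&clkc 12>;
 *		xlnx,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *			channel@0 {
 *				reg = <0>;
 *				xlnx,bipolar;
 *			};
 *		};
 *	};
 */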
static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
	unsigned int *conf)
{
	struct xadc *xadc = iio_priv(indio_dev);
	struct iio_chan_spec *channels, *chan;
	struct device_node *chan_node, *child;
	unsigned int num_channels;
	const char *external_mux;
	u32 ext_mux_chan;
	u32 reg;
	int ret;

	*conf = 0;

	ret = of_property_read_string(np, "xlnx,external-mux", &external_mux);
	if (ret < 0 || strcasecmp(external_mux, "none") == 0)
		xadc->external_mux_mode = XADC_EXTERNAL_MUX_NONE;
	else if (strcasecmp(external_mux, "single") == 0)
		xadc->external_mux_mode = XADC_EXTERNAL_MUX_SINGLE;
	else if (strcasecmp(external_mux, "dual") == 0)
		xadc->external_mux_mode = XADC_EXTERNAL_MUX_DUAL;
	else
		return -EINVAL;

	if (xadc->external_mux_mode != XADC_EXTERNAL_MUX_NONE) {
		ret = of_property_read_u32(np, "xlnx,external-mux-channel",
					&ext_mux_chan);
		if (ret < 0)
			return ret;

		if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_SINGLE) {
			if (ext_mux_chan == 0)
				ext_mux_chan = XADC_REG_VPVN;
			else if (ext_mux_chan <= 16)
				ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
			else
				return -EINVAL;
		} else {
			if (ext_mux_chan > 0 && ext_mux_chan <= 8)
				ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
			else
				return -EINVAL;
		}

		*conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
	}

	channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	num_channels = 9;
	chan = &channels[9];

	chan_node = of_get_child_by_name(np, "xlnx,channels");
	if (chan_node) {
		for_each_child_of_node(chan_node, child) {
			if (num_channels >= ARRAY_SIZE(xadc_channels)) {
				of_node_put(child);
				break;
			}

			ret = of_property_read_u32(child, "reg", &reg);
			if (ret || reg > 16)
				continue;

			if (of_property_read_bool(child, "xlnx,bipolar"))
				chan->scan_type.sign = 's';

			if (reg == 0) {
				chan->scan_index = 11;
				chan->address = XADC_REG_VPVN;
			} else {
				chan->scan_index = 15 + reg;
				chan->address = XADC_REG_VAUX(reg - 1);
			}
			num_channels++;
			chan++;
		}
	}
	of_node_put(chan_node);

	indio_dev->num_channels = num_channels;
	indio_dev->channels = krealloc(channels, sizeof(*channels) *
					num_channels, GFP_KERNEL);
	/* If we can't resize the channels array, just use the original */
	if (!indio_dev->channels)
		indio_dev->channels = channels;

	return 0;
}

static int xadc_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	struct iio_dev *indio_dev;
	unsigned int bipolar_mask;
	struct resource *mem;
	unsigned int conf0;
	struct xadc *xadc;
	int ret;
	int irq;
	int i;

	if (!pdev->dev.of_node)
		return -ENODEV;

	id = of_match_node(xadc_of_match_table, pdev->dev.of_node);
	if (!id)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -ENXIO;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*xadc));
	if (!indio_dev)
		return -ENOMEM;

	xadc = iio_priv(indio_dev);
	xadc->ops = id->data;
	init_completion(&xadc->completion);
	mutex_init(&xadc->mutex);
	spin_lock_init(&xadc->lock);
	INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xadc->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(xadc->base))
		return PTR_ERR(xadc->base);

	indio_dev->dev.parent = &pdev->dev;
	indio_dev->dev.of_node = pdev->dev.of_node;
	indio_dev->name = "xadc";
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &xadc_info;

	ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
	if (ret)
		goto err_device_free;

	if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
		ret = iio_triggered_buffer_setup(indio_dev,
			&iio_pollfunc_store_time, &xadc_trigger_handler,
			&xadc_buffer_ops);
		if (ret)
			goto err_device_free;

		xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
		if (IS_ERR(xadc->convst_trigger)) {
			ret = PTR_ERR(xadc->convst_trigger);
			goto err_triggered_buffer_cleanup;
		}
		xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
			"samplerate");
		if (IS_ERR(xadc->samplerate_trigger)) {
			ret = PTR_ERR(xadc->samplerate_trigger);
			goto err_free_convst_trigger;
		}
	}

	xadc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(xadc->clk)) {
		ret = PTR_ERR(xadc->clk);
		goto err_free_samplerate_trigger;
	}

	ret = clk_prepare_enable(xadc->clk);
	if (ret)
		goto err_free_samplerate_trigger;

	ret = xadc->ops->setup(pdev, indio_dev, irq);
	if (ret)
		goto err_clk_disable_unprepare;

	ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
			dev_name(&pdev->dev), indio_dev);
	if (ret)
		goto err_clk_disable_unprepare;

	for (i = 0; i < 16; i++)
		xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
			&xadc->threshold[i]);

	ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
	if (ret)
		goto err_free_irq;

	bipolar_mask = 0;
	for (i = 0; i < indio_dev->num_channels; i++) {
		if (indio_dev->channels[i].scan_type.sign == 's')
			bipolar_mask |= BIT(indio_dev->channels[i].scan_index);
	}

	ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
	if (ret)
		goto err_free_irq;
	ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
		bipolar_mask >> 16);
	if (ret)
		goto err_free_irq;

	/* Disable all alarms */
	xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
		XADC_CONF1_ALARM_MASK);

	/* Set thresholds to min/max */
	for (i = 0; i < 16; i++) {
		/*
		 * Set max voltage threshold and both temperature thresholds to
		 * 0xffff, min voltage threshold to 0.
		 */
		if (i % 8 < 4 || i == 7)
			xadc->threshold[i] = 0xffff;
		else
			xadc->threshold[i] = 0;
		xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
			xadc->threshold[i]);
	}

	/* Go to non-buffered mode */
	xadc_postdisable(indio_dev);

	ret = iio_device_register(indio_dev);
	if (ret)
		goto err_free_irq;

	platform_set_drvdata(pdev, indio_dev);

	return 0;

err_free_irq:
	free_irq(irq, indio_dev);
err_clk_disable_unprepare:
	clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
		iio_trigger_free(xadc->samplerate_trigger);
err_free_convst_trigger:
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
		iio_trigger_free(xadc->convst_trigger);
err_triggered_buffer_cleanup:
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
		iio_triggered_buffer_cleanup(indio_dev);
err_device_free:
	kfree(indio_dev->channels);

	return ret;
}

static int xadc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct xadc *xadc = iio_priv(indio_dev);
	int irq = platform_get_irq(pdev, 0);

	iio_device_unregister(indio_dev);
	if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
		iio_trigger_free(xadc->samplerate_trigger);
		iio_trigger_free(xadc->convst_trigger);
		iio_triggered_buffer_cleanup(indio_dev);
	}
	free_irq(irq, indio_dev);
	clk_disable_unprepare(xadc->clk);
	cancel_delayed_work(&xadc->zynq_unmask_work);
	kfree(xadc->data);
	kfree(indio_dev->channels);

	return 0;
}

static struct platform_driver xadc_driver = {
	.probe = xadc_probe,
	.remove = xadc_remove,
	.driver = {
		.name = "xadc",
		.of_match_table = xadc_of_match_table,
	},
};

module_platform_driver(xadc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Xilinx XADC IIO driver");