/*
 * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iio/iio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>

#include <linux/mfd/ti_am335x_tscadc.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define DMA_BUFFER_SIZE		SZ_2K
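
/*
 * The coherent DMA buffer is used as a two-period cyclic buffer: while the
 * DMA engine fills one half, tiadc_dma_rx_complete() pushes the other half
 * to the IIO buffer one scan at a time.
 */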
struct tiadc_dma {
	struct dma_slave_config conf;
	struct dma_chan *chan;
	dma_addr_t addr;
	dma_cookie_t cookie;
	u8 *buf;
	int current_period;
	int period_size;
	u8 fifo_thresh;
};

struct tiadc_device {
	struct ti_tscadc_dev *mfd_tscadc;
	struct tiadc_dma dma;
	struct mutex fifo1_lock; /* to protect fifo access */
	int channels;
	int total_ch_enabled;
	u8 channel_line[8];
	u8 channel_step[8];
	int buffer_en_ch_steps;
	u16 data[8];
	u32 open_delay[8], sample_delay[8], step_avg[8];
};

static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
{
	return readl(adc->mfd_tscadc->tscadc_base + reg);
}

static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
			 unsigned int val)
{
	writel(val, adc->mfd_tscadc->tscadc_base + reg);
}

static u32 get_adc_step_mask(struct tiadc_device *adc_dev)
{
	u32 step_en;

	step_en = ((1 << adc_dev->channels) - 1);
	step_en <<= TOTAL_STEPS - adc_dev->channels + 1;
	return step_en;
}

static u32 get_adc_chan_step_mask(struct tiadc_device *adc_dev,
				  struct iio_chan_spec const *chan)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adc_dev->channel_step); i++) {
		if (chan->channel == adc_dev->channel_line[i]) {
			u32 step;

			step = adc_dev->channel_step[i];
			/* +1 for the charger */
			return 1 << (step + 1);
		}
	}
	WARN_ON(1);
	return 0;
}

static u32 get_adc_step_bit(struct tiadc_device *adc_dev, int chan)
{
	return 1 << adc_dev->channel_step[chan];
}
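
/*
 * tiadc_step_config() programs one hardware step per channel listed in
 * "ti,adc-channels": input mux, averaging, open/sample delays, and routing
 * of results to FIFO1 (software-continuous mode when a buffer is enabled).
 */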
static void tiadc_step_config(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct device *dev = adc_dev->mfd_tscadc->dev;
	unsigned int stepconfig;
	int i, steps = 0;

	/*
	 * There are 16 configurable steps and 8 analog input
	 * lines available which are shared between Touchscreen and ADC.
	 *
	 * Steps forward, i.e. from 0 towards 16, are used by the ADC
	 * depending on the number of input lines needed.
	 * The channel represents which analog input
	 * needs to be given to the ADC to digitize data.
	 */

	for (i = 0; i < adc_dev->channels; i++) {
		int chan;

		chan = adc_dev->channel_line[i];

		if (adc_dev->step_avg[i] > STEPCONFIG_AVG_16) {
			dev_warn(dev, "chan %d step_avg truncating to %d\n",
				 chan, STEPCONFIG_AVG_16);
			adc_dev->step_avg[i] = STEPCONFIG_AVG_16;
		}

		if (adc_dev->step_avg[i])
			stepconfig =
			STEPCONFIG_AVG(ffs(adc_dev->step_avg[i]) - 1) |
			STEPCONFIG_FIFO1;
		else
			stepconfig = STEPCONFIG_FIFO1;

		if (iio_buffer_enabled(indio_dev))
			stepconfig |= STEPCONFIG_MODE_SWCNT;

		tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
			     stepconfig | STEPCONFIG_INP(chan) |
			     STEPCONFIG_INM_ADCREFM |
			     STEPCONFIG_RFP_VREFP |
			     STEPCONFIG_RFM_VREFN);

		if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) {
			dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n",
				 chan);
			adc_dev->open_delay[i] = STEPDELAY_OPEN_MASK;
		}

		if (adc_dev->sample_delay[i] > 0xFF) {
			dev_warn(dev, "chan %d sample delay truncating to 0xFF\n",
				 chan);
			adc_dev->sample_delay[i] = 0xFF;
		}

		tiadc_writel(adc_dev, REG_STEPDELAY(steps),
			     STEPDELAY_OPEN(adc_dev->open_delay[i]) |
			     STEPDELAY_SAMPLE(adc_dev->sample_delay[i]));

		adc_dev->channel_step[i] = steps;
		steps++;
	}
}
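
/*
 * Hard IRQ handler: the IRQ line is shared with the touchscreen, which owns
 * FIFO0; only FIFO1 events are handled here. An overrun is recovered by
 * toggling the TSC/ADC sub-system, a threshold event wakes the worker thread.
 */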
static irqreturn_t tiadc_irq_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int status, config, adc_fsm;
	unsigned short count = 0;

	status = tiadc_readl(adc_dev, REG_IRQSTATUS);

	/*
	 * ADC and touchscreen share the IRQ line.
	 * FIFO0 interrupts are used by the TSC. Handle FIFO1 IRQs here only.
	 */
	if (status & IRQENB_FIFO1OVRRUN) {
		/* FIFO Overrun. Clear flag. Disable/Enable ADC to recover */
		config = tiadc_readl(adc_dev, REG_CTRL);
		config &= ~(CNTRLREG_TSCSSENB);
		tiadc_writel(adc_dev, REG_CTRL, config);
		tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
				| IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);

		/*
		 * Wait for the idle state: the ADC needs to finish the
		 * current conversion before the module is disabled.
		 */
		do {
			adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
		} while (adc_fsm != 0x10 && count++ < 100);

		tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
		return IRQ_HANDLED;
	} else if (status & IRQENB_FIFO1THRES) {
		/* Disable irq and wake worker thread */
		tiadc_writel(adc_dev, REG_IRQCLR, IRQENB_FIFO1THRES);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}
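
/*
 * Threaded IRQ handler: drain FIFO1 one scan at a time into the IIO buffer,
 * then acknowledge and re-enable the FIFO1 threshold interrupt.
 */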
static irqreturn_t tiadc_worker_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, k, fifo1count, read;
	u16 *data = adc_dev->data;

	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (k = 0; k < fifo1count; k = k + i) {
		for (i = 0; i < (indio_dev->scan_bytes) / 2; i++) {
			read = tiadc_readl(adc_dev, REG_FIFO1);
			data[i] = read & FIFOREAD_DATA_MASK;
		}
		iio_push_to_buffers(indio_dev, (u8 *)data);
	}

	tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES);
	tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);

	return IRQ_HANDLED;
}
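
/*
 * Cyclic DMA completion callback: push the period that just finished to the
 * IIO buffer, then flip to the other half of the coherent buffer.
 */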
static void tiadc_dma_rx_complete(void *param)
{
	struct iio_dev *indio_dev = param;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u8 *data;
	int i;

	data = dma->buf + dma->current_period * dma->period_size;
	dma->current_period = 1 - dma->current_period; /* swap the buffer ID */

	for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
		iio_push_to_buffers(indio_dev, data);
		data += indio_dev->scan_bytes;
	}
}
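
/*
 * Arm the cyclic RX DMA: the FIFO1 threshold (and therefore the burst size
 * and period length) is kept a multiple of the number of enabled channels so
 * that every period contains only complete scans.
 */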
static int tiadc_start_dma(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	struct dma_async_tx_descriptor *desc;

	dma->current_period = 0; /* We start to fill period 0 */

	/*
	 * Make the fifo thresh a multiple of the total number of
	 * channels enabled, and make sure that the cyclic DMA period
	 * length is also a multiple of the total number of channels
	 * enabled. This ensures that no invalid data is reported
	 * to the stack via iio_push_to_buffers().
	 */
	dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
				     adc_dev->total_ch_enabled) - 1;
	/* Make sure that the period length is a multiple of fifo thresh level */
	dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
				     (dma->fifo_thresh + 1) * sizeof(u16));

	dma->conf.src_maxburst = dma->fifo_thresh + 1;
	dmaengine_slave_config(dma->chan, &dma->conf);

	desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
					 dma->period_size * 2,
					 dma->period_size, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = tiadc_dma_rx_complete;
	desc->callback_param = indio_dev;

	dma->cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->chan);

	tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);

	return 0;
}

static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, fifo1count;

	tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
			IRQENB_FIFO1OVRRUN |
			IRQENB_FIFO1UNDRFLW));

	/* Flush FIFO. Needed in corner cases in simultaneous tsc/adc use */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}
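
/*
 * Buffer post-enable: reprogram the steps for continuous mode, cache the
 * step-enable bits for the active scan mask, and start draining FIFO1 via
 * DMA when a channel was obtained, otherwise via the threshold interrupt.
 */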
static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	unsigned int irq_enable;
	unsigned int enb = 0;
	u8 bit;

	tiadc_step_config(indio_dev);
	for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
		enb |= (get_adc_step_bit(adc_dev, bit) << 1);
		adc_dev->total_ch_enabled++;
	}
	adc_dev->buffer_en_ch_steps = enb;

	if (dma->chan)
		tiadc_start_dma(indio_dev);

	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);

	tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES
			| IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);

	irq_enable = IRQENB_FIFO1OVRRUN;
	if (!dma->chan)
		irq_enable |= IRQENB_FIFO1THRES;
	tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable);

	return 0;
}

static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	int fifo1count, i;

	tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
			IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
	adc_dev->buffer_en_ch_steps = 0;
	adc_dev->total_ch_enabled = 0;
	if (dma->chan) {
		tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
		dmaengine_terminate_async(dma->chan);
	}

	/* Flush FIFO of leftover data in the time it takes to disable adc */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}

static int tiadc_buffer_postdisable(struct iio_dev *indio_dev)
{
	tiadc_step_config(indio_dev);

	return 0;
}

static const struct iio_buffer_setup_ops tiadc_buffer_setup_ops = {
	.preenable = &tiadc_buffer_preenable,
	.postenable = &tiadc_buffer_postenable,
	.predisable = &tiadc_buffer_predisable,
	.postdisable = &tiadc_buffer_postdisable,
};
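
/*
 * Attach a devm-managed kfifo buffer to the IIO device and request the
 * shared, threaded TSC/ADC interrupt that feeds it.
 */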
static int tiadc_iio_buffered_hardware_setup(struct device *dev,
	struct iio_dev *indio_dev,
	irqreturn_t (*pollfunc_bh)(int irq, void *p),
	irqreturn_t (*pollfunc_th)(int irq, void *p),
	int irq, unsigned long flags,
	const struct iio_buffer_setup_ops *setup_ops)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = devm_iio_kfifo_allocate(dev);
	if (!buffer)
		return -ENOMEM;

	iio_device_attach_buffer(indio_dev, buffer);

	ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
					flags, indio_dev->name, indio_dev);
	if (ret)
		return ret; /* the kfifo is devm-managed, no manual free */

	indio_dev->setup_ops = setup_ops;
	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;

	return 0;
}

static const char * const chan_name_ain[] = {
	"AIN0",
	"AIN1",
	"AIN2",
	"AIN3",
	"AIN4",
	"AIN5",
	"AIN6",
	"AIN7",
};

static int tiadc_channel_init(struct device *dev, struct iio_dev *indio_dev,
			      int channels)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct iio_chan_spec *chan_array;
	struct iio_chan_spec *chan;
	int i;

	indio_dev->num_channels = channels;
	chan_array = devm_kcalloc(dev, channels, sizeof(*chan_array),
				  GFP_KERNEL);
	if (chan_array == NULL)
		return -ENOMEM;

	chan = chan_array;
	for (i = 0; i < channels; i++, chan++) {
		chan->type = IIO_VOLTAGE;
		chan->indexed = 1;
		chan->channel = adc_dev->channel_line[i];
		chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
		chan->datasheet_name = chan_name_ain[chan->channel];
		chan->scan_index = i;
		chan->scan_type.sign = 'u';
		chan->scan_type.realbits = 12;
		chan->scan_type.storagebits = 16;
	}

	indio_dev->channels = chan_array;

	return 0;
}
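
/*
 * Single-shot read path: arm the channel's step once, poll FIFO1 with a
 * timeout, then scan the whole FIFO and keep the latest sample that matches
 * the requested step. Busy while a buffer is enabled.
 */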
static int tiadc_read_raw(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  int *val, int *val2, long mask)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int ret = IIO_VAL_INT;
	int i, map_val;
	unsigned int fifo1count, read, stepid;
	bool found = false;
	u32 step_en;
	unsigned long timeout;

	if (iio_buffer_enabled(indio_dev))
		return -EBUSY;

	step_en = get_adc_chan_step_mask(adc_dev, chan);
	if (!step_en)
		return -EINVAL;

	mutex_lock(&adc_dev->fifo1_lock);
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	while (fifo1count--)
		tiadc_readl(adc_dev, REG_FIFO1);

	am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);

	timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT * adc_dev->channels);
	/* Wait for Fifo threshold interrupt */
	while (1) {
		fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
		if (fifo1count)
			break;

		if (time_after(jiffies, timeout)) {
			am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
			ret = -EAGAIN;
			goto err_unlock;
		}
	}

	map_val = adc_dev->channel_step[chan->scan_index];

	/*
	 * We check the complete FIFO. We programmed just one entry but in case
	 * something went wrong we left empty handed (-EAGAIN previously) and
	 * then the value appeared somehow in the FIFO we would have two
	 * entries. Therefore we read every item and keep only the latest
	 * version of the requested channel.
	 */
	for (i = 0; i < fifo1count; i++) {
		read = tiadc_readl(adc_dev, REG_FIFO1);
		stepid = read & FIFOREAD_CHNLID_MASK;
		stepid = stepid >> 0x10;

		if (stepid == map_val) {
			read = read & FIFOREAD_DATA_MASK;
			found = true;
			*val = (u16)read;
		}
	}

	am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);

	if (!found)
		ret = -EBUSY;

err_unlock:
	mutex_unlock(&adc_dev->fifo1_lock);
	return ret;
}

static const struct iio_info tiadc_info = {
	.read_raw = &tiadc_read_raw,
};
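
/*
 * DMA is optional: request the "fifo1" channel and a coherent buffer; on
 * failure the driver keeps working in the interrupt-driven FIFO mode and
 * probe only bails out for -EPROBE_DEFER.
 */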
static int tiadc_request_dma(struct platform_device *pdev,
			     struct tiadc_device *adc_dev)
{
	struct tiadc_dma *dma = &adc_dev->dma;
	dma_cap_mask_t mask;

	/* Default slave configuration parameters */
	dma->conf.direction = DMA_DEV_TO_MEM;
	dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	/* Get a channel for RX */
	dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
	if (IS_ERR(dma->chan)) {
		int ret = PTR_ERR(dma->chan);

		dma->chan = NULL;
		return ret;
	}

	/* RX buffer */
	dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				      &dma->addr, GFP_KERNEL);
	if (!dma->buf)
		goto err;

	return 0;

err:
	dma_release_channel(dma->chan);
	dma->chan = NULL; /* fall back to interrupt-driven FIFO draining */

	return -ENOMEM;
}

static int tiadc_parse_dt(struct platform_device *pdev,
			  struct tiadc_device *adc_dev)
{
	struct device_node *node = pdev->dev.of_node;
	struct property *prop;
	const __be32 *cur;
	int channels = 0;
	u32 val;

	of_property_for_each_u32(node, "ti,adc-channels", prop, cur, val) {
		adc_dev->channel_line[channels] = val;

		/* Set default values for optional DT parameters */
		adc_dev->open_delay[channels] = STEPCONFIG_OPENDLY;
		adc_dev->sample_delay[channels] = STEPCONFIG_SAMPLEDLY;
		adc_dev->step_avg[channels] = 16;

		channels++;
	}

	of_property_read_u32_array(node, "ti,chan-step-avg",
				   adc_dev->step_avg, channels);
	of_property_read_u32_array(node, "ti,chan-step-opendelay",
				   adc_dev->open_delay, channels);
	of_property_read_u32_array(node, "ti,chan-step-sampledelay",
				   adc_dev->sample_delay, channels);

	adc_dev->channels = channels;

	return 0;
}
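
/*
 * For reference, an illustrative devicetree fragment using the properties
 * parsed above (node name and values are examples only; see the
 * ti,am3359-tscadc binding for the authoritative layout):
 *
 *	adc {
 *		ti,adc-channels = <4 5 6 7>;
 *		ti,chan-step-opendelay = <0x098 0x3ffff 0x098 0x0>;
 *		ti,chan-step-sampledelay = <0xff 0x0 0xf 0x0>;
 *		ti,chan-step-avg = <16 2 4 8>;
 *	};
 */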
static int tiadc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct tiadc_device *adc_dev;
	struct device_node *node = pdev->dev.of_node;
	int err;

	if (!node) {
		dev_err(&pdev->dev, "Could not find valid DT data.\n");
		return -EINVAL;
	}

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
	if (indio_dev == NULL) {
		dev_err(&pdev->dev, "failed to allocate iio device\n");
		return -ENOMEM;
	}
	adc_dev = iio_priv(indio_dev);

	adc_dev->mfd_tscadc = ti_tscadc_dev_get(pdev);
	tiadc_parse_dt(pdev, adc_dev);

	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &tiadc_info;

	tiadc_step_config(indio_dev);
	tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
	mutex_init(&adc_dev->fifo1_lock);

	err = tiadc_channel_init(&pdev->dev, indio_dev, adc_dev->channels);
	if (err < 0)
		return err;

	err = tiadc_iio_buffered_hardware_setup(&pdev->dev, indio_dev,
						&tiadc_worker_h,
						&tiadc_irq_h,
						adc_dev->mfd_tscadc->irq,
						IRQF_SHARED,
						&tiadc_buffer_setup_ops);
	if (err)
		goto err_free_channels;

	err = iio_device_register(indio_dev);
	if (err)
		goto err_buffer_unregister;

	platform_set_drvdata(pdev, indio_dev);

	err = tiadc_request_dma(pdev, adc_dev);
	if (err && err == -EPROBE_DEFER)
		goto err_dma;

	return 0;

err_dma:
	iio_device_unregister(indio_dev);
err_buffer_unregister:
err_free_channels:
	return err;
}

static int tiadc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u32 step_en;

	if (dma->chan) {
		dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				  dma->buf, dma->addr);
		dma_release_channel(dma->chan);
	}
	iio_device_unregister(indio_dev);

	step_en = get_adc_step_mask(adc_dev);
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, step_en);

	return 0;
}

static int __maybe_unused tiadc_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int idle;

	idle = tiadc_readl(adc_dev, REG_CTRL);
	idle &= ~(CNTRLREG_TSCSSENB);
	tiadc_writel(adc_dev, REG_CTRL, (idle | CNTRLREG_POWERDOWN));

	return 0;
}

static int __maybe_unused tiadc_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int restore;

	/* Make sure ADC is powered up */
	restore = tiadc_readl(adc_dev, REG_CTRL);
	restore &= ~(CNTRLREG_POWERDOWN);
	tiadc_writel(adc_dev, REG_CTRL, restore);

	tiadc_step_config(indio_dev);
	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc,
				adc_dev->buffer_en_ch_steps);

	return 0;
}

static SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);

static const struct of_device_id ti_adc_dt_ids[] = {
	{ .compatible = "ti,am3359-adc", },
	{ }
};
MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);

static struct platform_driver tiadc_driver = {
	.driver = {
		.name	= "TI-am335x-adc",
		.pm	= &tiadc_pm_ops,
		.of_match_table = ti_adc_dt_ids,
	},
	.probe	= tiadc_probe,
	.remove	= tiadc_remove,
};
module_platform_driver(tiadc_driver);

MODULE_DESCRIPTION("TI ADC controller driver");
MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
MODULE_LICENSE("GPL");