// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek UART APDMA driver.
 *
 * Copyright (c) 2019 MediaTek Inc.
 * Author: Long Cheng <long.cheng@mediatek.com>
 */
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../virt-dma.h"
/* The default number of virtual channels */
#define MTK_UART_APDMA_NR_VCHANS	8

#define VFF_EN_B		BIT(0)
#define VFF_STOP_B		BIT(0)
#define VFF_FLUSH_B		BIT(0)
#define VFF_4G_EN_B		BIT(0)
/* rx valid size >= vff thre */
#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
/* tx left size >= vff thre */
#define VFF_TX_INT_EN_B		BIT(0)
#define VFF_WARM_RST_B		BIT(0)
#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
#define VFF_TX_INT_CLR_B	0
#define VFF_STOP_CLR_B		0
#define VFF_EN_CLR_B		0
#define VFF_INT_EN_CLR_B	0
#define VFF_4G_SUPPORT_CLR_B	0

/*
 * Interrupt trigger level for tx.
 * If the threshold is n, no polling is required to start tx;
 * otherwise VFF_FLUSH must be polled.
 */
#define VFF_TX_THRE(n)		(n)
/* interrupt trigger level for rx */
#define VFF_RX_THRE(n)		((n) * 3 / 4)

#define VFF_RING_SIZE	0xffff
/* invert this bit when the ring head wraps around again */
#define VFF_RING_WRAP	0x10000
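
/*
 * Note: the read/write pointer registers below encode an offset into the
 * ring in their low 16 bits (masked with VFF_RING_SIZE) and keep a wrap
 * flag in bit 16 (VFF_RING_WRAP) that toggles on every wrap, so hardware
 * and software can tell a full ring from an empty one when the offsets
 * are equal.
 */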
#define VFF_INT_FLAG		0x00
#define VFF_INT_EN		0x04
#define VFF_EN			0x08
#define VFF_RST			0x0c
#define VFF_STOP		0x10
#define VFF_FLUSH		0x14
#define VFF_ADDR		0x1c
#define VFF_LEN			0x24
#define VFF_THRE		0x28
#define VFF_WPT			0x2c
#define VFF_RPT			0x30
/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
#define VFF_VALID_SIZE		0x3c
/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
#define VFF_LEFT_SIZE		0x40
#define VFF_DEBUG_STATUS	0x50
#define VFF_4G_SUPPORT		0x54
struct mtk_uart_apdmadev {
	struct dma_device ddev;
	struct clk *clk;
	bool support_33bits;
	unsigned int dma_requests;
};

struct mtk_uart_apdma_desc {
	struct virt_dma_desc vd;

	dma_addr_t addr;
	unsigned int avail_len;
};

struct mtk_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config	cfg;
	struct mtk_uart_apdma_desc *desc;
	enum dma_transfer_direction dir;

	void __iomem *base;
	unsigned int irq;

	unsigned int rx_status;
};
static inline struct mtk_uart_apdmadev *
to_mtk_uart_apdma_dev(struct dma_device *d)
{
	return container_of(d, struct mtk_uart_apdmadev, ddev);
}

static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
{
	return container_of(c, struct mtk_chan, vc.chan);
}

static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
	(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
}

static void mtk_uart_apdma_write(struct mtk_chan *c,
				 unsigned int reg, unsigned int val)
{
	writel(val, c->base + reg);
}

static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
{
	return readl(c->base + reg);
}
static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
	struct dma_chan *chan = vd->tx.chan;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	kfree(c->desc);
}
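
/*
 * Program the TX virtual FIFO (VFF) and kick off a transfer: on first use
 * the ring base address, length and threshold are written, then the write
 * pointer is advanced by the amount of new data so the hardware starts
 * draining it towards the UART.
 */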
static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
		to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int wpt, vff_sz;

	vff_sz = c->cfg.dst_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_WPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");

	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
		return;
	}

	wpt = mtk_uart_apdma_read(c, VFF_WPT);

	wpt += c->desc->avail_len;
	if ((wpt & VFF_RING_SIZE) == vff_sz)
		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

	/* Let DMA start moving data */
	mtk_uart_apdma_write(c, VFF_WPT, wpt);

	/* HW auto set to 0 when left size >= threshold */
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
}
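
/*
 * Program the RX virtual FIFO: the threshold is set to 3/4 of the ring
 * size (VFF_RX_THRE), so the receive interrupt fires once the hardware
 * has written at least that much into the ring.
 */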
static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
		to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int vff_sz;

	vff_sz = c->cfg.src_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_RPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
}
static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;

	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);

	list_del(&d->vd.node);
	vchan_cookie_complete(&d->vd);
}
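
/*
 * RX completion: compute how many bytes the hardware has written since the
 * last read pointer update and report the unused remainder of the buffer
 * as the residue (picked up later via device_tx_status).
 */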
static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int len, wg, rg;
	int cnt;

	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
		return;

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	len = c->cfg.src_port_window_size;
	rg = mtk_uart_apdma_read(c, VFF_RPT);
	wg = mtk_uart_apdma_read(c, VFF_WPT);
	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

	/*
	 * The buffer is a ring buffer. If the wrap bits differ,
	 * WPT has already started the next cycle around the ring.
	 */
	if ((rg ^ wg) & VFF_RING_WRAP)
		cnt += len;

	c->rx_status = d->avail_len - cnt;
	mtk_uart_apdma_write(c, VFF_RPT, wg);

	list_del(&d->vd.node);
	vchan_cookie_complete(&d->vd);
}
static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
	struct dma_chan *chan = (struct dma_chan *)dev_id;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_rx_handler(c);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_tx_handler(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}
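
/*
 * Channel setup: take a runtime PM reference, warm-reset the VFF, wait for
 * the engine to report idle and then request the per-channel interrupt.
 */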
static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned int status;
	int ret;

	ret = pm_runtime_get_sync(mtkd->ddev.dev);
	if (ret < 0) {
		pm_runtime_put_noidle(chan->device->dev);
		return ret;
	}

	mtk_uart_apdma_write(c, VFF_ADDR, 0);
	mtk_uart_apdma_write(c, VFF_THRE, 0);
	mtk_uart_apdma_write(c, VFF_LEN, 0);
	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);

	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		return ret;

	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
	if (ret < 0) {
		dev_err(chan->device->dev, "Can't request dma IRQ\n");
		return -EINVAL;
	}

	if (mtkd->support_33bits)
		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);

	return ret;
}
static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	free_irq(c->irq, chan);

	tasklet_kill(&c->vc.task);

	vchan_free_chan_resources(&c->vc);

	pm_runtime_put_sync(mtkd->ddev.dev);
}
static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (!txstate)
		return ret;

	dma_set_residue(txstate, c->rx_status);

	return ret;
}
/*
 * dmaengine_prep_slave_single() calls this with sglen == 1: the 8250 UART
 * uses a single ring buffer and hands over exactly one sg entry.
 */
static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
	(struct dma_chan *chan, struct scatterlist *sgl,
	 unsigned int sglen, enum dma_transfer_direction dir,
	 unsigned long tx_flags, void *context)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct mtk_uart_apdma_desc *d;

	if (!is_slave_direction(dir) || sglen != 1)
		return NULL;

	/* Now allocate and setup the descriptor */
	d = kzalloc(sizeof(*d), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->avail_len = sg_dma_len(sgl);
	d->addr = sg_dma_address(sgl);
	c->dir = dir;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
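
/*
 * issue_pending moves submitted descriptors onto the issued list and, with
 * the channel lock held, programs the VFF for the next descriptor in the
 * direction chosen when the descriptor was prepared.
 */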
static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		vd = vchan_next_desc(&c->vc);
		c->desc = to_mtk_uart_apdma_desc(&vd->tx);

		if (c->dir == DMA_DEV_TO_MEM)
			mtk_uart_apdma_start_rx(c);
		else if (c->dir == DMA_MEM_TO_DEV)
			mtk_uart_apdma_start_tx(c);
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);
}
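
/*
 * The client driver (typically the MediaTek 8250 UART driver) is expected
 * to describe its ring buffer through dma_slave_config: the TX ring size
 * goes in dst_port_window_size and the RX ring size in src_port_window_size,
 * which the start/handler paths above read back as the VFF length.
 */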
static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	memcpy(&c->cfg, config, sizeof(*config));

	return 0;
}
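
/*
 * terminate_all first flushes any data still sitting in the VFF, then stops
 * the engine (set STOP, wait for EN to clear, clear STOP), disables and
 * clears its interrupts and finally releases every outstanding descriptor.
 */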
static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;
	unsigned int status;
	LIST_HEAD(head);
	int ret;

	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);

	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
				 status, status != VFF_FLUSH_B, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

	synchronize_irq(c->irq);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	spin_unlock_irqrestore(&c->vc.lock, flags);

	/*
	 * Wait for a possibly in-flight interrupt only after dropping the
	 * lock; waiting while holding vc.lock could deadlock with the
	 * interrupt handler, which takes the same lock.
	 */
	synchronize_irq(c->irq);

	return 0;
}
static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
{
	while (!list_empty(&mtkd->ddev.channels)) {
		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
			struct mtk_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}
static const struct of_device_id mtk_uart_apdma_match[] = {
	{ .compatible = "mediatek,mt6577-uart-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
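
/*
 * Each channel owns its own register window and interrupt line, looked up
 * by index from the platform device. The channel count comes from the
 * "dma-requests" DT property and falls back to MTK_UART_APDMA_NR_VCHANS.
 */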
static int mtk_uart_apdma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mtk_uart_apdmadev *mtkd;
	int bit_mask = 32, rc;
	struct mtk_chan *c;
	unsigned int i;

	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
	if (!mtkd)
		return -ENOMEM;

	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mtkd->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		rc = PTR_ERR(mtkd->clk);
		return rc;
	}

	if (of_property_read_bool(np, "mediatek,dma-33bits"))
		mtkd->support_33bits = true;

	if (mtkd->support_33bits)
		bit_mask = 33;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
	if (rc)
		return rc;

	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
	mtkd->ddev.device_alloc_chan_resources =
				mtk_uart_apdma_alloc_chan_resources;
	mtkd->ddev.device_free_chan_resources =
				mtk_uart_apdma_free_chan_resources;
	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	mtkd->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&mtkd->ddev.channels);

	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_UART_APDMA_NR_VCHANS);
	}

	for (i = 0; i < mtkd->dma_requests; i++) {
		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
		if (!c) {
			rc = -ENODEV;
			goto err_no_dma;
		}

		c->base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(c->base)) {
			rc = PTR_ERR(c->base);
			goto err_no_dma;
		}
		c->vc.desc_free = mtk_uart_apdma_desc_free;
		vchan_init(&c->vc, &mtkd->ddev);

		rc = platform_get_irq(pdev, i);
		if (rc < 0)
			goto err_no_dma;
		c->irq = rc;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);

	rc = dma_async_device_register(&mtkd->ddev);
	if (rc)
		goto rpm_disable;

	platform_set_drvdata(pdev, mtkd);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
	if (rc)
		goto dma_remove;

	return rc;

dma_remove:
	dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
err_no_dma:
	mtk_uart_apdma_free(mtkd);
	return rc;
}
static int mtk_uart_apdma_remove(struct platform_device *pdev)
{
	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);

	mtk_uart_apdma_free(mtkd);

	dma_async_device_unregister(&mtkd->ddev);

	pm_runtime_disable(&pdev->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mtk_uart_apdma_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_resume(struct device *dev)
{
	int ret;
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mtkd->clk);
		if (ret)
			return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int mtk_uart_apdma_runtime_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	return clk_prepare_enable(mtkd->clk);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
			   mtk_uart_apdma_runtime_resume, NULL)
};

static struct platform_driver mtk_uart_apdma_driver = {
	.probe	= mtk_uart_apdma_probe,
	.remove	= mtk_uart_apdma_remove,
	.driver = {
		.name		= KBUILD_MODNAME,
		.pm		= &mtk_uart_apdma_pm_ops,
		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
	},
};

module_platform_driver(mtk_uart_apdma_driver);

MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
MODULE_LICENSE("GPL v2");