// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek UART APDMA driver.
 *
 * Copyright (c) 2019 MediaTek Inc.
 * Author: Long Cheng <long.cheng@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../virt-dma.h"

/* The default number of virtual channels */
#define MTK_UART_APDMA_NR_VCHANS	8

#define VFF_EN_B		BIT(0)
#define VFF_STOP_B		BIT(0)
#define VFF_FLUSH_B		BIT(0)
#define VFF_4G_EN_B		BIT(0)
/* rx valid size >= vff thre */
#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
/* tx left size >= vff thre */
#define VFF_TX_INT_EN_B		BIT(0)
#define VFF_WARM_RST_B		BIT(0)
#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
#define VFF_TX_INT_CLR_B	0
#define VFF_STOP_CLR_B		0
#define VFF_EN_CLR_B		0
#define VFF_INT_EN_CLR_B	0
#define VFF_4G_SUPPORT_CLR_B	0

/*
 * Interrupt trigger level for TX:
 * if the threshold is n, no polling is required to start TX;
 * otherwise VFF_FLUSH needs to be polled.
 */
#define VFF_TX_THRE(n)		(n)
/* Interrupt trigger level for RX */
#define VFF_RX_THRE(n)		((n) * 3 / 4)
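
/*
 * Illustrative note: VFF_TX_THRE(n) programs the threshold to the whole TX
 * window, while VFF_RX_THRE(n) arms the RX interrupt once three quarters of
 * the window holds valid data, e.g. at 0xc00 bytes for a 0x1000-byte window
 * (see the "valid size"/"left size" notes on the INT_EN bits above).
 */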

#define VFF_RING_SIZE	0xffff
/* invert this bit when the ring head wraps around again */
#define VFF_RING_WRAP	0x10000
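
/*
 * Illustrative layout of the VFF_WPT/VFF_RPT ring pointers, as used by
 * mtk_uart_apdma_start_tx() and mtk_uart_apdma_rx_handler() below: bits
 * [15:0] (masked by VFF_RING_SIZE) hold the byte offset into the ring and
 * bit 16 (VFF_RING_WRAP) is inverted each time the offset wraps back to 0,
 * so "empty" (pointers equal) can be told apart from "full" (offsets equal,
 * wrap bits different). For a 0x1000-byte window, advancing the write
 * pointer from offset 0xfff by one byte yields offset 0 with bit 16 flipped.
 */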

#define VFF_INT_FLAG		0x00
#define VFF_INT_EN		0x04
#define VFF_EN			0x08
#define VFF_RST			0x0c
#define VFF_STOP		0x10
#define VFF_FLUSH		0x14
#define VFF_ADDR		0x1c
#define VFF_LEN			0x24
#define VFF_THRE		0x28
#define VFF_WPT			0x2c
#define VFF_RPT			0x30
/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
#define VFF_VALID_SIZE		0x3c
/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
#define VFF_LEFT_SIZE		0x40
#define VFF_DEBUG_STATUS	0x50
#define VFF_4G_SUPPORT		0x54

struct mtk_uart_apdmadev {
	struct dma_device ddev;
	struct clk *clk;
	bool support_33bits;
	unsigned int dma_requests;
};

struct mtk_uart_apdma_desc {
	struct virt_dma_desc vd;

	dma_addr_t addr;
	unsigned int avail_len;
};

struct mtk_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
	struct mtk_uart_apdma_desc *desc;
	enum dma_transfer_direction dir;

	void __iomem *base;
	unsigned int irq;

	unsigned int rx_status;
};

static inline struct mtk_uart_apdmadev *
to_mtk_uart_apdma_dev(struct dma_device *d)
{
	return container_of(d, struct mtk_uart_apdmadev, ddev);
}

static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
{
	return container_of(c, struct mtk_chan, vc.chan);
}

static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
	(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
}

static void mtk_uart_apdma_write(struct mtk_chan *c,
				 unsigned int reg, unsigned int val)
{
	writel(val, c->base + reg);
}

static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
{
	return readl(c->base + reg);
}

static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}

static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int wpt, vff_sz;

	vff_sz = c->cfg.dst_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_WPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");

	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
		return;
	}

	wpt = mtk_uart_apdma_read(c, VFF_WPT);

	wpt += c->desc->avail_len;
	if ((wpt & VFF_RING_SIZE) == vff_sz)
		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

	/* Let DMA start moving data */
	mtk_uart_apdma_write(c, VFF_WPT, wpt);

	/* HW auto set to 0 when left size >= threshold */
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
}

static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int vff_sz;

	vff_sz = c->cfg.src_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_RPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
}

static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
}
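
/*
 * Note: with the TX threshold programmed to the full window, the TX
 * interrupt indicates that the hardware has drained the ring. The handler
 * above only acks and masks the channel; the descriptor itself is completed
 * by mtk_uart_apdma_chan_complete_handler() from the shared IRQ handler.
 */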

static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int len, wg, rg;
	int cnt;

	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
		return;

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	len = c->cfg.src_port_window_size;
	rg = mtk_uart_apdma_read(c, VFF_RPT);
	wg = mtk_uart_apdma_read(c, VFF_WPT);
	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

	/*
	 * The buffer is a ring buffer. If the wrap bits differ,
	 * the write pointer has started the next cycle.
	 */
	if ((rg ^ wg) & VFF_RING_WRAP)
		cnt += len;

	c->rx_status = d->avail_len - cnt;
	mtk_uart_apdma_write(c, VFF_RPT, wg);
}

static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;

	if (d) {
		list_del(&d->vd.node);
		vchan_cookie_complete(&d->vd);
		c->desc = NULL;
	}
}

static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
	struct dma_chan *chan = (struct dma_chan *)dev_id;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_rx_handler(c);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_tx_handler(c);
	mtk_uart_apdma_chan_complete_handler(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned int status;
	int ret;

	ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
	if (ret < 0) {
		pm_runtime_put_noidle(chan->device->dev);
		return ret;
	}

	mtk_uart_apdma_write(c, VFF_ADDR, 0);
	mtk_uart_apdma_write(c, VFF_THRE, 0);
	mtk_uart_apdma_write(c, VFF_LEN, 0);
	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);

	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		goto err_pm;

	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
	if (ret < 0) {
		dev_err(chan->device->dev, "Can't request dma IRQ\n");
		ret = -EINVAL;
		goto err_pm;
	}

	if (mtkd->support_33bits)
		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);

err_pm:
	pm_runtime_put_noidle(mtkd->ddev.dev);
	return ret;
}

static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	free_irq(c->irq, chan);

	tasklet_kill(&c->vc.task);

	vchan_free_chan_resources(&c->vc);

	pm_runtime_put_sync(mtkd->ddev.dev);
}

static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (!txstate)
		return ret;

	dma_set_residue(txstate, c->rx_status);

	return ret;
}
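
/*
 * For a DEV_TO_MEM transfer, c->rx_status holds avail_len minus the number
 * of bytes the hardware wrote into the window (see
 * mtk_uart_apdma_rx_handler()), so a client such as a UART driver can
 * recover the received byte count as buffer length minus the residue
 * reported by dmaengine_tx_status().
 */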

/*
 * dmaengine_prep_slave_single() calls this function with sglen == 1.
 * The 8250 UART uses a single ring buffer and handles one sg entry.
 */
static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
	(struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sglen, enum dma_transfer_direction dir,
	unsigned long tx_flags, void *context)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct mtk_uart_apdma_desc *d;

	if (!is_slave_direction(dir) || sglen != 1)
		return NULL;

	/* Now allocate and setup the descriptor */
	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->avail_len = sg_dma_len(sgl);
	d->addr = sg_dma_address(sgl);
	c->dir = dir;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
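
/*
 * Minimal usage sketch (illustrative only, not part of this driver): a UART
 * client would typically drive a channel through the generic dmaengine API,
 * with "buf" being the DMA address of its ring buffer and "len" its size:
 *
 *	struct dma_slave_config cfg = {
 *		.src_port_window_size = len,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM, 0);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */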

static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		vd = vchan_next_desc(&c->vc);
		c->desc = to_mtk_uart_apdma_desc(&vd->tx);

		if (c->dir == DMA_DEV_TO_MEM)
			mtk_uart_apdma_start_rx(c);
		else if (c->dir == DMA_MEM_TO_DEV)
			mtk_uart_apdma_start_tx(c);
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);
}
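
/*
 * Only one descriptor is in flight per channel (the !c->desc check above);
 * the hardware is not re-armed from the completion path, so the next
 * descriptor is only started on a subsequent issue_pending call.
 */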

static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	memcpy(&c->cfg, config, sizeof(*config));

	return 0;
}

static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;
	unsigned int status;
	LIST_HEAD(head);
	int ret;

	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);

	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
				 status, status != VFF_FLUSH_B, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	/*
	 * Stopping takes three steps:
	 * 1. set VFF_STOP to 1
	 * 2. wait for VFF_EN to become 0
	 * 3. clear VFF_STOP
	 */
	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

	synchronize_irq(c->irq);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	spin_unlock_irqrestore(&c->vc.lock, flags);
	synchronize_irq(c->irq);

	return 0;
}

static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
{
	while (!list_empty(&mtkd->ddev.channels)) {
		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
			struct mtk_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static const struct of_device_id mtk_uart_apdma_match[] = {
	{ .compatible = "mediatek,mt6577-uart-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);

static int mtk_uart_apdma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mtk_uart_apdmadev *mtkd;
	int bit_mask = 32, rc;
	struct mtk_chan *c;
	unsigned int i;

	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
	if (!mtkd)
		return -ENOMEM;

	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mtkd->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		rc = PTR_ERR(mtkd->clk);
		return rc;
	}

	if (of_property_read_bool(np, "mediatek,dma-33bits"))
		mtkd->support_33bits = true;

	if (mtkd->support_33bits)
		bit_mask = 33;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
	if (rc)
		return rc;

	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
	mtkd->ddev.device_alloc_chan_resources =
				mtk_uart_apdma_alloc_chan_resources;
	mtkd->ddev.device_free_chan_resources =
				mtk_uart_apdma_free_chan_resources;
	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	mtkd->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&mtkd->ddev.channels);

	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_UART_APDMA_NR_VCHANS);
	}

	for (i = 0; i < mtkd->dma_requests; i++) {
		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
		if (!c) {
			rc = -ENODEV;
			goto err_no_dma;
		}

		c->base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(c->base)) {
			rc = PTR_ERR(c->base);
			goto err_no_dma;
		}
		c->vc.desc_free = mtk_uart_apdma_desc_free;
		vchan_init(&c->vc, &mtkd->ddev);

		rc = platform_get_irq(pdev, i);
		if (rc < 0)
			goto err_no_dma;
		c->irq = rc;
	}

	pm_runtime_enable(&pdev->dev);

	rc = dma_async_device_register(&mtkd->ddev);
	if (rc)
		goto rpm_disable;

	platform_set_drvdata(pdev, mtkd);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
	if (rc)
		goto dma_remove;

	return rc;

dma_remove:
	dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
err_no_dma:
	mtk_uart_apdma_free(mtkd);
	return rc;
}

static void mtk_uart_apdma_remove(struct platform_device *pdev)
{
	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);

	mtk_uart_apdma_free(mtkd);

	dma_async_device_unregister(&mtkd->ddev);

	pm_runtime_disable(&pdev->dev);
}

#ifdef CONFIG_PM_SLEEP
static int mtk_uart_apdma_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_resume(struct device *dev)
{
	int ret;
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mtkd->clk);
		if (ret)
			return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_uart_apdma_runtime_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	return clk_prepare_enable(mtkd->clk);
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
			   mtk_uart_apdma_runtime_resume, NULL)
};

static struct platform_driver mtk_uart_apdma_driver = {
	.probe	= mtk_uart_apdma_probe,
	.remove	= mtk_uart_apdma_remove,
	.driver = {
		.name		= KBUILD_MODNAME,
		.pm		= &mtk_uart_apdma_pm_ops,
		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
	},
};

module_platform_driver(mtk_uart_apdma_driver);

MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
MODULE_LICENSE("GPL v2");