/*
 * drivers/net/ethernet/beckhoff/ec_bhf.c
 *
 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
 * These can be found on Beckhoff CX50xx industrial PCs.
 */
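/* Design note: the device is serviced purely by polling. No interrupt
 * handler is registered; instead an hrtimer (ec_bhf_timer_fun()) fires
 * every polling_frequency nanoseconds (TIMER_INTERVAL_NSEC, 20 us by
 * default) and processes both descriptor rings, which live in coherent
 * DMA buffers that the device addresses through per-channel DMA windows.
 */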
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#define TIMER_INTERVAL_NSEC	20000

#define INFO_BLOCK_SIZE		0x10
#define INFO_BLOCK_TYPE		0x0
#define INFO_BLOCK_REV		0x2
#define INFO_BLOCK_BLK_CNT	0x4
#define INFO_BLOCK_TX_CHAN	0x4
#define INFO_BLOCK_RX_CHAN	0x5
#define INFO_BLOCK_OFFSET	0x8

#define EC_MII_OFFSET		0x4
#define EC_FIFO_OFFSET		0x8
#define EC_MAC_OFFSET		0xc

#define MAC_FRAME_ERR_CNT	0x0
#define MAC_RX_ERR_CNT		0x1
#define MAC_CRC_ERR_CNT		0x2
#define MAC_LNK_LST_ERR_CNT	0x3
#define MAC_TX_FRAME_CNT	0x10
#define MAC_RX_FRAME_CNT	0x14
#define MAC_TX_FIFO_LVL		0x20
#define MAC_DROPPED_FRMS	0x28
#define MAC_CONNECTED_CCAT_FLAG	0x78

#define MII_MAC_ADDR		0x8
#define MII_MAC_FILT_FLAG	0xe
#define MII_LINK_STATUS		0xf

#define FIFO_TX_REG		0x0
#define FIFO_TX_RESET		0x8
#define FIFO_RX_REG		0x10
#define FIFO_RX_ADDR_VALID	(1u << 31)
#define FIFO_RX_RESET		0x18

#define DMA_CHAN_OFFSET		0x1000
#define DMA_CHAN_SIZE		0x8

#define DMA_WINDOW_SIZE_MASK	0xfffffffc
static struct pci_device_id ids[] = {
	{ PCI_DEVICE(0x15ec, 0x5000), },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
struct rx_header {
#define RXHDR_NEXT_ADDR_MASK	0xffffffu
#define RXHDR_NEXT_VALID	(1u << 31)
	__le32 next;
#define RXHDR_NEXT_RECV_FLAG	0x1
	__le32 recv;
#define RXHDR_LEN_MASK		0xfffu
	__le16 len;
	__le16 port;
	__le32 reserved;
	u8 timestamp[8];
} __packed;

#define PKT_PAYLOAD_SIZE	0x7e8

struct rx_desc {
	struct rx_header header;
	u8 data[PKT_PAYLOAD_SIZE];
} __packed;
struct tx_header {
	__le16 len;
#define TX_HDR_PORT_0		0x1
#define TX_HDR_PORT_1		0x2
	u8 port;
	u8 ts_enable;
#define TX_HDR_SENT		0x1
	__le32 sent;
	u8 timestamp[8];
} __packed;

struct tx_desc {
	struct tx_header header;
	u8 data[PKT_PAYLOAD_SIZE];
} __packed;
#define FIFO_SIZE	64

static long polling_frequency = TIMER_INTERVAL_NSEC;
struct bhf_dma {
	u8 *buf;
	size_t len;
	dma_addr_t buf_phys;

	u8 *alloc;
	size_t alloc_len;
	dma_addr_t alloc_phys;
};

struct ec_bhf_priv {
	struct net_device *net_dev;

	struct pci_dev *dev;

	void __iomem *io;
	void __iomem *dma_io;

	struct hrtimer hrtimer;

	int tx_dma_chan;
	int rx_dma_chan;
	void __iomem *ec_io;
	void __iomem *fifo_io;
	void __iomem *mii_io;
	void __iomem *mac_io;

	struct bhf_dma rx_buf;
	struct rx_desc *rx_descs;
	int rx_dnext;
	int rx_dcount;

	struct bhf_dma tx_buf;
	struct tx_desc *tx_descs;
	int tx_dcount;
	int tx_dnext;

	u64 stat_rx_bytes;
	u64 stat_tx_bytes;
};
#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)

#define ETHERCAT_MASTER_ID	0x14
static void ec_bhf_print_status(struct ec_bhf_priv *priv)
{
	struct device *dev = PRIV_TO_DEV(priv);

	dev_dbg(dev, "Frame error counter: %d\n",
		ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
	dev_dbg(dev, "RX error counter: %d\n",
		ioread8(priv->mac_io + MAC_RX_ERR_CNT));
	dev_dbg(dev, "CRC error counter: %d\n",
		ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
	dev_dbg(dev, "TX frame counter: %d\n",
		ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
	dev_dbg(dev, "RX frame counter: %d\n",
		ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
	dev_dbg(dev, "TX fifo level: %d\n",
		ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
	dev_dbg(dev, "Dropped frames: %d\n",
		ioread8(priv->mac_io + MAC_DROPPED_FRMS));
	dev_dbg(dev, "Connected with CCAT slot: %d\n",
		ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
	dev_dbg(dev, "Link status: %d\n",
		ioread8(priv->mii_io + MII_LINK_STATUS));
}
static void ec_bhf_reset(struct ec_bhf_priv *priv)
{
	iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
	iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
	iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
	iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);

	iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
	iowrite8(0, priv->fifo_io + FIFO_RX_RESET);

	iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
}
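/* Kick transmission of one descriptor. The word written to FIFO_TX_REG
 * below ORs the descriptor's byte offset within the TX DMA buffer with
 * the frame length, rounded up to 8 bytes and shifted left by 24; this
 * comment only mirrors what the write computes, the hardware's field
 * layout is not otherwise documented in this driver.
 */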
static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
{
	u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
	u32 addr = (u8 *)desc - priv->tx_buf.buf;

	iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);

	dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
}
static int ec_bhf_desc_sent(struct tx_desc *desc)
{
	return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
}
static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
{
	if (unlikely(netif_queue_stopped(priv->net_dev))) {
		/* Make sure that we perceive changes to tx_dnext. */
		smp_rmb();

		if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
			netif_wake_queue(priv->net_dev);
	}
}
static int ec_bhf_pkt_received(struct rx_desc *desc)
{
	return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
}
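/* Hand a descriptor back to the device: the RX FIFO register takes the
 * descriptor's byte offset within the RX DMA buffer, tagged with
 * FIFO_RX_ADDR_VALID.
 */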
static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
{
	iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
		  priv->fifo_io + FIFO_RX_REG);
}
static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
{
	struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
	struct device *dev = PRIV_TO_DEV(priv);

	while (ec_bhf_pkt_received(desc)) {
		int pkt_size = (le16_to_cpu(desc->header.len) &
				RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
		u8 *data = desc->data;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
		dev_dbg(dev, "Received packet, size: %d\n", pkt_size);

		if (skb) {
			memcpy(skb_put(skb, pkt_size), data, pkt_size);
			skb->protocol = eth_type_trans(skb, priv->net_dev);
			dev_dbg(dev, "Protocol type: %x\n", skb->protocol);

			priv->stat_rx_bytes += pkt_size;

			netif_rx(skb);
		} else {
			dev_err_ratelimited(dev,
				"Couldn't allocate a skb_buff for a packet of size %u\n",
				pkt_size);
		}

		desc->header.recv = 0;

		ec_bhf_add_rx_desc(priv, desc);

		priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
		desc = &priv->rx_descs[priv->rx_dnext];
	}
}
static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
{
	struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
						hrtimer);

	ec_bhf_process_rx(priv);
	ec_bhf_process_tx(priv);

	if (!netif_running(priv->net_dev))
		return HRTIMER_NORESTART;

	hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
	return HRTIMER_RESTART;
}
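/* BAR 0 begins with a table of function-block descriptors, each
 * INFO_BLOCK_SIZE (0x10) bytes long. Scan it for the EtherCAT master
 * block (type ETHERCAT_MASTER_ID) and resolve the EC, MII, FIFO and MAC
 * register windows from the offsets recorded in that block.
 */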
static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
	struct device *dev = PRIV_TO_DEV(priv);
	unsigned block_count, i;
	void __iomem *ec_info;

	dev_dbg(dev, "Info block:\n");
	dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
	dev_dbg(dev, "Revision of function: %x\n",
		(unsigned)ioread16(priv->io + INFO_BLOCK_REV));

	block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
	dev_dbg(dev, "Number of function blocks: %x\n", block_count);

	for (i = 0; i < block_count; i++) {
		u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
				    INFO_BLOCK_TYPE);
		if (type == ETHERCAT_MASTER_ID)
			break;
	}
	if (i == block_count) {
		dev_err(dev, "EtherCAT master with DMA block not found\n");
		return -ENODEV;
	}
	dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);

	ec_info = priv->io + i * INFO_BLOCK_SIZE;
	dev_dbg(dev, "EtherCAT master revision: %d\n",
		ioread16(ec_info + INFO_BLOCK_REV));

	priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
	dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
		priv->tx_dma_chan);

	priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
	dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
		priv->rx_dma_chan);

	priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
	priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
	priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
	priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);

	dev_dbg(dev,
		"EtherCAT block address: %p, fifo address: %p, mii address: %p, mac address: %p\n",
		priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);

	return 0;
}
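/* TX path: the frame is copied (with checksum, if needed) straight into
 * the next free descriptor of the coherent DMA buffer, so no per-packet
 * DMA mapping is done. If the descriptor after that one has not been sent
 * yet, the ring is full: stop the queue and let ec_bhf_process_tx() wake
 * it from the polling timer.
 */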
static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct tx_desc *desc;
	unsigned len;

	dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");

	desc = &priv->tx_descs[priv->tx_dnext];

	skb_copy_and_csum_dev(skb, desc->data);
	len = skb->len;

	memset(&desc->header, 0, sizeof(desc->header));
	desc->header.len = cpu_to_le16(len);
	desc->header.port = TX_HDR_PORT_0;

	ec_bhf_send_packet(priv, desc);

	priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;

	if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
		/* Make sure that updates to tx_dnext are perceived
		 * by the timer routine.
		 */
		smp_wmb();

		netif_stop_queue(net_dev);

		dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
		ec_bhf_print_status(priv);
	}

	priv->stat_tx_bytes += len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
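/* Worked example for the DMA window arithmetic below, with a hypothetical
 * mask of 0xfffff000 (a 4 KiB window): ~mask + 1 = 0x1000, so buf->len is
 * at most 0x1000 and alloc_len is 0x2000. If dma_alloc_coherent() were to
 * return alloc_phys = 0x12345678, then buf_phys = (0x12345678 + 0x1000) &
 * 0xfffff000 = 0x12346000: a mask-aligned block of 0x1000 bytes that lies
 * entirely inside the 0x2000-byte allocation.
 */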
static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
				struct bhf_dma *buf,
				int channel,
				int size)
{
	int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
	struct device *dev = PRIV_TO_DEV(priv);
	u32 mask;

	iowrite32(0xffffffff, priv->dma_io + offset);

	mask = ioread32(priv->dma_io + offset);
	mask &= DMA_WINDOW_SIZE_MASK;
	dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);

	/* We want a chunk of memory that is:
	 * - aligned to the mask we just read
	 * - at most ~mask + 1 bytes long.
	 * To guarantee that, allocate twice that size and pick an
	 * aligned window inside the allocation.
	 */
	buf->len = min_t(int, ~mask + 1, size);
	buf->alloc_len = 2 * buf->len;

	dev_dbg(dev, "Allocating %d bytes for channel %d",
		(int)buf->alloc_len, channel);
	buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
					GFP_KERNEL);
	if (buf->alloc == NULL) {
		dev_info(dev, "Failed to allocate buffer\n");
		return -ENOMEM;
	}

	buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
	buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);

	iowrite32(0, priv->dma_io + offset + 4);
	iowrite32(buf->buf_phys, priv->dma_io + offset);
	dev_dbg(dev, "Buffer: %x and read from dev: %x",
		(unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));

	return 0;
}
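/* Carve the TX DMA window into descriptors and mark each one as already
 * sent, so the entire ring is initially free for ec_bhf_start_xmit().
 */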
static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
{
	int i;

	priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
	priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf;
	priv->tx_dnext = 0;

	for (i = 0; i < priv->tx_dcount; i++)
		priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
}
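/* Carve the RX DMA window into descriptors and chain them for the device:
 * each header.next holds the buffer-relative offset of the following
 * descriptor, tagged RXHDR_NEXT_VALID; the last descriptor points to
 * offset 0, closing the ring.
 */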
static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
{
	int i;

	priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
	priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf;
	priv->rx_dnext = 0;

	for (i = 0; i < priv->rx_dcount; i++) {
		struct rx_desc *desc = &priv->rx_descs[i];
		u32 next;

		if (i != priv->rx_dcount - 1)
			next = (u8 *)(desc + 1) - priv->rx_buf.buf;
		else
			next = 0;
		next |= RXHDR_NEXT_VALID;
		desc->header.next = cpu_to_le32(next);
		desc->header.recv = 0;
		ec_bhf_add_rx_desc(priv, desc);
	}
}
static int ec_bhf_open(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);
	int err = 0;

	dev_info(dev, "Opening device\n");

	ec_bhf_reset(priv);

	err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
				   FIFO_SIZE * sizeof(struct rx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate rx buffer\n");
		goto out;
	}
	ec_bhf_setup_rx_descs(priv);

	dev_info(dev, "RX buffer allocated, address: %x\n",
		 (unsigned)priv->rx_buf.buf_phys);

	err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
				   FIFO_SIZE * sizeof(struct tx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate tx buffer\n");
		goto error_rx_free;
	}
	dev_dbg(dev, "TX buffer allocated, address: %x\n",
		(unsigned)priv->tx_buf.buf_phys);
	iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
	ec_bhf_setup_tx_descs(priv);

	netif_start_queue(net_dev);

	hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->hrtimer.function = ec_bhf_timer_fun;
	hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
		      HRTIMER_MODE_REL);

	dev_info(PRIV_TO_DEV(priv), "Device open\n");

	ec_bhf_print_status(priv);

	return 0;

error_rx_free:
	dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
			  priv->rx_buf.alloc_phys);
out:
	return err;
}
static int ec_bhf_stop(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);

	hrtimer_cancel(&priv->hrtimer);

	ec_bhf_reset(priv);

	netif_tx_disable(net_dev);

	dma_free_coherent(dev, priv->tx_buf.alloc_len,
			  priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
	dma_free_coherent(dev, priv->rx_buf.alloc_len,
			  priv->rx_buf.alloc, priv->rx_buf.alloc_phys);

	return 0;
}
static struct rtnl_link_stats64 *
ec_bhf_get_stats(struct net_device *net_dev,
		 struct rtnl_link_stats64 *stats)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);

	stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
				ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
				ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
	stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
	stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
	stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);

	stats->tx_bytes = priv->stat_tx_bytes;
	stats->rx_bytes = priv->stat_rx_bytes;

	return stats;
}
static const struct net_device_ops ec_bhf_netdev_ops = {
	.ndo_start_xmit		= ec_bhf_start_xmit,
	.ndo_open		= ec_bhf_open,
	.ndo_stop		= ec_bhf_stop,
	.ndo_get_stats64	= ec_bhf_get_stats,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct net_device *net_dev;
	struct ec_bhf_priv *priv;
	void __iomem *dma_io;
	void __iomem *io;
	int err = 0;

	err = pci_enable_device(dev);
	if (err)
		return err;

	pci_set_master(dev);

	err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		err = -EIO;
		goto err_disable_dev;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		goto err_disable_dev;
	}

	err = pci_request_regions(dev, "ec_bhf");
	if (err) {
		dev_err(&dev->dev, "Failed to request pci memory regions\n");
		goto err_disable_dev;
	}

	io = pci_iomap(dev, 0, 0);
	if (!io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 0");
		err = -EIO;
		goto err_release_regions;
	}

	dma_io = pci_iomap(dev, 2, 0);
	if (!dma_io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 2");
		err = -EIO;
		goto err_unmap;
	}

	net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto err_unmap_dma_io;
	}

	pci_set_drvdata(dev, net_dev);
	SET_NETDEV_DEV(net_dev, &dev->dev);

	net_dev->features = 0;
	net_dev->flags |= IFF_NOARP;

	net_dev->netdev_ops = &ec_bhf_netdev_ops;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->io = io;
	priv->dma_io = dma_io;
	priv->dev = dev;

	err = ec_bhf_setup_offsets(priv);
	if (err < 0)
		goto err_free_net_dev;

	memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);

	dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
		net_dev->dev_addr);

	err = register_netdev(net_dev);
	if (err < 0)
		goto err_free_net_dev;

	return 0;

err_free_net_dev:
	free_netdev(net_dev);
err_unmap_dma_io:
	pci_iounmap(dev, dma_io);
err_unmap:
	pci_iounmap(dev, io);
err_release_regions:
	pci_release_regions(dev);
err_disable_dev:
	pci_clear_master(dev);
	pci_disable_device(dev);

	return err;
}
*dev
)
669 struct net_device
*net_dev
= pci_get_drvdata(dev
);
670 struct ec_bhf_priv
*priv
= netdev_priv(net_dev
);
672 unregister_netdev(net_dev
);
673 free_netdev(net_dev
);
675 pci_iounmap(dev
, priv
->dma_io
);
676 pci_iounmap(dev
, priv
->io
);
677 pci_release_regions(dev
);
678 pci_clear_master(dev
);
679 pci_disable_device(dev
);
static struct pci_driver pci_driver = {
	.name		= "ec_bhf",
	.id_table	= ids,
	.probe		= ec_bhf_probe,
	.remove		= ec_bhf_remove,
};
static int __init ec_bhf_init(void)
{
	return pci_register_driver(&pci_driver);
}

static void __exit ec_bhf_exit(void)
{
	pci_unregister_driver(&pci_driver);
}

module_init(ec_bhf_init);
module_exit(ec_bhf_exit);
module_param(polling_frequency, long, S_IRUGO);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");