2 * Copyright (c) 2005 by MCST.
6 //#include <linux/config.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/ioctl.h>
11 #include <linux/errno.h>
12 #include <linux/version.h>
13 #include <linux/moduleparam.h>
14 #include <linux/interrupt.h>
15 #include <linux/delay.h>
16 #include <linux/cdev.h>
17 #include <linux/audit.h>
18 #include <linux/poll.h>
19 #include <linux/namei.h>
20 #include <linux/pci.h>
21 #include <linux/mount.h>
22 #include <linux/security.h>
24 //-----------------------------------------------------------------------------
// --- Module-wide constants, macros and globals ------------------------------
// NOTE(review): this listing is corrupted by extraction — the original file's
// line numbers are fused into the text and some lines are elided. Code below
// is kept byte-identical; only comments were added. Verify all observations
// against the pristine source.
25 #define MOKMWAITLASTTX // TODO
27 #define BUG56455 // [Bug 56455] e90: big pci register resources
29 #define MAX_MOKM 8 // Max number of MOK/M devices.
31 //-----------------------------------------------------------------------------
32 #include <linux/mcst/mokm.h>
35 //-----------------------------------------------------------------------------
36 MODULE_LICENSE ("GPL");
37 MODULE_AUTHOR ("Denis Fedotov, Andrew Kalita");
38 MODULE_DESCRIPTION("MCST MOKM driver");
40 //-----------------------------------------------------------------------------
41 #define MODULE_NAME "MOKM"
42 #define DEV_MODULE_NAME "mokm"
// NOTE(review): 0x8086 is the PCI vendor ID registered to Intel — confirm
// this is really the vendor ID the MOK/M board reports.
44 #define MOKM_VENDOR_ID 0x8086
45 #define MOKM_DEVICE_ID 0x4643
47 #define REQ_IRG_FLAG IRQF_SHARED
48 #define MAX_DRV_NM_SZ 64
49 extern struct dentry
*lookup_hash(struct nameidata
*nd
);
// NOTE(review): DEF_MUTEX_W is defined twice below (original lines 53 and 55:
// first as mutex_w, then as mutex_r). Original lines 52/54 are elided here —
// presumably an #ifdef/#else selecting single-lock vs dual-lock mode. Verify:
// an unconditional redefinition would silently make the write path take the
// read lock.
51 #define DEF_MUTEX_R mutex_r
53 #define DEF_MUTEX_W mutex_w
55 #define DEF_MUTEX_W mutex_r
58 //#if defined(__sparc__)
59 //#if defined(__e2k__)
61 //-----------------------------------------------------------------------------
// Name passed to request_irq; per-driver globals: chosen major number, number
// of probed devices, per-minor device table, and the sysfs class.
62 static char mokm_name
[] = "MCST,mokm"; // request_irq
64 unsigned int major
= 0;
66 unsigned int mokm_nr_devs
;
67 mokm_dev_t
*mokm_devices
[MAX_MOKM
];
69 static struct class *mokm_class
;
71 //=============================================================================
// Forward declarations of the buffer-queuing helpers defined later in the file.
72 static int set_buffer_in_RCV (mokm_dev_t
*mokm
);
73 static int set_buffer_in_XMT (mokm_dev_t
*mokm
);
75 //-----------------------------------------------------------------------------
// Reset the device, its peer, or one direction (RX/TX), then recycle the
// affected buffer lists back to free and clear the in-flight pointers.
// NOTE(review): listing is corrupted — declarations/braces around the #ifdef
// MOKMWAITLASTTX region (e.g. the initialization of try_num, orig line ~87)
// are elided. Comments only; code kept byte-identical.
78 * @mokm: device private struct
79 * @cmd: reset command (0,MOKM_CR_RESET,MOKM_CR_PEAR_RESET,MOKM_CR_RESET_XMT,MOKM_CR_RESET_RCV)
81 * called by mokm_open, mokm_close, mokm_intr_handler(MOKM_ISR_RCV_RESET)
83 static void mokm_reset_device (mokm_dev_t
*mokm
, u32 cmd
)
85 mokm_pool_buf_t
* pool_buf
;
86 unsigned long flags_w
, flags_r
;
// try_num: bounded-spin counter for draining the transmitter; its initial
// value is set on an elided line — TODO confirm in pristine source.
88 unsigned long try_num
;
89 #endif // MOKMWAITLASTTX
91 if(mokm
->debug
& MOKM_DBG_RESET
) PDEBUG("%d [%lu]: RESET: code = %s (%u)\n", mokm
->instance
, jiffies
,
92 (MOKM_CR_RESET
==cmd
)?"RESET":(
93 (MOKM_CR_PEAR_RESET
==cmd
)?"PEAR":(
94 (MOKM_CR_RESET_XMT
==cmd
)?"XMT":(
95 (MOKM_CR_RESET_RCV
==cmd
)?"RCV":(
96 (0==cmd
)?"soft":"unknown"
// Bounded busy-wait: poll TCR until the transmitter goes idle or try_num
// expires, so an in-flight DMA is not cut off by the reset below.
99 // wait for last transmit finished
100 #ifdef MOKMWAITLASTTX
102 while(ioread32(&mokm
->regs
->TCR
.r
) & TRC_TX_ACT
) {
103 try_num
--; if (0 == try_num
) break;
105 #endif // MOKMWAITLASTTX
// Issue the reset command (always OR-ing in the endian bit), then re-assert
// the endian bit in TRC since the reset may have cleared it.
107 iowrite32(MOKM_CR_ENDIAN
| cmd
, &mokm
->regs
->CR
);
108 iowrite32(ioread32(&mokm
->regs
->TRC
.r
) | MOKM_TRC_ENDIAN
, &mokm
->regs
->TRC
.r
);
110 if(MOKM_CR_PEAR_RESET
== cmd
) {
// Strip the full-reset bit so the list-recycling tests below treat a full
// reset like "both directions".
114 cmd
&= ~MOKM_CR_RESET
;
// Transmit side: move every ready/busy write buffer back to the free list
// under the write-pool lock, and drop the in-transmit pointer.
116 if((0 == cmd
) || (MOKM_CR_RESET_XMT
== cmd
)) {
117 spin_lock_irqsave(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_lock write_pool
119 // Set WRITE buffs as free
120 pool_buf
= &mokm
->write_pool
;
122 if(mokm
->debug
& MOKM_DBG_RESET
) PDEBUG("%d [%lu]: RESET: list_empty(xmt_ready_list)\n", mokm
->instance
, jiffies
);
123 while (!list_empty(&pool_buf
->ready_list
)) {
124 list_move_tail(pool_buf
->ready_list
.next
, &pool_buf
->free_list
);
127 while (!list_empty(&pool_buf
->busy_list
)) {
128 list_move_tail(pool_buf
->busy_list
.next
, &pool_buf
->free_list
);
130 mokm
->in_trm
= NULL
; // Off WRITE_IS_ACTIVE
131 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_unlock write_pool
// Receive side: same recycling for the read pool under the read-pool lock.
134 if((0 == cmd
) || (MOKM_CR_RESET_RCV
== cmd
)) {
135 spin_lock_irqsave(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_lock read_pool
137 // Set READ buffs as free
138 pool_buf
= &mokm
->read_pool
;
140 if(mokm
->debug
& MOKM_DBG_RESET
) PDEBUG("%d [%lu]: RESET: list_empty(rcv_ready_list)\n", mokm
->instance
, jiffies
);
141 while (!list_empty(&pool_buf
->ready_list
)) {
142 list_move_tail(pool_buf
->ready_list
.next
, &pool_buf
->free_list
);
145 while (!list_empty(&pool_buf
->busy_list
)) {
146 list_move_tail(pool_buf
->busy_list
.next
, &pool_buf
->free_list
);
148 mokm
->in_rcv
= NULL
; // Off READ_IS_ACTIVE
149 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
151 } // mokm_reset_device
153 //-----------------------------------------------------------------------------
// Debug dump of the full device register window (offsets 0x00..0x3c) via
// PDEBUG. ISR is deliberately printed as 0 — reading ISR would clear pending
// interrupts (see the commented-out ioread32 on the orig line 172).
// NOTE(review): closing brace of this function is on an elided line.
155 * Print all hardware registers
157 * called by mokm_ioctl[MOKM_IOC_WR_BUF]
159 static void dump_all_regs (mokm_dev_t
*mokm
, char *string
)
162 int inst
= mokm
->instance
;
165 PDEBUG("%d: Dump all regs (%s):\n", inst
, string
);
166 PDEBUG("%d \tBase + 0x0000: TDAR 0x%08X\n", inst
, ioread32(&mokm
->regs
->TDAR
));
167 PDEBUG("%d \tBase + 0x0004: TDCR 0x%08X\n", inst
, ioread32(&mokm
->regs
->TDCR
));
168 PDEBUG("%d \tBase + 0x0008: TCR 0x%08X\n", inst
, ioread32(&mokm
->regs
->TCR
.r
));
169 PDEBUG("%d \tBase + 0x000c: RDAR 0x%08X\n", inst
, ioread32(&mokm
->regs
->RDAR
));
170 PDEBUG("%d \tBase + 0x0010: RTR 0x%08X\n", inst
, ioread32(&mokm
->regs
->RTR
));
171 PDEBUG("%d \tBase + 0x0014: RCR 0x%08X\n", inst
, ioread32(&mokm
->regs
->RCR
.r
));
172 PDEBUG("%d \tBase + 0x0018: ISR 0x%08X\n", inst
, 0/*ioread32(&mokm->regs->ISR)*/);
173 PDEBUG("%d \tBase + 0x001c: IMR 0x%08X\n", inst
, ioread32(&mokm
->regs
->IMR
));
174 PDEBUG("%d \tBase + 0x0020: CR 0x%08X\n", inst
, ioread32(&mokm
->regs
->CR
));
175 PDEBUG("%d \tBase + 0x0024: PMR 0x%08X\n", inst
, ioread32(&mokm
->regs
->PMR
));
176 PDEBUG("%d \tBase + 0x0028: DR1 0x%08X\n", inst
, ioread32(&mokm
->regs
->DR1
));
177 PDEBUG("%d \tBase + 0x002c: DR2 0x%08X\n", inst
, ioread32(&mokm
->regs
->DR2
));
178 PDEBUG("%d \tBase + 0x0030: TRC 0x%08X\n", inst
, ioread32(&mokm
->regs
->TRC
.r
));
179 PDEBUG("%d \tBase + 0x0034: TC 0x%08X\n", inst
, ioread32(&mokm
->regs
->TC
.r
));
180 PDEBUG("%d \tBase + 0x0038: RDCR 0x%08X\n", inst
, ioread32(&mokm
->regs
->RDCR
));
181 PDEBUG("%d \tBase + 0x003c: TET 0x%08X\n\n", inst
, ioread32(&mokm
->regs
->TET
));
184 //-----------------------------------------------------------------------------
// Dump the accumulated per-device statistics counters (mokm->stat) via
// PDEBUG, grouped as: interrupt causes, command-channel events, RX events,
// TX events, miscellaneous, and last-write-error breakdown. Read-only.
188 * called by mokm_close
190 static void print_stat (mokm_dev_t
*mokm
)
193 int inst
= mokm
->instance
;
196 PDEBUG("%d \t--- interrupts ---\n", inst
);
197 PDEBUG("%d \t%u - Reset request\n", inst
, mokm
->stat
.rsv_reset
);
198 PDEBUG("%d \t%u - Connection lost\n", inst
, mokm
->stat
.lost_connect
);
199 PDEBUG("%d \t%u - Pear optic error\n", inst
, mokm
->stat
.pear_optic_error
);
200 PDEBUG("%d \t%u - Our optic error\n", inst
, mokm
->stat
.optic_error
);
201 PDEBUG("%d \t--- int cmd ---\n", inst
);
202 PDEBUG("%d \t%u - Receive CMD buffer full\n", inst
, mokm
->stat
.rx_cmd_buf_full
);
203 PDEBUG("%d \t%u - ERROR transmit CMD\n", inst
, mokm
->stat
.tx_cmd_error
);
204 PDEBUG("%d \t%u - Transmit CMD buffer busy\n", inst
, mokm
->stat
.tx_cmd_buf_busy
);
205 PDEBUG("%d \t%u - We recieved CMD\n", inst
, mokm
->stat
.rx_cmd_int
);
206 PDEBUG("%d \t--- int rx ---\n", inst
);
207 PDEBUG("%d \t%u - Reciever Done\n", inst
, mokm
->stat
.rx_buf_ok
);
208 PDEBUG("%d \t%llu - Received bytes\n", inst
, mokm
->stat
.rx_size_all
);
209 PDEBUG("%d \t%u - Reciever Done with empty buf ptr\n", inst
, mokm
->stat
.rx_buf_lost
);
210 PDEBUG("%d \t%u - PCI ERROR on RCV DATA\n", inst
, mokm
->stat
.rx_buf_pci_err
);
211 PDEBUG("%d \t%u - DATA recieve ERROR\n", inst
, mokm
->stat
.rx_buf_error
);
212 PDEBUG("%d \t--- int tx ---\n", inst
);
213 PDEBUG("%d \t%u - Transmitter Done\n", inst
, mokm
->stat
.tx_buf_ok
);
214 PDEBUG("%d \t%llu - Transmitted bytes\n", inst
, mokm
->stat
.tx_size_all
);
215 PDEBUG("%d \t%u - Transmiter Done with empty buf ptr\n", inst
, mokm
->stat
.tx_unknown_buf
);
216 PDEBUG("%d \t%u - PCI ERROR on TRANSMIT DATA\n", inst
, mokm
->stat
.tx_buf_pci_err
);
217 PDEBUG("%d \t%u - DATA transmit ERROR\n", inst
, mokm
->stat
.tx_buf_error
);
218 PDEBUG("%d \t%u - Transmiter channel error\n", inst
, mokm
->stat
.tx_channel_error
);
219 PDEBUG("%d \t--- all ---\n", inst
);
220 PDEBUG("%d \t%u - Remote request\n", inst
, mokm
->stat
.tx_remote_req
);
221 PDEBUG("%d \t%u - EXCEPTIONs (last: %u)\n", inst
, mokm
->stat
.exception
, mokm
->stat
.exception_code
);
222 PDEBUG("%d \t--- last write error ---\n", inst
);
223 PDEBUG("%d \t%u - Pear still not ready\n", inst
, mokm
->stat
.lwr_pearrd
);
224 PDEBUG("%d \t%u - copy_from_user failure\n", inst
, mokm
->stat
.lwr_cfu_fail
);
225 PDEBUG("%d \t%u - can't find requested buf\n", inst
, mokm
->stat
.lwr_nobuf
);
226 PDEBUG("%d \t%u - Timeout in IOC_WR_BUF\n", inst
, mokm
->stat
.lwr_timeout
);
227 PDEBUG("%d \t%u - cant xmit buf\n", inst
, mokm
->stat
.lwr_cantxmit
);
228 PDEBUG("%d \t-----------\n", inst
);
232 //=============================================================================
235 //-----------------------------------------------------------------------------
// Shared interrupt handler. Reads ISR (read clears pending bits, per the
// "Get & Clear" comment), then walks the cause bits: link state, common
// errors, peer-reset request, command-channel events, RX completion/errors,
// TX completion/errors, peer-ready, remote-request and exception bits.
// NOTE(review): listing is corrupted — the local declarations of intr,
// err_num and cmd_num (and several braces/else-arms) are on elided lines.
// Comments only; code kept byte-identical.
240 static irqreturn_t
mokm_intr_handler (int irq
, void *arg
/*, struct pt_regs *regs*/)
245 mokm_dev_t
*mokm
= (mokm_dev_t
*) arg
;
246 unsigned long flags_w
, flags_r
;
247 //mokm_buf_t* p_buff;
// Shared IRQ line: bail out when the device context is absent or none of
// our status bits are set.
249 if ( mokm
== NULL
) return (IRQ_NONE
);
251 // Get & Clear interrupt
252 intr
= ioread32(&mokm
->regs
->ISR
) & MOKM_ISR_ALL
;
253 if ( intr
== 0 ) return (IRQ_NONE
);
256 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR = 0x%08X (%u)\n", mokm
->instance
, jiffies
, intr
, intr
);
// Link (connect) state change: TRC_B_WR_RDY low means the link was lost.
259 if ( intr
& MOKM_ISR_CONECT
) {
260 intr
&= ~MOKM_ISR_CONECT
;
261 if ((ioread32(&mokm
->regs
->TRC
.r
) & TRC_B_WR_RDY
) == 0) {
262 mokm
->stat
.lost_connect
++;
263 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_CONECT - lost (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.lost_connect
);
265 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_CONECT - find\n", mokm
->instance
, jiffies
);
267 err_num
= 1; // for reinit Rx
// Optical-layer errors (ours or the peer's): count and schedule Rx reinit.
270 /// One or more common error presented. Handle them
271 if ( intr
& MOKM_ISR_ANY_ERR
) {
272 if ( intr
& MOKM_ISR_PEAR_FIBER_ERR
) {
273 mokm
->stat
.pear_optic_error
++;
274 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_PEAR_FIBER_ERR (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.pear_optic_error
);
275 err_num
= 1; // for reinit Rx
277 if ( intr
& MOKM_ISR_FIBER_ERR
) {
278 mokm
->stat
.optic_error
++;
279 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_FIBER_ERR (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.optic_error
);
280 err_num
= 1; // for reinit Rx
282 intr
&= ~MOKM_ISR_ANY_ERR
;
// Peer requested a reset: reset both directions and re-arm the receiver.
285 /// Peer whant us to reset
286 if ( intr
& MOKM_ISR_RCV_RESET
) {
287 intr
&= ~MOKM_ISR_RCV_RESET
;
288 mokm
->stat
.rsv_reset
++;
289 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_RCV_RESET (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.rsv_reset
);
290 mokm_reset_device(mokm
, MOKM_CR_RESET_RCV
);
291 mokm_reset_device(mokm
, MOKM_CR_RESET_XMT
);
292 set_buffer_in_RCV(mokm
); // prepare to receive
// Command-channel housekeeping: buffer-full, transmit error, buffer-busy.
297 // Receive CMD buffer full
298 if ( intr
& MOKM_ISR_CMD_BUF_FULL
) {
299 intr
&= ~MOKM_ISR_CMD_BUF_FULL
;
300 mokm
->stat
.rx_cmd_buf_full
++;
301 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_CMD_BUF_FULL (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.rx_cmd_buf_full
);
303 // ERROR transmit CMD
304 if ( intr
& MOKM_ISR_CMD_XMT_ERR
) {
305 intr
&= ~MOKM_ISR_CMD_XMT_ERR
;
306 mokm
->stat
.tx_cmd_error
++;
307 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_CMD_XMT_ERR (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.tx_cmd_error
);
309 // Transmit CMD buffer busy
310 if ( intr
& MOKM_ISR_CMD_BUF_BUSY
) {
311 intr
&= ~MOKM_ISR_CMD_BUF_BUSY
;
312 mokm
->stat
.tx_cmd_buf_busy
++;
313 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_CMD_BUF_BUSY (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.tx_cmd_buf_busy
);
// Incoming command(s): read the count from CR and wake the command waiter.
315 /// We recieved CMD handle all of them (if multy)
316 if ( intr
& MOKM_ISR_RCV_CMD
) {
317 intr
&= ~MOKM_ISR_RCV_CMD
;
318 cmd_num
= MOKM_GET_CMD_NUM(ioread32(&mokm
->regs
->CR
));
319 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_RCV_CMD (%u), cmd_num = %d\n", mokm
->instance
, jiffies
, mokm
->stat
.rx_cmd_int
+1, cmd_num
);
321 mokm
->stat
.rx_cmd_int
++;
322 wake_up_interruptible(&mokm
->wq_ready_rcmd
);
// RX path: on successful receive, record the size (RTR holds the word
// count; <<2 converts to bytes), queue the buffer on ready_list and wake
// readers; on errors, count them and re-arm/reuse the current Rx buffer.
326 /// Reciever Interrupts
327 if ( intr
& MOKM_ISR_ALL_RX
) {
328 err_num
= 0; // for reinit Rx
329 mokm_pool_buf_t
* pool_buf
= &mokm
->read_pool
;
331 if ( intr
& MOKM_ISR_RCV_BUF
) {
332 spin_lock_irqsave(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_lock read_pool
334 mokm
->in_rcv
->size
= ioread32(&mokm
->regs
->RTR
); // get received size
335 mokm
->stat
.rx_size_all
+= (u64
)((mokm
->in_rcv
->size
)<<2); // in bytes
336 list_move_tail(&mokm
->in_rcv
->list
, &pool_buf
->ready_list
);
338 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
340 mokm
->stat
.rx_buf_ok
++;
341 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_RCV_BUF - Ok (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.rx_buf_ok
);
342 wake_up_interruptible(&mokm
->wq_ready_rbuf
);
344 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
345 mokm
->stat
.rx_buf_lost
++;
346 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_RCV_BUF - buffer lost (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.rx_buf_lost
);
349 // PCI ERROR on RCV DATA
350 if ( intr
& MOKM_ISR_PCIBUS_RCV_ERR
) {
352 mokm
->stat
.rx_buf_pci_err
++;
353 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_PCIBUS_RCV_ERR (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.rx_buf_pci_err
);
355 // DATA recieve ERROR
356 if ( intr
& MOKM_ISR_RCV_DATA_ERR
) {
358 mokm
->stat
.rx_buf_error
++;
359 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_RCV_DATA_ERR (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.rx_buf_error
);
361 // ERROR recieve pkt num
362 if ( intr
& MOKM_ISR_RCV_NUM_ERR
) {
364 mokm
->stat
.rx_buf_error
++;
365 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_RCV_NUM_ERR (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.rx_buf_error
);
367 intr
&= ~MOKM_ISR_ALL_RX
;
369 // Set buf for Receive
370 spin_lock_irqsave(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_lock read_pool
372 iowrite32((u32
)mokm
->in_rcv
->dma_addr
, &mokm
->regs
->RDAR
); // reuse buffer if errors
373 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
374 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_ALL_RX - reuse Rx buffer #%u\n", mokm
->instance
, jiffies
, mokm
->in_rcv
->num
);
376 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
377 set_buffer_in_RCV(mokm
); // prepare to receive
// TX path: on completion move the in-flight write buffer to ready_list and
// clear in_trm; on errors count them; then (async mode) try to start the
// next transmit if the peer signals ready, and wake blocked writers.
381 /// Transmiter interrupt
382 if ( intr
& MOKM_ISR_ALL_TX
) {
383 mokm_pool_buf_t
*pool_buf
= &mokm
->write_pool
;
385 if ( intr
& MOKM_ISR_XMT_BUF
) {
386 if(mokm
->in_trm
) { // WRITE_IS_ACTIVE
387 mokm
->stat
.tx_size_all
+= (u64
)((mokm
->in_trm
->size
)<<2); // in bytes
388 spin_lock_irqsave(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_lock write_pool
389 list_move_tail(&mokm
->in_trm
->list
/*pool_buf->busy_list.next*/, &pool_buf
->ready_list
);
390 mokm
->in_trm
= NULL
; // Off WRITE_IS_ACTIVE
391 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_unlock write_pool
393 mokm
->stat
.tx_buf_ok
++;
394 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_XMT_BUF - Ok (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.tx_buf_ok
);
397 mokm
->stat
.tx_unknown_buf
++;
398 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: MOKM_ISR_XMT_BUF - buffer unknown (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.tx_unknown_buf
);
401 // PCI ERROR on TRANSMIT DATA
402 if ( intr
& MOKM_ISR_PCIBUS_XMT_ERR
) {
404 mokm
->stat
.tx_buf_pci_err
++;
405 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_PCIBUS_XMT_ERR (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.tx_buf_pci_err
);
407 // DATA transmit ERROR
408 if ( intr
& MOKM_ISR_XMT_DATA_ERR
) {
410 mokm
->stat
.tx_buf_error
++;
411 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_XMT_DATA_ERR (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.tx_buf_error
);
413 // Transmiter channel error
414 if ( intr
& MOKM_ISR_XMT_ERR
) {
416 mokm
->stat
.tx_channel_error
++;
417 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_XMT_ERR (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.tx_channel_error
);
418 //mokm_reset_device(mokm, MOKM_CR_RESET_XMT);
420 intr
&= ~MOKM_ISR_ALL_TX
;
422 if(mokm
->in_trm
) { // WRITE_IS_ACTIVE
423 // if errors - move buffer
424 spin_lock_irqsave(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_lock write_pool
425 list_move_tail(&mokm
->in_trm
->list
, &pool_buf
->ready_list
);
426 mokm
->in_trm
= NULL
; // Off WRITE_IS_ACTIVE
427 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_unlock write_pool
430 if ( !mokm
->sync_mode
) {
432 /* TODO: ??? check if last try passed ??? */
433 /*#ifdef MOKMWAITLASTTX
434 if((ioread32(&mokm->regs->TRC.r) & TRC_B_PEARRD_RDY) && !(ioread32(&mokm->regs->TCR.r) & TRC_TX_ACT)) {
436 if(ioread32(&mokm
->regs
->TRC
.r
) & TRC_B_PEARRD_RDY
) {
437 /*#endif // MOKMWAITLASTTX*/
438 set_buffer_in_XMT(mokm
); // START Transmit
441 wake_up_interruptible(&mokm
->wq_ready_wbuf
);
443 intr
&= ~MOKM_ISR_PEAR_READY_RCV
; // this START Transmit
446 /// We ready to Transmit / Remote ready to receive
447 if ( intr
& MOKM_ISR_PEAR_READY_RCV
) {
448 intr
&= ~MOKM_ISR_PEAR_READY_RCV
;
449 if(mokm
->debug
& MOKM_DBG_INTR
) nPDEBUG("%d [%lu]: ISR_PEAR_READY_RCV - We ready to Transmit\n", mokm
->instance
, jiffies
);
450 /*#ifdef MOKMWAITLASTTX
451 if((ioread32(&mokm->regs->TRC.r) & TRC_B_PEARRD_RDY) && !(ioread32(&mokm->regs->TCR.r) & TRC_TX_ACT)) {
453 if(ioread32(&mokm
->regs
->TRC
.r
) & TRC_B_PEARRD_RDY
) {
454 /*#endif // MOKMWAITLASTTX*/
455 if(mokm
->debug
& MOKM_DBG_INTR
) nPDEBUG("%d [%lu]: ISR_PEAR_READY_RCV - Remote ready to receive\n", mokm
->instance
, jiffies
);
456 set_buffer_in_XMT(mokm
); // START Transmit
// NOTE(review): as listed, orig line 461 clears MOKM_ISR_REMOTE_REQ *before*
// orig line 462 tests it, so this branch can never be taken (same pattern at
// 469/470 for MOKM_ISR_EXCEPTION). Possibly intentional masking, possibly a
// line-ordering artifact of the corrupted listing — verify against the
// pristine source before "fixing".
461 intr
&= ~MOKM_ISR_REMOTE_REQ
;
462 if ( intr
& MOKM_ISR_REMOTE_REQ
) {
464 mokm
->stat
.tx_remote_req
++;
465 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_REMOTE_REQ (%u)\n", mokm
->instance
, jiffies
, mokm
->stat
.tx_remote_req
);
469 intr
&= ~MOKM_ISR_EXCEPTION
;
470 if ( intr
& MOKM_ISR_EXCEPTION
) {
472 mokm
->stat
.exception
++;
473 mokm
->stat
.exception_code
= ioread32(&mokm
->regs
->TET
);
474 if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_EXCEPTION (%u), code: %u\n", mokm
->instance
, jiffies
, mokm
->stat
.exception
, mokm
->stat
.exception_code
);
// If a common error was seen above (err_num set), re-arm the receiver with
// the current buffer's DMA address.
477 // Reinit Rx if any common errors
479 // Set buf for Receive
480 spin_lock_irqsave(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_lock read_pool
482 iowrite32((u32
)mokm
->in_rcv
->dma_addr
, &mokm
->regs
->RDAR
); // reuse buffer if errors
484 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
487 /*} while((intr = mokm->regs->ISR & MOKM_ISR_ALL) != 0);*/
488 if (intr
!= 0) if(mokm
->debug
& MOKM_DBG_INTR
) PDEBUG("%d [%lu]: ISR_* unknown interrupts = %08X\n", mokm
->instance
, jiffies
, intr
);
491 } // mokm_intr_handler
494 //=============================================================================
497 //-----------------------------------------------------------------------------
// Allocate one contiguous DMA pool: __get_free_pages for the backing memory
// (GFP_ATOMIC | GFP_DMA), SetPageReserved on every page (so the pages can be
// mmap'ed / survive swap accounting), then pci_map_single for the bus
// address. real_size is rounded up to the page-order granularity.
// NOTE(review): the return statements (success 0 / failure -1 per the header
// comment) are on elided lines in this corrupted listing.
499 * Alloc memory for pool
500 * @mokm: device private struct
501 * @size: pool size for alloc
502 * @phys_mem: return hardware DMA base address
503 * @real_size: return allocated size
504 * @virt_memory: return user access base address
505 * @dir: PCI_DMA_FROMDEVICE or PCI_DMA_TODEVICE
507 * Returns 0 on success, -1 on failure
509 * called by init_pool_buff
511 static int mokm_mem_alloc (mokm_dev_t
*mokm
, size_t size
, dma_addr_t
*phys_mem
, size_t *real_size
, unsigned long *virt_memory
, int dir
)
514 struct page
*map
, *mapend
;
516 // *buf = (caddr_t) pci_alloc_consistent(mokm->pdev , pool_size, &dev_memory);
518 order
= get_order(size
);
519 *virt_memory
= __get_free_pages(GFP_ATOMIC
/*GFP_KERNEL*/ | GFP_DMA
, order
);
520 if (!(*virt_memory
)) {
521 ERROR_MSG("%d: mem_alloc ERROR - Cannot bind DMA address order: %d size: 0x%lx\n", mokm
->instance
, order
, (unsigned long)size
);
// Mark every backing page reserved, from the first through the last page of
// the rounded-up allocation.
525 mapend
= virt_to_page((*virt_memory
) + (PAGE_SIZE
<< order
) - 1);
526 for(map
= virt_to_page((*virt_memory
)); map
<= mapend
; map
++) SetPageReserved(map
);
528 *phys_mem
= pci_map_single((struct pci_dev
*)mokm
->pdev
, (void *)*virt_memory
, size
, dir
);
529 *real_size
= PAGE_SIZE
<< order
;
531 if(mokm
->debug
& MOKM_DBG_MEM
) PDEBUG("%d: mem_alloc FINISH - va: 0x%lx fa: 0x%x size: 0x%lx real_size: 0x%lx\n", mokm
->instance
, *virt_memory
, *phys_mem
, size
, *real_size
);
536 //-----------------------------------------------------------------------------
// Release a pool allocated by mokm_mem_alloc: ClearPageReserved on each
// backing page, pci_unmap_single the bus mapping, then free_pages. Must be
// called with the same size/dir used at allocation time.
539 * @mokm: device private struct
541 * @phys_mem: hardware DMA base address
542 * @virt_memory: user access base address
543 * @dir: PCI_DMA_FROMDEVICE or PCI_DMA_TODEVICE
545 * called by free_bufs
547 static void mokm_mem_free (mokm_dev_t
*mokm
, size_t size
, dma_addr_t phys_mem
, caddr_t virt_memory
, int dir
)
551 struct page
*map
, *mapend
;
553 mem
= (caddr_t
)virt_memory
;
554 order
= get_order(size
);
556 mapend
= virt_to_page(mem
+ (PAGE_SIZE
<< order
) - 1);
557 for (map
= virt_to_page(mem
); map
<= mapend
; map
++) {
558 ClearPageReserved(map
);
560 pci_unmap_single((struct pci_dev
*)mokm
->pdev
, phys_mem
, size
, dir
);
561 free_pages((unsigned long)virt_memory
, order
);
565 //=============================================================================
568 //-----------------------------------------------------------------------------
// Linear scan of a buffer list for the entry whose ->num matches num1.
// Caller must hold the corresponding pool spinlock (see header comment).
// NOTE(review): the fall-through "return NULL" after the loop is on an
// elided line in this corrupted listing.
570 * Search buffer in list
571 * @list1: buffer list
572 * @num1: buffer number
574 * Returns mokm_buf_t* on success, NULL on failure
576 * called by mokm_ioctl
577 * need spin_lock (read or write)
579 static mokm_buf_t
* search_in_list (struct list_head
* list1
, int num1
)
581 struct list_head
* tmp
;
582 mokm_buf_t
* ret
= NULL
;
584 list_for_each(tmp
, list1
) {
585 ret
= list_entry(tmp
, mokm_buf_t
, list
);
586 if(ret
->num
== num1
) return (ret
);
591 //-----------------------------------------------------------------------------
// Build one buffer pool (read or write): init the three lists, allocate one
// contiguous DMA region for all MOKM_BUF_NUM buffers, then carve it into
// fixed-size slots (virtual + DMA address pairs) chained onto free_list.
// The 0xac memset is a debug fill pattern.
593 * Init Receive or Transmit Buffers Pool
594 * @mokm: device private struct
595 * @pool_buf: &mokm->read_pool or &mokm->write_pool
596 * @buf: &mokm->rbuff or &mokm->wbuff
597 * @dir: PCI_DMA_FROMDEVICE or PCI_DMA_TODEVICE
599 * Returns 0 on success, negative on failure
601 * called by mokm_init
603 static int init_pool_buff (mokm_dev_t
*mokm
, mokm_pool_buf_t
* pool_buf
, caddr_t
*buf
, int dir
)
607 size_t pool_size
= (MOKM_BUF_SIZE
* MOKM_BUF_NUM
);
608 dma_addr_t dev_memory
= 0;
610 INIT_LIST_HEAD(&pool_buf
->ready_list
);
611 INIT_LIST_HEAD(&pool_buf
->free_list
);
612 INIT_LIST_HEAD(&pool_buf
->busy_list
);
614 // Alloc memory for pool (get user access address and DMA address)
615 if (mokm_mem_alloc(mokm
, pool_size
, &dev_memory
, &real_sz
, (unsigned long *)buf
, dir
)) {
616 ERROR_MSG("%d: ERROR: Cannot alloc device buffer\n", mokm
->instance
);
// Slot i lives at base + i*MOKM_BUF_SIZE in both the virtual and the DMA
// address space; all slots start out free.
620 // Init all buffers in pool
621 for(i
= 0; i
< MOKM_BUF_NUM
; i
++) {
622 pool_buf
->buf
[i
].num
= i
;
623 pool_buf
->buf
[i
].st
= MOKM_BUF_ST_FREE
;
624 pool_buf
->buf
[i
].size
= 0;
625 pool_buf
->buf
[i
].buf_addr
= (caddr_t
)(*buf
+ MOKM_BUF_SIZE
*i
);
626 pool_buf
->buf
[i
].dma_addr
= (dma_addr_t
/*caddr_t*/)(dev_memory
+ MOKM_BUF_SIZE
*i
);
627 /*DEBUG?*/memset((void *)pool_buf
->buf
[i
].buf_addr
, 0xac, MOKM_BUF_SIZE
);
628 list_add_tail(&pool_buf
->buf
[i
].list
, &pool_buf
->free_list
);
634 //-----------------------------------------------------------------------------
// Debug helper: print number, virtual address, DMA address and state of
// every buffer in the read pool, then the write pool. Read-only.
638 * called by mokm_init
640 static void show_buff (mokm_dev_t
*mokm
)
644 mokm_pool_buf_t
* pool_buf
= &mokm
->read_pool
;
645 PDEBUG("%d: RCV BUFFERS:\n", mokm
->instance
);
646 for (i
= 0; i
< MOKM_BUF_NUM
; i
++) {
647 PDEBUG("%d: NUM %05u ADDR %p DMA %llu STATE 0x%05x\n",mokm
->instance
, pool_buf
->buf
[i
].num
,
648 pool_buf
->buf
[i
].buf_addr
, (u64
)pool_buf
->buf
[i
].dma_addr
, pool_buf
->buf
[i
].st
);
651 pool_buf
= &mokm
->write_pool
;
652 PDEBUG("%d: WRITE BUFFERS:\n", mokm
->instance
);
653 for (i
= 0; i
< MOKM_BUF_NUM
; i
++) {
654 PDEBUG("%d: NUM %05u ADDR %p DMA %llu STATE 0x%05x\n",mokm
->instance
, pool_buf
->buf
[i
].num
,
655 pool_buf
->buf
[i
].buf_addr
, (u64
)pool_buf
->buf
[i
].dma_addr
, pool_buf
->buf
[i
].st
);
659 //-----------------------------------------------------------------------------
// Arm the receiver with the next free read buffer: take the head of
// free_list, move it to busy_list, and program its DMA address into RDAR.
// All list manipulation happens under the read-pool spinlock.
// NOTE(review): the assignment of mokm->in_rcv (and the declaration of buff)
// appears to be on elided lines — orig lines 684/686-687 are missing here.
661 * Select buffer for next receive
662 * @mokm: device private struct
664 * Returns 0 on success, negative on failure (TODO: void)
666 * called by mokm_intr_handler, mokm_open, mokm_ioctl(MOKM_IOC_PUT_RD_BUF)
668 static int set_buffer_in_RCV (mokm_dev_t
*mokm
)
670 mokm_pool_buf_t
* pool_buf
= &mokm
->read_pool
;
672 unsigned long flags_r
;
674 spin_lock_irqsave(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_lock read_pool
676 if ( list_empty(&pool_buf
->free_list
) ) {
677 //iowrite32((u_int)0, &mokm->regs->RDAR);
678 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
679 if(mokm
->debug
& MOKM_DBG_RX
) PDEBUG("%d [%lu]: < set_buffer_in_RCV - NO free buffer for RCV\n", mokm
->instance
, jiffies
);
683 buff
= list_entry(pool_buf
->free_list
.next
, mokm_buf_t
, list
);
685 list_move_tail(pool_buf
->free_list
.next
, &pool_buf
->busy_list
);
688 iowrite32((u_int
)buff
->dma_addr
, &mokm
->regs
->RDAR
);
691 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
693 if(mokm
->debug
& MOKM_DBG_RX
) {
694 if(buff
) PDEBUG("%d [%lu]: < set_buffer_in_RCV - DONE\n",mokm
->instance
, jiffies
);
695 else PDEBUG("%d [%lu]: < set_buffer_in_RCV - ERROR\n",mokm
->instance
, jiffies
);
699 } // set_buffer_in_RCV
// Start the next transmit: if nothing is already in flight (in_trm unset)
// and busy_list is non-empty, take its head, record it as in_trm, and
// program TDAR (DMA address) and TDCR (size) to kick the hardware.
// Done under the write-pool spinlock.
703 * @mokm: device private struct
705 * Returns 0 on success, negative on failure (TODO: void)
707 * called by mokm_intr_handler & ioctl
709 static int set_buffer_in_XMT (mokm_dev_t
*mokm
)
711 mokm_pool_buf_t
* pool_buf
= &mokm
->write_pool
;
713 unsigned long flags_w
;
// A transmit is already active — do not touch the hardware again.
715 if (mokm
->in_trm
) { // WRITE_IS_ACTIVE
716 if(mokm
->debug
& MOKM_DBG_TX
) PDEBUG("%d [%lu]: > set_buffer_in_XMT - There is allready some buffer #%u in xmt\n", mokm
->instance
, jiffies
, (unsigned int)mokm
->in_trm
->num
);
719 if ( list_empty(&pool_buf
->busy_list
) ) {
720 if(mokm
->debug
& MOKM_DBG_TX
) nPDEBUG("%d [%lu]: > set_buffer_in_XMT - NO buffer for XMT\n", mokm
->instance
, jiffies
);
724 spin_lock_irqsave(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_lock write_pool
725 buff
= list_entry(pool_buf
->busy_list
.next
, mokm_buf_t
, list
);
727 mokm
->in_trm
= buff
; // On WRITE_IS_ACTIVE
728 iowrite32((u_int
)buff
->dma_addr
, &mokm
->regs
->TDAR
);
729 iowrite32(buff
->size
, &mokm
->regs
->TDCR
);
731 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_unlock write_pool
734 if(mokm
->debug
& MOKM_DBG_TX
) PDEBUG("%d [%lu]: > set_buffer_in_XMT - DONE: WR id #%u addr %p DMA %llu\n", mokm
->instance
, jiffies
, buff
->num
, buff
->buf_addr
, (u64
)buff
->dma_addr
);
736 if(mokm
->debug
& MOKM_DBG_TX
) PDEBUG("%d [%lu]: > set_buffer_in_XMT - ERROR: NO buffer for XMT\n", mokm
->instance
, jiffies
);
739 } // set_buffer_in_XMT
741 //-----------------------------------------------------------------------------
// Tear down both DMA pools. The DMA base of slot 0 doubles as the pool's
// base bus address (slots are carved contiguously in init_pool_buff), and a
// zero dma_addr in slot 0 marks a pool that was never allocated.
743 * Free read and write pools
744 * @mokm: device private struct
746 * called by mokm_detach
748 static void free_bufs (mokm_dev_t
*mokm
)
750 size_t pool_size
= (MOKM_BUF_SIZE
*MOKM_BUF_NUM
);
751 mokm_pool_buf_t
* pool_buf
;
754 pool_buf
= &mokm
->read_pool
;
755 if(mokm
->debug
& MOKM_DBG_MEM
) PDEBUG("%d: do free read bufs Addr %p DMA %llu\n", mokm
->instance
, mokm
->rbuff
, (u64
)pool_buf
->buf
[0].dma_addr
);
756 if ( pool_buf
->buf
[0].dma_addr
) {
757 mokm_mem_free(mokm
, pool_size
, (dma_addr_t
)pool_buf
->buf
[0].dma_addr
, mokm
->rbuff
, PCI_DMA_FROMDEVICE
);
758 //pci_free_consistent(mokm->pdev, pool_size, mokm->rbuff, (dma_addr_t)(pool_buf->buf[0].dma_addr));
761 pool_buf
= &mokm
->write_pool
;
762 if(mokm
->debug
& MOKM_DBG_MEM
) PDEBUG("%d: do free write bufs Addr %p DMA %llu\n", mokm
->instance
, mokm
->wbuff
, (u64
)pool_buf
->buf
[0].dma_addr
);
763 if ( pool_buf
->buf
[0].dma_addr
) {
764 mokm_mem_free(mokm
, pool_size
, (dma_addr_t
)pool_buf
->buf
[0].dma_addr
, mokm
->wbuff
, PCI_DMA_TODEVICE
);
765 //pci_free_consistent(mokm->pdev, pool_size, mokm->wbuff, (dma_addr_t)(pool_buf->buf[0].dma_addr));
770 //=============================================================================
773 //-----------------------------------------------------------------------------
// open() file operation: single-opener device. Validates the minor, stashes
// the per-device struct in filp->private_data, rejects a second open (the
// ->open flag is checked under the read-pool spinlock; the flag-setting line
// is elided in this listing), resets both directions, arms the receiver,
// and finally unmasks all interrupts.
// NOTE(review): mokm is dereferenced after "mokm = filp->private_data" on
// orig line 783 before being (re)assigned from mokm_devices on 789 — the
// intervening elided lines presumably guard this; verify in pristine source.
775 * Module open file operation
778 static int mokm_open (struct inode
*inode
, struct file
*filp
)
781 int minor
= MINOR(inode
->i_rdev
);
783 mokm
= (mokm_dev_t
*)filp
->private_data
;
786 if ( minor
>= mokm_nr_devs
) {
789 mokm
= mokm_devices
[minor
];
790 filp
->private_data
= mokm
;
793 PDEBUG("%d: open minor %d\n", mokm
->instance
, minor
);
795 spin_lock(&mokm
->DEF_MUTEX_R
); /// spin_lock read_pool
797 if (mokm
->open
== 1) {
798 spin_unlock(&mokm
->DEF_MUTEX_R
); /// spin_unlock read_pool
799 WARNING_MSG("%d: WARING:\t MOKM allready open and busy! Try again later\n", mokm
->instance
);
803 spin_unlock(&mokm
->DEF_MUTEX_R
); /// spin_unlock read_pool
805 // No Reset and One buf send to reciever (Do it after interrupt handler is avalible)
806 mokm_reset_device(mokm
, MOKM_CR_RESET_RCV
);
807 mokm_reset_device(mokm
, MOKM_CR_RESET_XMT
);
808 set_buffer_in_RCV(mokm
); // prepare to receive
810 // Enable all interrupts here
811 mokm
->intr_mask
= MOKM_IMR_ALL_ENABLE
;
812 iowrite32(MOKM_IMR_ALL_ENABLE
, &mokm
->regs
->IMR
);
817 //-----------------------------------------------------------------------------
// release() file operation: dumps registers, masks all interrupts (IMR=0),
// reads ISR into a throwaway volatile to acknowledge any pending bits, does
// a full device reset, briefly takes/releases the read-pool lock (the work
// done inside it is on elided lines), and detaches private_data.
819 * Module close file operation
822 static int mokm_close (struct inode
*inode
, struct file
*filp
)
825 int minor
= MINOR(inode
->i_rdev
);
826 unsigned long flags_r
;
827 unsigned long volatile dummy
;
829 if ( minor
>= mokm_nr_devs
) return -ENODEV
;
830 mokm
= mokm_devices
[minor
];
833 dump_all_regs(mokm
, "close");
835 // disable interrupts here
837 iowrite32(0, &mokm
->regs
->IMR
);
// Read ISR once to clear/acknowledge anything still pending after masking.
838 dummy
= ioread32(&mokm
->regs
->ISR
);
840 mokm_reset_device(mokm
, MOKM_CR_RESET
);
842 spin_lock_irqsave(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_lock read_pool
844 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
846 filp
->private_data
= NULL
;
851 //-----------------------------------------------------------------------------
853 * Module ioctl file operation
856 static long mokm_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
863 unsigned long flags_w
, flags_r
;
866 mokm
= (mokm_dev_t
*)filp
->private_data
;
867 inst
= mokm
->instance
;
869 //IN_TR(inst, mokm_ioctl, 1, cmd, arg); // start
871 PDEBUG("%d: ioctl cmd = %X\n", mokm
->instance
, cmd
);
874 case MOKM_IOC_RESET
:
876 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_RESET\n", inst
, jiffies
);
877 iowrite32(MOKM_CR_ENDIAN
| MOKM_CR_PEAR_RESET
, &mokm
->regs
->CR
);
878 iowrite32(ioread32(&mokm
->regs
->TRC
.r
) | MOKM_TRC_ENDIAN
, &mokm
->regs
->TRC
.r
);
879 ret
= 0; break; // return
882 /** int mokm_get_write_buf(int fd, int *pool, int **write_array) */
883 case MOKM_IOC_GET_WR_BUF
:
885 mokm_pool_buf_t
*pool_buf
= &mokm
->write_pool
;
888 // Search for free for write buffer
889 if (list_empty(&pool_buf
->free_list
)) {
890 ERROR_MSG("%d: IOC_GET_WR_BUF Error: No Free Bufs\n", inst
);
891 ret
= -EAGAIN
; break; // return
894 spin_lock_irqsave(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_lock write_pool
895 p_buff
= list_entry(pool_buf
->free_list
.next
, mokm_buf_t
, list
);
896 list_move_tail(&p_buff
->list
, &pool_buf
->ready_list
);
897 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_unlock write_pool
899 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: > IOC_GET_WR_BUF - DONE\n", inst
, jiffies
);
900 ret
= p_buff
->num
; break; // return
901 } // MOKM_IOC_GET_WR_BUF
903 /** int mokm_write(int fd, int buf_num, int size) */
904 case MOKM_IOC_WR_BUF
: // User want write buffer #{n_buf = arg}
906 mokm_pool_buf_t
*pool_buf
= &mokm
->write_pool
;
910 // Pear or transmiter still not ready
911 #ifdef MOKMWAITLASTTX
912 if (!((ioread32(&mokm
->regs
->TRC
.r
) & TRC_B_PEARRD_RDY
) && !(ioread32(&mokm
->regs
->TCR
.r
) & TRC_TX_ACT
))) {
914 if (!(ioread32(&mokm
->regs
->TRC
.r
) & TRC_B_PEARRD_RDY
)) {
915 #endif // MOKMWAITLASTTX
916 /*if(mokm->debug & MOKM_DBG_IOCTL) PDEBUG("%d [%lu]: > IOC_WR_BUF Break - Pear still not ready\n", inst, jiffies);*/
917 mokm
->stat
.lwr_pearrd
++;
918 ret
= -EAGAIN
; break; // return
921 if (copy_from_user((caddr_t
)&buf_s
, (caddr_t
)arg
, sizeof(mokm_bufwr_t
))) {
922 mokm
->stat
.lwr_cfu_fail
++;
923 ERROR_MSG("%d: IOC_WR_BUF Error: copy_from_user failure\n", inst
);
924 ret
= -EBUSY
; break; // return
928 spin_lock_irqsave(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_lock write_pool
929 p_buff
= search_in_list(&pool_buf
->ready_list
, buf_s
.buf_num
);
930 if (p_buff
== NULL
) {
931 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_unlock write_pool
932 mokm
->stat
.lwr_nobuf
++;
933 ERROR_MSG("%d: IOC_WR_BUF Error: cant find buf %d\n", inst
, buf_s
.buf_num
);
934 ret
= -EAGAIN
; break; // return
936 p_buff
->size
= (buf_s
.size
> MOKM_BUF_SIZE
)?MOKM_BUF_SIZE
:buf_s
.size
;
937 // Mark this buff as busy and place in in the end of queue
938 list_move_tail(&p_buff
->list
, &pool_buf
->busy_list
);
939 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_unlock write_pool
941 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: > IOC_WR_BUF - User want to write %d words\n", inst
, jiffies
, buf_s
.size
);
944 set_buffer_in_XMT(mokm
);
946 if ( mokm
->sync_mode
) {
947 timeout
= 2*HZ
; // 2 sec
948 r
= wait_event_interruptible_timeout(mokm
->wq_ready_wbuf
, (NULL
== mokm
->in_trm
), timeout
); // while(WRITE_IS_ACTIVE)
950 // timeout time was reached
951 mokm
->stat
.lwr_timeout
++;
952 ERROR_MSG("%d: IOC_WR_BUF - timeout was reached\n", inst
);
953 // mokm_reset_device(mokm, MOKM_CR_RESET_XMT); /// RESET TX
954 ret
= -ETIMEDOUT
; break; // return
957 mokm
->stat
.lwr_cantxmit
++;
958 ERROR_MSG("%d: IOC_WR_BUF Error: cant xmit buf %d\n", inst
, buf_s
.buf_num
);
959 ret
= -EFAULT
; break; // return
961 ret
= p_buff
->size
; break; // return
965 ret
= 0; break; // return
969 /** int put_write_buf(int fd, int buf_num) */
970 case MOKM_IOC_PUT_WR_BUF
:
972 mokm_pool_buf_t
*pool_buf
= &mokm
->write_pool
;
974 int buf_num
= (int)arg
;
976 // Search for write buffer to free
977 p_buff
= search_in_list(&pool_buf
->ready_list
, buf_num
);
978 if (p_buff
== NULL
) {
979 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: > IOC_PUT_WR_BUF - Wait while current XMIT done\n", inst
, jiffies
);
981 r
= wait_event_interruptible_timeout(mokm
->wq_ready_wbuf
, (p_buff
= search_in_list(&pool_buf
->ready_list
, buf_num
)) != NULL
, timeout
);
982 if (p_buff
== NULL
) {
983 ERROR_MSG("%d: IOC_PUT_WR_BUF Error: cant find buf %d\n", inst
, buf_num
);
984 ret
= -EAGAIN
; break; // return
988 spin_lock_irqsave(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_lock write_pool
989 list_move_tail(&p_buff
->list
, &pool_buf
->free_list
);
990 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_W
, flags_w
); /// spin_unlock write_pool
992 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: > IOC_PUT_WR_BUF - DONE\n", inst
, jiffies
);
993 ret
= 0; break; // return
994 } // MOKM_IOC_PUT_WR_BUF
997 /** mokm_read (int fd, int* pool, int** read_array, int* buf_num) */
998 case MOKM_IOC_RD_BUF
: // User want to read received buffer if any
1000 mokm_pool_buf_t
* pool_buf
= &mokm
->read_pool
;
1003 // if nothing to read...
1004 if (list_empty(&pool_buf
->ready_list
)) {
1005 /*if(mokm->debug & MOKM_DBG_IOCTL) PDEBUG("%d [%lu]: < IOC_RD_BUF Break - nothing to read\n", inst, jiffies);*/
1006 ret
= -EAGAIN
; break; // return
1009 spin_lock_irqsave(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_lock read_pool
1010 p_buff
= list_entry(pool_buf
->ready_list
.next
, mokm_buf_t
, list
);
1011 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
1013 if (p_buff
== NULL
) {
1014 ERROR_MSG("%d: IOC_RD_BUF Error: BUFF == NULL!\n", inst
);
1015 ret
= -EFAULT
; break; // return
1018 if ( p_buff
->num
< 0 || p_buff
->num
> MOKM_BUF_NUM
) {
1019 ERROR_MSG("%d: IOC_RD_BUF Error: Rcv wrong buff_num = %u\n", inst
, p_buff
->num
);
1020 ret
= -EFAULT
; break; // return
1023 if (copy_to_user((caddr_t
)arg
, (caddr_t
)&(p_buff
->num
), sizeof(u_long
))) {
1024 ERROR_MSG("%d: IOC_RD_BUF Error: copy_to_user failure\n", inst
);
1025 ret
= -EINVAL
; break; // return
1028 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: < IOC_RD_BUF - DONE: read #%u buf, sz = %u, addr %p DMA %llu (Real 0x%x)\n", inst
, jiffies
, p_buff
->num
, p_buff
->size
, p_buff
->buf_addr
, (u64
)p_buff
->dma_addr
, ioread32(&mokm
->regs
->RDAR
));
1029 ret
= p_buff
->size
; break; // return
1030 } // MOKM_IOC_RD_BUF
1032 /** mokm_put_read_buf (int fd, int buf_num) */
1033 case MOKM_IOC_PUT_RD_BUF
:
1035 mokm_pool_buf_t
*pool_buf
= &mokm
->read_pool
;
1037 int buf_num
= (int)arg
;
1040 spin_lock_irqsave(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_lock read_pool
1041 // Search for read buffer to free
1042 p_buff
= search_in_list(&pool_buf
->ready_list
/*busy_list*/, buf_num
);
1043 if (p_buff
== NULL
) {
1044 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
1045 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: < IOC_PUT_RD_BUF Error: cant find buf %d\n", inst
, jiffies
, buf_num
);
1046 ret
= -EAGAIN
; break; // return
1049 sf
= list_empty(&pool_buf
->free_list
);
1050 list_move_tail(&p_buff
->list
, &pool_buf
->free_list
);
1052 // if returns last free buff
1053 if(mokm
->in_rcv
== NULL
/*&& (sf)*/) {
1054 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
1055 set_buffer_in_RCV(mokm
); // prepare to receive
1057 spin_unlock_irqrestore(&mokm
->DEF_MUTEX_R
, flags_r
); /// spin_unlock read_pool
1060 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: < IOC_PUT_RD_BUF - DONE\n", inst
, jiffies
);
1061 ret
= 0; break; // return
1062 } // MOKM_IOC_PUT_RD_BUF
1065 /** mokm_write_cmd (int fd, int* cmd) */
1066 case MOKM_IOC_SND_MSG
:
1069 u_int32_t cmd
= (u_int32_t
)arg
;
1071 // transmiter still not ready
1072 if ((ioread32(&mokm
->regs
->TRC
.r
) & TRC_B_WR_RDY
) == 0) {
1073 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_SND_MSG - Tranmiter NOT ready\n", inst
, jiffies
);
1074 ret
= -EBUSY
; break; // return
1078 iowrite32(cmd
, &mokm
->regs
->PMR
);
1080 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_SND_MSG - Send CMD 0x%x\n", inst
, jiffies
, cmd
);
1081 ret
= 0; break; // return
1082 } // MOKM_IOC_SND_MSG
1084 /** mokm_read_cmd (int fd, int* cmd) */
1085 case MOKM_IOC_RCV_MSG
:
1089 timeout
= 2*HZ
; // 1 sec
1090 /*while (0 == MOKM_GET_CMD_NUM(mokm->regs->CR)) {*/
1091 r
= wait_event_interruptible_timeout(mokm
->wq_ready_rcmd
, (MOKM_GET_CMD_NUM(ioread32(&mokm
->regs
->CR
)) != 0), timeout
);
1093 // timeout time was reached
1094 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_RCV_MSG - timeout time was reached\n", inst
, jiffies
);
1096 } else if (r
< 0) { // Interrupted/Error
1097 ERROR_MSG("%d: IOC_RCV_MSG: Interrupted\n", inst
);
1101 if( ret
< 0 ) break; // return
1104 cmd
= ioread32(&mokm
->regs
->PMR
);
1106 if (copy_to_user((caddr_t
)arg
, (caddr_t
)&cmd
, sizeof(u_int32_t
))) {
1107 ERROR_MSG("%d: IOC_RCV_MSG copy_to_user failure\n", inst
);
1108 ret
= -EINVAL
; break; // return
1111 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_RCV_MSG - Receive CMD 0x%x\n", inst
, jiffies
, cmd
);
1112 ret
= 0; break; // return
1113 } // MOKM_IOC_RCV_MSG
1116 /** mokm_get_stat (int fd) */
1117 case MOKM_IOC_GET_STAT
:
1119 struct list_head
*h
;
1120 mokm
->stat
.n_free_w_buf
=0;
1121 mokm
->stat
.n_free_r_buf
=0;
1123 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_GET_STAT\n", inst
, jiffies
);
1125 list_for_each(h
, &mokm
->write_pool
.free_list
)
1126 mokm
->stat
.n_free_w_buf
++;
1127 list_for_each(h
, &mokm
->read_pool
.free_list
)
1128 mokm
->stat
.n_free_r_buf
++;
1130 if (copy_to_user((caddr_t
)arg
, (caddr_t
)&mokm
->stat
, sizeof(mokm_stat_t
))) {
1131 ERROR_MSG("%d: IOC_GET_STAT copy_to_user failure\n", inst
);
1132 ret
= -EINVAL
; break; // return
1134 ret
= 0; break; // return
1135 } // MOKM_IOC_GET_STAT
1137 /** mokm_clear_stat (int fd) */
1138 case MOKM_IOC_CLEAR_STAT
:
1140 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_CLEAR_STAT\n", inst
, jiffies
);
1141 bzero((caddr_t
)&mokm
->stat
, sizeof(mokm_stat_t
));
1142 ret
= 0; break; // return
1143 } // MOKM_IOC_CLEAR_STAT
1147 case MOKM_IOC_DBG_POINT
:
1149 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_DBG_POINT\n", inst
, jiffies
);
1151 dump_all_regs(mokm
, "dbg point");
1153 } // MOKM_IOC_DBG_POINT
1156 case MOKM_IOC_GET_MAXXFER
:
1158 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_GET_MAXXFER\n", inst
, jiffies
);
1159 ret
= MOKM_BUF_SIZE
;
1161 } // MOKM_IOC_GET_MAXXFER
1164 case MOKM_IOC_SET_DBG
:
1166 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_SET_DBG\n", inst
, jiffies
);
1167 mokm
->debug
= (int)arg
;
1168 ret
= 0; break; // return
1172 case MOKM_IOC_GET_RD_WAIT
:
1174 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_GET_RD_WAIT\n", inst
, jiffies
);
1175 ret
= (int)mokm
->rd_wait_usecs
;
1179 case MOKM_IOC_GET_WR_WAIT
:
1181 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_GET_WR_WAIT\n", inst
, jiffies
);
1182 ret
= (int)mokm
->wr_wait_usecs
;
1186 case MOKM_IOC_SET_RD_WAIT
:
1188 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_SET_RD_WAIT\n", inst
, jiffies
);
1189 mokm
->rd_wait_usecs
= (long)arg
;
1190 ret
= 0; break; // return
1193 case MOKM_IOC_SET_WR_WAIT
:
1195 if(mokm
->debug
& MOKM_DBG_IOCTL
) PDEBUG("%d [%lu]: IOC_SET_WR_WAIT\n", inst
, jiffies
);
1196 mokm
->wr_wait_usecs
= (long)arg
;
1197 ret
= 0; break; // return
1201 ERROR_MSG("%d: unknown ioctl cmd = 0x%x\n", inst
, cmd
);
1202 ret
= -EFAULT
; break; // return
1209 #ifdef CONFIG_COMPAT
/*
 * do_ioctl() - helper used by the 32-bit compat path: forwards the
 * request to the native mokm_ioctl() handler unchanged.
 * NOTE(review): the declaration of ret and the trailing return were
 * lost in extraction (original lines 1212-1218 missing).
 */
1211 static int do_ioctl(struct file
*f
, unsigned cmd
, unsigned long arg
)
1215 ret
= mokm_ioctl(f
, cmd
, arg
);
/*
 * mokm_compat_ioctl() - compat_ioctl entry for 32-bit callers on a
 * 64-bit kernel.  Visible behavior: forward recognized commands to
 * do_ioctl(), otherwise return -ENOIOCTLCMD.
 * NOTE(review): the dispatch between the two visible returns
 * (presumably a switch on cmd) was lost in extraction -- confirm
 * against the full source.
 */
1220 static long mokm_compat_ioctl(struct file
*f
, unsigned cmd
, unsigned long arg
)
1225 return do_ioctl(f
, cmd
, arg
);
1227 return -ENOIOCTLCMD;
1231 #endif // CONFIG_COMPAT
1233 //-----------------------------------------------------------------------------
1235 * Module mmap file operation
/*
 * mokm_mmap() - map one of the driver's DMA buffer pools into user space.
 *
 * The mmap page offset selects the pool:
 *   [0, POOLBUFS)            -> receive pool (mokm->rbuff)
 *   [POOLBUFS, 2*POOLBUFS)   -> write pool   (mokm->wbuff)
 * The requested span is validated against POOLBUFS and mapped with
 * remap_pfn_range().
 *
 * NOTE(review): error-path return statements and several closing braces
 * were lost in extraction; comments describe visible statements only.
 */
1238 static int mokm_mmap (struct file
*filp
, struct vm_area_struct
*vma
)
1241 unsigned long mem_start
;
1242 unsigned long offset
= (vma
->vm_pgoff
<< PAGE_SHIFT
);
1243 unsigned long vm_start
;
/* No device bound to this file -- nothing to map. */
1245 if ((mokm
= (mokm_dev_t
*)filp
->private_data
) == NULL
) {
1249 if(mokm
->debug
& MOKM_DBG_MEM
) PDEBUG("%d: mmap: start\n", mokm
->instance
);
/* Select the physical base of the requested pool from the offset. */
1251 if ( offset
>= 0 && offset
< POOLBUFS
) {
1252 if(mokm
->debug
& MOKM_DBG_MEM
) PDEBUG("%d: mmap: RD BUFS %p\n", mokm
->instance
, mokm
->rbuff
);
1253 mem_start
= virt_to_phys(mokm
->rbuff
);
1254 } else if ( offset
>= POOLBUFS
&& (offset
< (POOLBUFS
+ POOLBUFS
)) ) {
1256 if(mokm
->debug
& MOKM_DBG_MEM
) PDEBUG("%d: mmap: WR BUFS %p\n", mokm
->instance
, mokm
->wbuff
);
1257 mem_start
= virt_to_phys(mokm
->wbuff
);
1262 mem_start
+= offset
;
/* Reject mappings whose offset exceeds the requested span. */
1264 if ( vma
->vm_start
+ offset
> vma
->vm_end
) {
1265 ERROR_MSG("%d: MMAP ERROR: Error offset more than size\n ", mokm
->instance
);
/* Cap the mapping at one pool's worth of memory. */
1269 if (vma
->vm_end
- vma
->vm_start
> POOLBUFS
) {
1270 ERROR_MSG("%d: MMAP ERROR: vma->vm_end - vma->vm_start > POOLBUFS\n", mokm
->instance
);
1274 vm_start
= vma
->vm_start
;
/* Pin the VMA: readable/writable, never expanded or dumped. */
1276 vma
->vm_flags
|= (VM_READ
| VM_WRITE
| VM_DONTEXPAND
| VM_DONTDUMP
);
1279 LOG_MSG("%d: mmap:\tmem: %#x; off: %#x; off_r: %#x; st: %#x; en: %#x "
1280 "\tmof: %#x; sof: %#x; st_en: %#x; sz: %#x; \n", mokm
->instance
,
1282 (u_int
)vma
->vm_pgoff
,
1284 (u_int
)vma
->vm_start
,
1286 (u_int
)(mem_start
+ offset
),
1287 (u_int
)(vma
->vm_start
+ offset
),
1288 (u_int
)(vma
->vm_end
- vma
->vm_start
),
/* Install the physical pages into the user VMA. */
1292 if (remap_pfn_range(vma
, vm_start
, (mem_start
>> PAGE_SHIFT
), vma
->vm_end
- vma
->vm_start
, vma
->vm_page_prot
)) {
1293 ERROR_MSG("%d: MMAP ERROR: Error remap memory to user\n ", mokm
->instance
);
1300 //-----------------------------------------------------------------------------
/*
 * File operations for the /dev/mokm* character devices.
 * NOTE(review): extraction dropped some initializer lines (original
 * lines 1303, 1309-1311) -- presumably .open/.mmap/.poll entries and
 * the closing brace; confirm against the full source.
 */
1301 static struct file_operations mokm_fops
= {
1302 .owner
= THIS_MODULE
,
1304 .release
= mokm_close
,
1305 .unlocked_ioctl
= mokm_ioctl
,
1306 #ifdef CONFIG_COMPAT
1307 .compat_ioctl
= mokm_compat_ioctl
,
1308 #endif // CONFIG_COMPAT
1312 //-----------------------------------------------------------------------------
1316 * called by mokm_init, mokm_exit
/*
 * mokm_detach() - staged teardown of one MOK/M device, driven by
 * mokm->attach_level (raised step by step during mokm_init()).
 * Each visible statement undoes one attach step: IRQ, PCI drvdata,
 * cdev, register mapping, chrdev region.
 * NOTE(review): the case labels of the switch were lost in extraction;
 * presumably this is a fall-through ladder from the highest level
 * reached -- confirm against the full source.
 */
1318 static void mokm_detach ( mokm_dev_t
*mokm
)
1320 switch (mokm
->attach_level
) {
/* Release the shared interrupt line. */
1326 free_irq(mokm
->irq
, mokm
);
/* Clear the device pointer stored in the PCI device. */
1329 pci_set_drvdata(mokm
->pdev
, NULL
);
/* Remove the character device from the system. */
1331 cdev_del(&mokm
->cdev
);
/* Unmap the BAR0 register window, if it was mapped. */
1337 if(mokm
->regs
) iounmap(mokm
->regs
);
1338 //pci_iounmap(mokm->pdev, mokm->regs);
1340 //pci_disable_device(mokm->pdev);
/* Give back the whole reserved major/minor range. */
1343 unregister_chrdev_region(MKDEV(major
, 0), MAX_MOKM
);
1350 //-----------------------------------------------------------------------------
1354 * insmod ./${module}.ko $*
/*
 * mokm_init() - module entry point.
 *
 * Reserves a dynamic chrdev region, then probes every PCI device with
 * MOKM_VENDOR_ID/MOKM_DEVICE_ID: allocates and zeroes a mokm_dev_t,
 * initializes locks and wait queues, maps BAR0 registers, resets the
 * hardware, builds the read/write DMA buffer pools, and registers a
 * cdev per device.  Afterwards it installs the interrupt handler for
 * each found device and creates /sys/class/mokm/mokm<N> nodes.
 * On failure, already-attached devices are unwound via mokm_detach().
 *
 * NOTE(review): lossy extraction -- declarations (result, dev_mn, i,
 * nod), several braces and returns, and the mokm_nr_devs increment are
 * missing from the visible text; comments describe visible statements.
 */
1356 static int __init
mokm_init (void)
1358 mokm_dev_t
*mokm
= NULL
;
1359 struct pci_dev
*pdev
= NULL
;
1366 PDEBUG("===========================================================================\n");
1367 #ifdef TRASS_MOKM /* ---------------------------------- with trass mokm */
1368 PDEBUG(" %s %s (with TR): mokm_init start\n", MODULE_NAME
, __DATE__
);
1369 #else /*TRASS_MOKM ------------------------------- without trass mokm */
1370 PDEBUG(" %s %s: mokm_init start\n", MODULE_NAME
, __DATE__
);
1371 #endif /*TRASS_MOKM ----------------------------------- end trass mokm */
1372 PDEBUG(" %s: PAGE_SIZE = %d \n", MODULE_NAME
, (int)PAGE_SIZE
);
/* Reserve MAX_MOKM minors under a dynamically allocated major. */
1374 result
= alloc_chrdev_region(&dev_mn
, 0, MAX_MOKM
, "MOKM");
1376 ERROR_MSG(" ERROR: can't get major %u\n", major
);
1380 major
= MAJOR(dev_mn
);
1381 PDEBUG(" got dynamic major %u\n", major
);
1384 ERROR_MSG(" ERROR: wrong major %u\n", major
);
1388 // MODULE DEFAULTS INITIALISATION
/* Probe every matching PCI function on the bus. */
1390 while ((pdev
= pci_get_device(MOKM_VENDOR_ID
, MOKM_DEVICE_ID
, pdev
)) != NULL
) {
1391 unsigned long start_addr
, length
;
1393 //pci_enable_device(pdev);
1395 { u32 Bar0
; // FOR DEBUG only
1396 pci_read_config_dword(pdev
, 0x10, &Bar0
);
1397 PDEBUG("%u: BAR0 ADDR 0x%X\n", mokm_nr_devs
, Bar0
);
/* Locate the BAR0 register window. */
1404 length
= pci_resource_len(pdev
, 0);
1406 start_addr
= pci_resource_start(pdev
, 0);
1408 /*pci_write_config_dword(pdev, 0x0c, 0xf000);*/
1410 PDEBUG("%u: 0x%X 0x%X pci_res start 0x%X len 0x%X\n", mokm_nr_devs
, pdev
->vendor
, pdev
->device
, (unsigned int)start_addr
, (unsigned int)length
);
1412 if ( (start_addr
== 0) || (length
== 0) ) {
1413 ERROR_MSG("%u: ERROR: NO PCI resurces avalible! Skip this module\n", mokm_nr_devs
);
/* NOTE(review): kmalloc() returns NULL on failure, not a negative
 * value -- this "< 0" check cannot catch allocation failure. */
1418 if ( (mokm
= kmalloc(sizeof(mokm_dev_t
), GFP_KERNEL
)) < 0 ) {
1419 ERROR_MSG("%u: ERROR: Cannot allocate memory for mokm_dev_t\n", mokm_nr_devs
);
1424 mokm
->attach_level
= 0;
1426 memset(mokm
, 0, sizeof(mokm_dev_t
));
/* Start with all debug categories enabled. */
1428 mokm
->debug
= 0xFFFF;
1433 mokm
->attach_level
= 1;
1438 mokm
->sync_mode
= 1;
1439 mokm
->irq
= pdev
->irq
;
1440 mokm
->instance
= mokm_nr_devs
;
1441 PDEBUG("%d: IRQ = 0x%X (%u)\n", mokm
->instance
, mokm
->irq
, mokm
->irq
);
/* Initialize pool locks and the reader/writer/command wait queues. */
1444 spin_lock_init(&mokm
->DEF_MUTEX_R
); /// spin_lock
1445 #ifndef SINGLE_MUTEX
1446 spin_lock_init(&mokm
->DEF_MUTEX_W
); /// spin_lock
1447 #endif // SINGLE_MUTEX
1448 init_waitqueue_head(&mokm
->wq_ready_rcmd
);
1449 /*init_waitqueue_head(&mokm->wq_pear_ready_for_cmd);*/
1450 init_waitqueue_head(&mokm
->wq_ready_rbuf
);
1451 init_waitqueue_head(&mokm
->wq_ready_wbuf
);
/* Map the register block uncached. */
1454 mokm
->regs
= (mokm_regs_t
*)ioremap_nocache(start_addr
/* & PCI_BASE_ADDRESS_MEM_MASK*/, length
);
1455 //mokm->regs = (mokm_regs_t *) = (caddr_t)pci_iomap(pdev, 0, length);
1456 mokm
->attach_level
= 2;
/* Hardware reset, then restore endian bits in CR and TRC. */
1459 iowrite32(MOKM_CR_ENDIAN
| MOKM_CR_RESET
, &mokm
->regs
->CR
);
1461 iowrite32(ioread32(&mokm
->regs
->TRC
.r
) | MOKM_TRC_ENDIAN
, &mokm
->regs
->TRC
.r
);
1463 // Init XMIT/RCV buffers
1464 if(init_pool_buff(mokm
, &mokm
->read_pool
, &mokm
->rbuff
, PCI_DMA_FROMDEVICE
)) {
1465 ERROR_MSG("%d: ERROR: Failed to init pool buffers for read\n", mokm
->instance
);
1468 mokm
->attach_level
= 3;
1470 if(init_pool_buff(mokm
, &mokm
->write_pool
, &mokm
->wbuff
, PCI_DMA_TODEVICE
)) {
1471 ERROR_MSG("%d: ERROR: Failed to init pool buffers for write\n", mokm
->instance
);
1474 mokm
->attach_level
= 4;
1476 if (0/*mokm->debug & MOKM_DBG_ATTACH*/) show_buff(mokm
);
1478 { // Add device to the system
1479 dev_t devno
= MKDEV(major
, mokm_nr_devs
);
1480 mokm
->attach_level
= 5;
1481 cdev_init(&mokm
->cdev
, &mokm_fops
);
1482 mokm
->cdev
.owner
= THIS_MODULE
;
1483 mokm
->cdev
.ops
= &mokm_fops
;
1485 result
= cdev_add(&mokm
->cdev
, devno
, 1);
1487 if ( result
!= 0 ) {
1488 ERROR_MSG("%d: ERROR: Cannot add device to the system\n", mokm
->instance
);
1492 mokm
->attach_level
= 6;
1494 pci_set_drvdata(pdev
, mokm
);
1495 mokm
->attach_level
= 7;
1498 mokm_devices
[mokm_nr_devs
] = mokm
;
1500 // INCREMENT DEVICE NUMBER
1504 PDEBUG(" %u MOKM DEVICES ARE AVAILABLE\n", mokm_nr_devs
);
1505 if ( mokm_nr_devs
<= 0 ) {
1506 ERROR_MSG(" ERROR: Can't find any device (%u)\n", mokm_nr_devs
);
/* Hook the shared interrupt handler for every probed device. */
1512 for ( i
= 0; i
< mokm_nr_devs
; i
++ ) {
1513 if ( request_irq(mokm_devices
[i
]->irq
, mokm_intr_handler
, REQ_IRG_FLAG
, mokm_name
, (void*) mokm_devices
[i
] ) ) {
1514 ERROR_MSG("%d: ERROR: Cannot register interrupt handler %s\n", i
, mokm_name
);
1518 mokm_devices
[i
]->dev
= dev_mn
;
1519 mokm_devices
[i
]->attach_level
= 9;
/* Create /sys/class/mokm and one mokm<N> device node per instance. */
1522 mokm_class
= class_create(THIS_MODULE
, "mokm");
1523 if (IS_ERR(mokm_class
)) {
1524 pr_err("Error creating class: /sys/class/mokm.\n");
1527 for (i
= 0; i
< mokm_nr_devs
; i
++) {
1528 sprintf(nod
, "%s%d", DEV_MODULE_NAME
, i
);
1529 if (!IS_ERR(mokm_class
)) {
1530 pr_info("make node /sys/class/mokm/%s\n", nod
);
1531 if (device_create(mokm_class
, NULL
,
1532 MKDEV(major
, i
), NULL
, nod
) == NULL
)
1533 pr_err("create a node %d failed\n", i
);
1536 mokm
->attach_level
= 10;
1539 dump_all_regs(mokm
, "open");
/* Failure path: unwind every device that was already attached. */
1544 for(i
= 0; i
< mokm_nr_devs
; i
++) {
1545 if ( mokm_devices
[i
] ) {
1546 mokm_detach( mokm_devices
[i
] );
1552 //-----------------------------------------------------------------------------
1556 * rmmod ./${module}.ko
/*
 * mokm_exit() - module unload: destroy each /sys/class/mokm node,
 * detach every probed device via mokm_detach(), then remove the class.
 */
1558 static void __exit
mokm_exit (void)
1562 for(i
= 0; i
< mokm_nr_devs
; i
++) {
1563 device_destroy(mokm_class
, MKDEV(major
, i
));
1564 if ( mokm_devices
[i
] ) {
1565 mokm_detach( mokm_devices
[i
] );
1568 class_destroy(mokm_class
);
1571 //-----------------------------------------------------------------------------
/* Register the module's load/unload entry points. */
1572 module_init(mokm_init
);
1573 module_exit(mokm_exit
);