#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
#include <asm/setup.h>
#include <linux/mcst/ddi.h>
#include <linux/mcst/rdma_user_intf.h>
#include "rdma_regs.h"
#include "rdma_error.h"
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)

#define RDMA_NODE_DEV 7

/* Struct for class rdma in sysfs */
static struct class *rdma_class;
/******************************************************************************/
/* OLD VERSION (version_mem_alloc = 0) - mem_alloc via __get_free_pages.     */
/* NEW VERSION (version_mem_alloc != 0) - mem_alloc via dma_alloc_coherent.  */
/* Default: OLD VERSION.                                                     */
/******************************************************************************/
static int version_mem_alloc = 0;
module_param(version_mem_alloc, int, 0);
static int atl_v = TR_ATL_B;
module_param(atl_v, int, 0);
MODULE_PARM_DESC(atl_v, "Changes the value of ATL (alive timer limit) in reg CAM.");
/*********************************************************************/
/* Enable RFSM - rfsm.                                               */
/* rfsm = DISABLE_RFSM - RFSM disabled (default).                    */
/* rfsm = DMA_RCS_RFSM - RFSM enabled.                               */
/*********************************************************************/
#define CLEAR_RFSM DISABLE_RFSM
unsigned int rfsm = CLEAR_RFSM;
/*********************************************************************/
/* Enable exit on GP0 - enable_exit_gp0.                             */
/* enable_exit_gp0 = 0 - exit on GP0 disabled (default).             */
/* enable_exit_gp0 = 1 - exit on GP0 enabled.                        */
/*********************************************************************/
unsigned int enable_exit_gp0 = DISABLE_EXIT_GP0;

DEFINE_RAW_SPINLOCK(mu_fix_event);
unsigned char *rdma_reg_VID;		/* RDMA VID */
unsigned char *rdma_reg_CH0_IDT;	/* RDMA ID/Type */
unsigned char *rdma_reg_CS;		/* RDMA Control/Status 000028a0 */
unsigned char *rdma_reg_CH1_IDT;	/* RDMA ID/Type */

unsigned char *rdma_reg_DD_ID_0;	/* Data Destination ID */
unsigned char *rdma_reg_DMD_ID_0;	/* Data Message Destination ID */
unsigned char *rdma_reg_N_IDT_0;	/* Neighbour ID/Type */
unsigned char *rdma_reg_ES_0;		/* Event Status */
unsigned char *rdma_reg_IRQ_MC_0;	/* Interrupt Mask Control */
unsigned char *rdma_reg_DMA_TCS_0;	/* DMA Tx Control/Status */
unsigned char *rdma_reg_DMA_TSA_0;	/* DMA Tx Start Address */
unsigned char *rdma_reg_DMA_TBC_0;	/* DMA Tx Byte Counter */
unsigned char *rdma_reg_DMA_RCS_0;	/* DMA Rx Control/Status */
unsigned char *rdma_reg_DMA_RSA_0;	/* DMA Rx Start Address */
unsigned char *rdma_reg_DMA_RBC_0;	/* DMA Rx Byte Counter */
unsigned char *rdma_reg_MSG_CS_0;	/* Messages Control/Status */
unsigned char *rdma_reg_TDMSG_0;	/* Tx Data_Messages Buffer */
unsigned char *rdma_reg_RDMSG_0;	/* Rx Data_Messages Buffer */
unsigned char *rdma_reg_CAM_0;		/* CAM - channel alive management */

unsigned char *rdma_reg_DD_ID_1;	/* Data Destination ID */
unsigned char *rdma_reg_DMD_ID_1;	/* Data Message Destination ID */
unsigned char *rdma_reg_N_IDT_1;	/* Neighbour ID/Type */
unsigned char *rdma_reg_ES_1;		/* Event Status */
unsigned char *rdma_reg_IRQ_MC_1;	/* Interrupt Mask Control */
unsigned char *rdma_reg_DMA_TCS_1;	/* DMA Tx Control/Status */
unsigned char *rdma_reg_DMA_TSA_1;	/* DMA Tx Start Address */
unsigned char *rdma_reg_DMA_TBC_1;	/* DMA Tx Byte Counter */
unsigned char *rdma_reg_DMA_RCS_1;	/* DMA Rx Control/Status */
unsigned char *rdma_reg_DMA_RSA_1;	/* DMA Rx Start Address */
unsigned char *rdma_reg_DMA_RBC_1;	/* DMA Rx Byte Counter */
unsigned char *rdma_reg_MSG_CS_1;	/* Messages Control/Status */
unsigned char *rdma_reg_TDMSG_1;	/* Tx Data_Messages Buffer */
unsigned char *rdma_reg_RDMSG_1;	/* Rx Data_Messages Buffer */
unsigned char *rdma_reg_CAM_1;		/* CAM - channel alive management */
struct stat_rdma stat_rdma;
unsigned char *e0regad;
unsigned int count_read_sm_max = 80;
unsigned int intr_rdc_count[2];
unsigned int msg_cs_dmrcl;
unsigned int state_cam = 0;
unsigned long time_ID_REQ;
unsigned long time_ID_ANS;

link_id_t rdma_link_id;
extern int rdma_present;
static long rdma_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg);
static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg);
static long rdma_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
static ssize_t rdma_read(struct file *, char *, size_t, loff_t *);
static ssize_t rdma_write(struct file *, const char *, size_t, loff_t *);
static int rdma_open(struct inode *inode, struct file *file);
static int rdma_close(struct inode *inode, struct file *file);
static int rdma_mmap(struct file *file, struct vm_area_struct *vma);
void test_reg_rdma(void);
int get_file_minor(struct file *file);

void free_chan(dma_chan_t *chd);
void rdma_mem_free(size_t size, dma_addr_t dev_memory,
		   unsigned long dma_memory);
void init_rdma_sti(int instance);
void read_regs_rdma(void);
int rdma_mem_alloc(size_t size, dma_addr_t *mem, size_t *real_size,
		   unsigned long *dma_memory);
int init_chan(dma_chan_t *chd, int reqlen, int tm);
int write_buf(rdma_state_inst_t *xsp, const char *buf, unsigned int size,
	      int instance, int channel, rdma_ioc_parm_t *parm);
int read_buf(rdma_state_inst_t *xsp, const char *buf, int size, int instance,
	     int channel, rdma_ioc_parm_t *parm);
int rdma_remap_page(void *va, size_t sz, struct vm_area_struct *vma);
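/*
 * create_dev_rdma() registers the device nodes for one link: it creates
 * the "rdma" class and then RDMA_NODE_DEV nodes named "rdma_<i>_:<n>"
 * under /sys/class/rdma, with minor = i * RDMA_NODE_DEV + n - the same
 * encoding that DEV_inst()/DEV_chan() decode in the file operations.
 */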
int create_dev_rdma(int major)
{
	/* Create rdma nodes in /sysfs */
	rdma_class = class_create(THIS_MODULE, "rdma");
	if (IS_ERR(rdma_class)) {
		pr_err("Error creating class: /sys/class/rdma.\n");
	}
	for (i_rdma = 0; i_rdma < RDMA_NODE_DEV; i_rdma++) {
		minor = i * RDMA_NODE_DEV + i_rdma;
		sprintf(nod, "rdma_%d_:%d", i, i_rdma);
		pr_info("make node /sys/class/rdma/%s\n", nod);
		/* device_create() returns ERR_PTR on failure, never NULL */
		if (IS_ERR(device_create(rdma_class, NULL, MKDEV(major, minor),
					 NULL, nod))) {
			pr_err("create dev: %s a node: %d failed\n",
int remove_dev_rdma(int major)
{
	/* Remove rdma nodes in /sysfs */
	for (i_rdma = 0; i_rdma < RDMA_NODE_DEV; i_rdma++) {
		minor = i * RDMA_NODE_DEV + i_rdma;
		(void)sprintf(nod, "rdma_%d_:%d", i, i_rdma);
		device_destroy(rdma_class, MKDEV(major, minor));
		pr_info("remove node /sys/class/rdma/%s\n", nod);
	}
	class_destroy(rdma_class);
static struct file_operations rdma_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= rdma_ioctl,
	.compat_ioctl	= rdma_compat_ioctl,
	.release	= rdma_close,
};
unsigned int rdc_byte;
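/*
 * With RDMA_REG_TRACE defined, WRR_rdma()/RDR_rdma() wrap the MMIO
 * register accessors so that every write/read is also logged through
 * fix_event(). The link instance is recovered from bits 8-11 of the
 * register offset relative to e0regad, then remapped (0->2, 1->0, 2->1)
 * to the event channel numbering.
 */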
#ifdef RDMA_REG_TRACE

void WRR_rdma(unsigned char *reg, unsigned int val)
{
	int ddd = (int)(reg - e0regad);

	inst = ((ddd & 0xf00) >> 8);
	switch (inst) {
	case 0: inst = 2; break;
	case 1: inst = 0; break;
	case 2: inst = 1; break;
	}
	fix_event(inst, WRR_EVENT, ddd & 0xff, val);
}

unsigned int RDR_rdma(unsigned char *reg)
{
	unsigned int val = readl(reg);
	int ddd = (int)(reg - e0regad);

	inst = ((ddd & 0xf00) >> 8);
	switch (inst) {
	case 0: inst = 2; break;
	case 1: inst = 0; break;
	case 2: inst = 1; break;
	}
	fix_event(inst, RDR_EVENT, ddd & 0xff, val);
}
#if defined(TRACE_LATENCY) || defined(TRACE_LATENCY_MSG) || \
	defined(TRACE_LATENCY_SM)
void user_trace_stop_my(void)
{
#ifdef CONFIG_FUNCTION_TRACER

void user_trace_start_my(void)
{
#ifdef CONFIG_FUNCTION_TRACER
int pcibios_read_config_dword(unsigned char bus, unsigned char devfn,
			      unsigned char where, u32 *val)
{
	outl(CONFIG_CMD_RDMA(bus, devfn, where), 0xCF8);

int pcibios_write_config_dword(unsigned char bus, unsigned char devfn,
			       unsigned char where, u32 val)
{
	outl(CONFIG_CMD_RDMA(bus, devfn, where), 0xCF8);
static struct pci_device_id rdma_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MCST_RDMA, PCI_DEVICE_ID_MCST_RDMA) },
	{ 0, }
};
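/*
 * allign_dma(): align a DMA byte count to the ALLIGN_RDMA boundary by
 * clearing the low-order bits of an unaligned request.
 */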
unsigned int allign_dma(unsigned int n)
{
	if (n & (ALLIGN_RDMA - 1)) {
		n = n & (~(ALLIGN_RDMA - 1));
int MCG_CS_SEND_ALL_MSG =
	(MSG_CS_SD_Msg | MSG_CS_SGP0_Msg | MSG_CS_SGP1_Msg |
	 MSG_CS_SGP2_Msg | MSG_CS_SGP3_Msg | MSG_CS_SL_Msg |
	 MSG_CS_SUL_Msg | MSG_CS_SIR_Msg);
int MSG_CS_MSF_ALL = MSG_CS_DMPS_Err | MSG_CS_MPCRC_Err | MSG_CS_MPTO_Err |

unsigned int count_loop_send_msg_max = 10;
unsigned int count_wait_rdm_max = 64;

dev_rdma_sem_t *msg_snd_dev[2];
drv_getparm_from_ddi(unsigned long parm, unsigned long *valuep)
{
	*valuep = (unsigned long)jiffies;

	printk("drv_getparm_from_ddi: Unknown parm %ld\n", parm);
static inline void __raw_add_wait_queue_from_ddi(raw_wait_queue_head_t *head,
						 raw_wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

static inline void __raw_remove_wait_queue_from_ddi(raw_wait_queue_head_t *head,
						    raw_wait_queue_t *old)
{
	list_del(&old->task_list);
}

void raw_add_wait_queue_from_ddi(raw_wait_queue_head_t *q,
				 raw_wait_queue_t *wait)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__raw_add_wait_queue_from_ddi(q, wait);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}

void raw_remove_wait_queue_from_ddi(raw_wait_queue_head_t *q,
				    raw_wait_queue_t *wait)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__raw_remove_wait_queue_from_ddi(q, wait);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
/* Convert microseconds (mksec) to jiffies (HZ ticks) */
drv_usectohz_from_ddi(register clock_t mksec)
{
	struct timespec rqtp;

	rqtp.tv_nsec = ((mksec % 1000000L) * 1000L);
	rqtp.tv_sec = mksec / 1000000L;
	clock = timespec_to_jiffies(&rqtp);
extern int wake_up_state(struct task_struct *p, unsigned int state);

static void __raw_wake_up_common_from_ddi(raw_wait_queue_head_t *q)
{
	struct list_head *tmp, *next;
	raw_wait_queue_t *curr;

	list_for_each_safe(tmp, next, &q->task_list) {
		curr = list_entry(tmp, raw_wait_queue_t, task_list);
		//wake_up_state(curr->task, TASK_UNINTERRUPTIBLE |
		//	      TASK_INTERRUPTIBLE);
		wake_up_process(curr->task);
	}
}

void __raw_wake_up_from_ddi(raw_wait_queue_head_t *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__raw_wake_up_common_from_ddi(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}

ddi_cv_broadcast_from_ddi(kcondvar_t *cvp)
{
	__raw_wake_up_from_ddi(cvp);
}

int rdma_cv_broadcast_rdma(void *dev_rdma_sem)
{
	dev_rdma_sem_t *dev = dev_rdma_sem;

	dev->irq_count_rdma++;
	dev->time_broadcast = E2K_GET_DSREG(clkr);
	ddi_cv_broadcast_from_ddi(&dev->cond_var);
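/*
 * ddi_cv_spin_timedwait_from_ddi() is a Solaris-DDI-style timed condvar
 * wait built on the raw wait queues above: it queues the current task,
 * drops the caller's spinlock if it was held, sleeps via
 * schedule_timeout() until tim (in jiffies), then reacquires the lock.
 * A pending signal is reported back to the caller (see the
 * signal_pending() check at the end).
 */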
ddi_cv_spin_timedwait_from_ddi(kcondvar_t *cvp, raw_spinlock_t *lock, long tim)
{
	/* unsigned long flags; */
	unsigned long expire;
	int spin_locking_done = 0;
	struct task_struct *tsk = current;
	DECLARE_RAW_WAIT_QUEUE(wait);

	expire = tim - jiffies;
	tsk->state = TASK_INTERRUPTIBLE;
	raw_add_wait_queue_from_ddi(cvp, &wait);
	spin_locking_done = raw_spin_is_locked(lock);
	if (spin_locking_done)
		spin_mutex_exit(lock);
	fix_event(0, WAIT_TRY_SCHTO_EVENT,
		  (unsigned int)expire, 0);
	expire = schedule_timeout(expire);
	raw_remove_wait_queue_from_ddi(cvp, &wait);
	tsk->state = TASK_RUNNING;
	if (spin_locking_done)
		spin_mutex_enter(lock);
	if (signal_pending(current)) {

	do_gettimeofday(&tv);
	val = tv.tv_sec * 1000000000LL + tv.tv_usec * 1000LL;
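/*
 * wait_for_irq_rdma_sem() blocks until rdma_cv_broadcast_rdma() signals
 * the semaphore or the timeout expires; it must be entered with
 * dev->lock held. The delta between the clkr timestamps of broadcast
 * and wake-up is logged via event_ddi_cv(), and usec_timeout is
 * converted to a jiffies deadline via drv_usectohz_from_ddi(). The
 * callers below treat -2 as "interrupted by signal" and -1 as
 * "timed out".
 */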
int wait_for_irq_rdma_sem(void *dev_rdma_sem, signed long usec_timeout)
{
	unsigned long expire = 0;
	unsigned long time_current;
	unsigned int delta_time;
	dev_rdma_sem_t *dev = dev_rdma_sem;
	signed long timeout_tick;

	if (!raw_spin_is_locked(&dev->lock)) {
		printk("wait_for_irq_rdma_sem: spin is NOT locked: dev: %p\n",

	if (dev->irq_count_rdma) {
		printk("wait_for_irq_rdma_sem(%p): dev->irq_count_rdma: %d "
		       "num_obmen: %d\n", &dev->lock,
		       (int)dev->irq_count_rdma,
		       (unsigned int)dev->num_obmen);

	if (dev->time_broadcast) {
		time_current = E2K_GET_DSREG(clkr);
		if (time_current > dev->time_broadcast) {
			delta_time = (unsigned int)(time_current -
						    dev->time_broadcast);
		} else {
			delta_time = (unsigned int)(time_current +
						    (~0U - dev->time_broadcast));
		}
		delta_time |= (1 << 31);
		event_ddi_cv(delta_time, WAIT_RET_SCHT0_EVENT, expire,
			     (unsigned int)dev->num_obmen);
		dev->time_broadcast = 0;

	usec_timeout *= 10000;
	event_ddi_cv(0, WAIT_TRY_SCHTO_EVENT, usec_timeout,
		     (unsigned int)dev->num_obmen);
	drv_getparm_from_ddi(LBOLT, &timeout_tick);
	timeout_tick += drv_usectohz_from_ddi(usec_timeout);
	ret = ddi_cv_spin_timedwait_from_ddi(&dev->cond_var, &dev->lock,

	if (dev->time_broadcast) {
		time_current = E2K_GET_DSREG(clkr);
		if (time_current > dev->time_broadcast) {
			delta_time = (unsigned int)(time_current -
						    dev->time_broadcast);
		} else {
			delta_time = (unsigned int)(time_current +
						    (~0U - dev->time_broadcast));
		}
		event_ddi_cv(delta_time, WAIT_RET_SCHT0_EVENT, expire,
			     (unsigned int)dev->num_obmen);
		dev->time_broadcast = 0;

	event_ddi_cv(dev->irq_count_rdma, WAIT_RET_SCHT0_EVENT, expire,
		     (unsigned int)dev->num_obmen);
	DEBUG_MSG("wait_for_irq_rdma_sem FINISH\n");
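/*
 * Event tracing: fix_event_proc() appends one record to the global
 * rdma_event ring buffer under mu_fix_event. The buffer wraps at
 * SIZE_EVENT entries; RDMA_GET_EVENT in rdma_ioctl() copies the whole
 * ring out to user space for post-mortem analysis.
 */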
rdma_event_t rdma_event;

int rdma_event_init = 0;
void fix_event_proc(unsigned int channel, unsigned int event,
		    unsigned int val1, unsigned int val2)
{
	struct event_cur *event_cur;

	if (!rdma_event_init)
		return;

	raw_spin_lock_irqsave(&mu_fix_event, flags);
	event_cur = &rdma_event.event[rdma_event.event_cur];
	event_cur->clkr = E2K_GET_DSREG(clkr);
	event_cur->event = event;
	event_cur->channel = channel;
	event_cur->val1 = val1;
	event_cur->val2 = val2;
	rdma_event.event_cur++;
	if (SIZE_EVENT == rdma_event.event_cur)
		rdma_event.event_cur = 0;
	raw_spin_unlock_irqrestore(&mu_fix_event, flags);
}
#include "rdma_intr.c"
#include "rdma_read_buf.c"
#include "rdma_write_buf.c"
#include "rdma_send_msg.c"
static void __exit rdma_remove(struct pci_dev *dev);
static int __init rdma_probe(struct pci_dev *dev,
			     const struct pci_device_id *ent);

static struct pci_driver rdma_driver = {
	.id_table = rdma_devices,
	.remove = rdma_remove,
};

/*
 * Main RDMA state structure
 */
struct rdma_state *rdma_state;
static int __init rdma_init(void)
{
	if (HAS_MACHINE_E2K_FULL_SIC) {
		ERROR_MSG("Sorry, this driver works only on e3m; use rdma_sic.\n");

		ERROR_MSG("RDMA registers busy.\n");

	if (!rdma_apic_init) {
		ERROR_MSG("RDMA hardware is absent\n");

	pci_ret = pci_register_driver(&rdma_driver);

		ERROR_MSG("Module rdma FAILED initialization: %d\n", pci_ret);
}

unsigned char bus_number_rdma, devfn_rdma;
static int rdma_probe(struct pci_dev *dev,
		      const struct pci_device_id *ent)
{
	DEBUG_MSG("rdma_probe: START\n");
#if RDMA_PRN_ADDR_FUN
	printk("ADDR_FUN: %p - static rdma_ioctl\n", rdma_ioctl);
	printk("ADDR_FUN: %p - static rdma_read\n", rdma_read);
	printk("ADDR_FUN: %p - static rdma_write\n", rdma_write);
	printk("ADDR_FUN: %p - static rdma_open\n", rdma_open);
	printk("ADDR_FUN: %p - static rdma_close\n", rdma_close);
	printk("ADDR_FUN: %p - static rdma_mmap\n", rdma_mmap);
	printk("ADDR_FUN: %p - test_reg_rdma\n", test_reg_rdma);
	printk("ADDR_FUN: %p - get_file_minor\n", get_file_minor);
	printk("ADDR_FUN: %p - init_reg\n", init_reg);
	printk("ADDR_FUN: %p - free_chan\n", free_chan);
	printk("ADDR_FUN: %p - rdma_mem_free\n", rdma_mem_free);
	printk("ADDR_FUN: %p - init_rdma_sti\n", init_rdma_sti);
	printk("ADDR_FUN: %p - read_regs_rdma\n", read_regs_rdma);
	printk("ADDR_FUN: %p - rdma_mem_alloc\n", rdma_mem_alloc);
	printk("ADDR_FUN: %p - init_chan\n", init_chan);
	printk("ADDR_FUN: %p - write_buf\n", write_buf);
	printk("ADDR_FUN: %p - read_buf\n", read_buf);
	printk("ADDR_FUN: %p - rdma_remap_page\n", rdma_remap_page);
	printk("ADDR_FUN: %p - rdma_fops->read rdma_fops->read: %x\n",
	       rdma_fops.read, *rdma_fops.read);
	printk("ADDR_FUN: %p - rdma_fops->write rdma_fops->write: %x\n",
	       rdma_fops.write, *rdma_fops.write);
	printk("ADDR_FUN: %p - rdma_fops->unlocked_ioctl "
	       "rdma_fops->unlocked_ioctl: %x\n",
	       rdma_fops.unlocked_ioctl, *rdma_fops.unlocked_ioctl);
	printk("ADDR_FUN: %p - rdma_fops->compat_ioctl "
	       "rdma_fops->compat_ioctl: %x\n", rdma_fops.compat_ioctl,
	       *rdma_fops.compat_ioctl);
	printk("ADDR_FUN: %p - rdma_fops->mmap rdma_fops->mmap: %x\n",
	       rdma_fops.mmap, *rdma_fops.mmap);
	printk("ADDR_FUN: %p - rdma_fops->open rdma_fops->open: %x\n",
	       rdma_fops.open, *rdma_fops.open);
	printk("ADDR_FUN: %p - rdma_fops->release rdma_fops->release: %x\n",
	       rdma_fops.release, *rdma_fops.release);
#endif
	if ((ret = pci_enable_device(dev))) {
		ERROR_MSG(KERN_ERR "rdma_probe: cannot enable pci "
			  "device err: %d\n", ret);
		DEBUG_MSG("rdma_probe: FINISH\n");

	DEBUG_MSG("rdma_probe: dev->dev.init_name: %s\n", dev->dev.init_name);
	DEBUG_MSG("rdma_probe: dev->devfn: %x\n", dev->devfn);
	DEBUG_MSG("rdma_probe: dev->vendor: %x\n", dev->vendor);
	DEBUG_MSG("rdma_probe: dev->device: %x\n", dev->device);
	DEBUG_MSG("rdma_probe: dev->subsystem_vendor: %x\n",
		  dev->subsystem_vendor);
	DEBUG_MSG("rdma_probe: dev->subsystem_device: %x\n",
		  dev->subsystem_device);
	DEBUG_MSG("rdma_probe: dev->devfn: %x\n", dev->devfn);
	if (!(bus = dev->bus)) {
		ERROR_MSG("rdma_probe: bus is NULL\n");

	for (devfn_rdma = 0; devfn_rdma < 0xff; devfn_rdma++) {
		pcibios_read_config_dword(bus->number, devfn_rdma, 0, &id);
		if (id == 0x71918086) {
			bus_number_rdma = bus->number;
			DEBUG_MSG("rdma_probe: EDBUS-RDMA config space\n");
			for (i = 0; i < 7; i++) {
				pcibios_read_config_dword(bus->number,
							  devfn_rdma, i << 2, &val);
				DEBUG_MSG("rdma_probe: %2d 0x%08x\n", i << 2, val);

	if (devfn_rdma == 0xff) {
		ERROR_MSG("rdma_probe: devfn_rdma == 0xff\n");

	pcibios_write_config_dword(bus->number, devfn_rdma, 4, 0x7);
	pcibios_read_config_dword(bus->number, devfn_rdma, 4, &val);
	major = register_chrdev(0, board_name, &rdma_fops);

		ERROR_MSG("rdma_probe: there is no free major\n");

	DEBUG_MSG("rdma_probe: major: %d\n", major);

	size_rdma_state = sizeof(struct rdma_state);
	rdma_state = (struct rdma_state *)kmalloc(size_rdma_state, GFP_KERNEL);
	if (rdma_state == (struct rdma_state *)NULL) {
		pci_disable_device(dev);
		ERROR_MSG("rdma_probe: rdma_state == NULL\n");

	memset(rdma_state, 0, size_rdma_state);
	rdma_state->dev_rdma = dev;
	rdma_state->size_rdma_state = size_rdma_state;
	rdma_state->major = major;
	rdma_state->mmio_base = pci_resource_start(dev, PCI_MMIO_BAR);
	rdma_state->mmio_len = pci_resource_len(dev, PCI_MMIO_BAR);

	if ((ret = pci_request_region(dev, PCI_MMIO_BAR, "rdma MMIO")))

	rdma_state->mmio_vbase = ioremap(rdma_state->mmio_base,
					 rdma_state->mmio_len);
	if (!rdma_state->mmio_vbase) {
		ERROR_MSG("rdma_probe: cannot ioremap MMIO (0x%08lx:0x%x)\n",
			  rdma_state->mmio_base, rdma_state->mmio_len);
		goto fail_mmio_ioremap;
	}
	DEBUG_MSG("rdma_probe: mmio_vbase: %p mmio_base: 0x%ld mmio_len: %d\n",
		  rdma_state->mmio_vbase, rdma_state->mmio_base,
		  rdma_state->mmio_len);
	e0regad = (unsigned char *)rdma_state->mmio_vbase;

	mutex_init(&rdma_state->mu);

	rdma_interrupt_p = rdma_interrupt;

	tr_atl = ATL_B | (atl_v & ATL);
	printk("Reg CAM ATL: %x\n", tr_atl);
	WRR_rdma(SHIFT_CH_IDT(0), (l_base_mac_addr[3] | (l_base_mac_addr[4]) << 8));

	WRR_rdma(SHIFT_CS, 0x2a00);
	msg_cs_dmrcl = 0x1000;
	WRR_rdma(SHIFT_IRQ_MC(0), irq_mc);	/* 0x07fe000f */
	WRR_rdma(SHIFT_MSG_CS(0), msg_cs_dmrcl | MSG_CS_SIR_Msg);
	printk("ES: 0x%x MSG_CS: 0x%x\n",
	       RDR_rdma(SHIFT_ES(0)), RDR_rdma(SHIFT_MSG_CS(0)));
	printk("ES: 0x%x MSG_CS: 0x%x\n",
	       RDR_rdma(SHIFT_ES(0)), RDR_rdma(SHIFT_MSG_CS(0)));
	DEBUG_MSG("SHIFT_IRQ_MC(0): %p 0x%08x (0x%08x)\n",
		  SHIFT_IRQ_MC(0), RDR_rdma(SHIFT_IRQ_MC(0)), irq_mc);
	WRR_rdma(SHIFT_ES(0), ES_SM_Ev);
	pci_set_drvdata(dev, rdma_state);

	if (create_dev_rdma(major))
		goto error_create_dev;

	if (!version_mem_alloc) {
		printk("RDMA_ALLOC_MEMORY: OLD VERSION.\n");
	} else {
		printk("RDMA_ALLOC_MEMORY: NEW VERSION.\n");
	}
	DEBUG_MSG("rdma_probe: FINISH\n");
	rdma_interrupt_p = NULL;

	pci_release_region(rdma_state->dev_rdma, PCI_MMIO_BAR);
	pci_disable_device(rdma_state->dev_rdma);
static void rdma_remove(struct pci_dev *dev)
{
	struct rdma_state *rdma_st = pci_get_drvdata(dev);

	WRR_rdma(SHIFT_IRQ_MC(0), 0);
	WRR_rdma(SHIFT_CAM(0), 0);
	DEBUG_MSG("rdma_remove: START\n");

	DEBUG_MSG("rdma_remove: rdma_st yes\n");
	iounmap(rdma_st->mmio_vbase);
	pci_release_region(dev, PCI_MMIO_BAR);
	pci_set_drvdata(dev, NULL);

	unregister_chrdev(rdma_state->major, board_name);
	pci_disable_device(rdma_state->dev_rdma);

	remove_dev_rdma(rdma_state->major);

	DEBUG_MSG("rdma_remove: FINISH\n");
}
static void __exit rdma_cleanup(void)
{
	DEBUG_MSG("rdma_cleanup: START\n");
	WRR_rdma(SHIFT_IRQ_MC(0), 0);
	WRR_rdma(SHIFT_CAM(0), 0);
	rdma_interrupt_p = NULL;
	pci_unregister_driver(&rdma_driver);

	DEBUG_MSG("rdma_cleanup: FINISH\n");
}
static int rdma_close(struct inode *inode, struct file *file)
{
	rdma_state_inst_t *rdma_sti;

	DEBUG_MSG("rdma_close: START\n");
	minor = get_file_minor(file);
	DEBUG_MSG("rdma_close: minor: %d\n", minor);

		ERROR_MSG("rdma_close: minor < 0\n");

	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	rdma_sti = &rdma_state->rdma_sti[instance];
	mutex_enter(&rdma_sti->mu);
	rdma_sti->opened &= ~(1 << channel);

	chd = &rdma_sti->dma_chans[channel];

	DEBUG_MSG("rdma_close: opened.minor.instance.channel: 0x%x.%d.%d.%d\n",
		  rdma_sti->opened, minor, instance, channel);
	mutex_exit(&rdma_sti->mu);
	DEBUG_MSG("rdma_close: FINISH\n");
static int rdma_open(struct inode *inode, struct file *file)
{
	rdma_state_inst_t *rdma_sti;

	DEBUG_MSG("rdma_open: START\n");
	minor = get_file_minor(file);
	DEBUG_MSG("rdma_open: minor: %d\n", minor);

		ERROR_MSG("rdma_open: minor < 0\n");

	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	if (channel >= MAX_CHANNEL_RDMA) {
		ERROR_MSG("rdma_open: channel(%d) >= MAX_CHANNEL_RDMA(%d)\n",
			  channel, MAX_CHANNEL_RDMA);

	rdma_sti = &rdma_state->rdma_sti[instance];
	mutex_enter(&rdma_sti->mu);
	firstopen = (((1 << channel) & rdma_sti->opened) == 0);
	if (firstopen == 0) {
		ERROR_MSG("rdma_open: device EBUSY: minor: %d inst: %d "
			  "channel: %d\n", minor, instance, channel);
		mutex_exit(&rdma_sti->mu);

	rdma_sti->opened |= (1 << channel);
	DEBUG_MSG("rdma_open: opened.minor.instance.channel: 0x%x.%d.%d.%d\n",
		  rdma_sti->opened, minor, instance, channel);
	mutex_exit(&rdma_sti->mu);
	DEBUG_MSG("rdma_open: FINISH\n");
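/*
 * rdma_ioctl() is the control-path entry point. A minimal user-space
 * sketch of the calling convention (hypothetical device path, error
 * handling elided; shown for illustration only):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	link_id_t id;
 *	int fd = open("/dev/rdma_0_:0", O_RDWR);  // name from create_dev_rdma()
 *	if (fd >= 0 && ioctl(fd, RDMA_IOC_GET_ID, &id) == 0)
 *		printf("CH_IDT: %x N_IDT: %x\n",
 *		       id.link_id[0][1], id.link_id[0][2]);
 *
 * Most commands exchange an rdma_ioc_parm_t: reqlen carries the request
 * in, acclen and err_no carry results back to the caller.
 */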
static long rdma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	rdma_state_inst_t *rdma_sti;
	rdma_ioc_parm_t parm;
	dev_rdma_sem_t *dev_sem;

	DEBUG_MSG("rdma_ioctl: START cmd %x\n", cmd);
	minor = get_file_minor(filp);

		ERROR_MSG("rdma_ioctl: minor(%d) < 0 cmd: %d\n", minor, cmd);

	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	rdma_sti = &rdma_state->rdma_sti[instance];
	DEBUG_MSG("rdma_ioctl: minor: %d\n", minor);

	/* Get the RDMA link ID */
	switch (cmd) {
	case RDMA_IOC_GET_ID:
		rdma_link_id.count_links = MAX_NUMIOLINKS;
		rdma_link_id.link_id[0][0] = 1;
		rdma_link_id.link_id[0][1] = RDR_rdma(SHIFT_CH_IDT(0));
		rdma_link_id.link_id[0][2] = RDR_rdma(SHIFT_N_IDT(0));
		if (copy_to_user((void __user *)arg, &rdma_link_id,
				 sizeof(link_id_t))) {
			ERROR_MSG("rdma_ioctl: RDMA_IOC_GET_ID: "
				  "copy_to_user failed\n");
	case RDMA_IOC_RESET_DMA:
	{
		reset_link_t reset_link;
		rw_state_p pd = NULL;
		dev_rdma_sem_t *dev_sem;
		rdma_state_inst_t *xsp;

		xsp = &rdma_state->rdma_sti[instance];
		rval = copy_from_user(&reset_link, (void __user *)arg,
				      sizeof(reset_link_t));
		if (rval) {
			ERROR_MSG("rdma_ioctl(%d, %d, %x): copy_from_user "
				  "failed size: %lx rval: %lx\n",
				  instance, channel, cmd,
				  sizeof(reset_link_t), rval);
		}
		if (reset_link.tcs_reset == 1) {
			/* Enable exit gp0 */
			if (enable_exit_gp0) {
				for (j = 0; j < 10; j++) {
					ret_send_msg = send_msg(xsp,
							instance, 1, dev_sem);
					if (ret_send_msg > 0)
						break;
					if (ret_send_msg < 0) {
						ERROR_MSG("rdma_ioctl: "
							"FAIL send MSG_CS_SGP0_Msg "
							"from link: %x ret: %d\n",
							instance, ret_send_msg);
					} else if (ret_send_msg == 0) {
						printk("rdma_ioctl: FAIL send"
						       "from link: %x. SM is absent "
						       instance, ret_send_msg,
						       RDR_rdma(SHIFT_MSG_CS(0)));

		if (reset_link.rcs_reset == 1) {
			/* Enable exit gp0 */
			if (enable_exit_gp0) {
				pd = &rdma_sti->rw_states_d[READER];
				dev_sem = &pd->dev_rdma_sem;
				raw_spin_lock_irq(&dev_sem->lock);

				raw_spin_unlock_irq(&dev_sem->lock);

		reset_link.tcs = RDR_rdma(SHIFT_DMA_TCS(0));
		reset_link.rcs = RDR_rdma(SHIFT_DMA_RCS(0));
		rval = copy_to_user((reset_link_t __user *)arg, &reset_link,
				    sizeof(reset_link));
	rval = copy_from_user((caddr_t)&parm, (caddr_t)arg,
			      sizeof(rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_ioctl(%d, %d, %d): copy_from_user failed "
			  "size: %ld rval: %ld\n", instance, channel, cmd,
			  sizeof(rdma_ioc_parm_t), rval);
	}
	parm.err_no = res = 0;
	case RDMA_IOC_SET_MODE_RFSM:
		if (parm.reqlen == DISABLE_RFSM) {

	case RDMA_IOC_SET_MODE_EXIT_GP0:
		if (parm.reqlen == DISABLE_EXIT_GP0) {
			enable_exit_gp0 = DISABLE_EXIT_GP0;
		} else {
			enable_exit_gp0 = ENABLE_EXIT_GP0;
		}
		parm.acclen = enable_exit_gp0;
		if ((parm.reqlen <= 0xc) ||
		    ((parm.reqlen >= 0x100) && (parm.reqlen <= 0x138)) ||
		    ((parm.reqlen >= 0x200) && (parm.reqlen <= 0x238))) {
#if defined(TRACE_LATENCY_SM)
			user_trace_start_my();
#endif
			// *(unsigned int *)(e0regad + parm.reqlen) = parm.acclen;
			WRR_rdma(e0regad + parm.reqlen, parm.acclen);
#if defined(TRACE_LATENCY_SM)
			user_trace_stop_my();
#endif

		if ((parm.reqlen <= 0xc) ||
		    ((parm.reqlen >= 0x100) && (parm.reqlen <= 0x138)) ||
		    ((parm.reqlen >= 0x200) && (parm.reqlen <= 0x238))) {
#if defined(TRACE_LATENCY_SM)
			user_trace_start_my();
#endif
			// parm.acclen = *(unsigned int *)(e0regad + parm.reqlen);
			parm.acclen = RDR_rdma(e0regad + parm.reqlen);
#if defined(TRACE_LATENCY_SM)
			user_trace_stop_my();
#endif
:
1041 dev_rdma_sem_t
*dev_sem
;
1044 pd
= &rdma_sti
->rw_states_d
[WRITER
];
1045 dev_sem
= &pd
->dev_rdma_sem
;
1046 raw_spin_lock_irq(&dev_sem
->lock
);
1047 rdma_cv_broadcast_rdma(&pd
->dev_rdma_sem
);
1048 raw_spin_unlock_irq(&dev_sem
->lock
);
1052 case RDMA_WAKEUP_READER
:
1054 dev_rdma_sem_t
*dev_sem
;
1057 pd
= &rdma_sti
->rw_states_d
[READER
];
1058 dev_sem
= &pd
->dev_rdma_sem
;
1059 raw_spin_lock_irq(&dev_sem
->lock
);
1060 rdma_cv_broadcast_rdma(&pd
->dev_rdma_sem
);
1061 raw_spin_unlock_irq(&dev_sem
->lock
);
	case RDMA_IOC_DUMPREG0:
	case RDMA_IOC_DUMPREG1:

	case RDMA_IOC_BROAD:
	{
		dev_rdma_sem_t *dev_sem;
		rdma_state_inst_t *xspi = &rdma_state->rdma_sti[0];

		pcam = &xspi->rw_states_m[0];
		dev_sem = &pcam->dev_rdma_sem;
		raw_spin_lock_irq(&dev_sem->lock);

		rdma_cv_broadcast_rdma(dev_sem);

		raw_spin_unlock_irq(&dev_sem->lock);
	case RDMA_IOC_WAITD:
	{
		dev_rdma_sem_t *dev_sem;
		rdma_state_inst_t *xspi = &rdma_state->rdma_sti[0];

		pcam = &xspi->rw_states_m[0];
		dev_sem = &pcam->dev_rdma_sem;
		raw_spin_lock_irq(&dev_sem->lock);

		dev_sem->num_obmen++;
		ret_time_dwait = wait_for_irq_rdma_sem(dev_sem, IO_TIMEOUT);
		parm.reqlen = ret_time_dwait;
		parm.acclen = dev_sem->irq_count_rdma;
		pcam->stat = dev_sem->irq_count_rdma = 0;
		raw_spin_unlock_irq(&dev_sem->lock);
	case RDMA_CLEAN_TDC_COUNT:
		pd = &rdma_sti->rw_states_d[WRITER];

			ERROR_MSG("rdma_ioctl: CLEAN_TDC: (%d,%d): "
				  "Unexpected channel\n", instance, channel);

		dev_sem = &pd->dev_rdma_sem;
		dev_sem->num_obmen = 0;
		dev_sem->irq_count_rdma = 0;
		dbg_ioctl("CLEAN_TDC: %d dev_sem->num_obmen: %d\n",
			  instance, dev_sem->num_obmen);
#define COUNT_CLK 1000
	{
		u64 time[COUNT_CLK];

		for (i = 0; i < COUNT_CLK; i++)
			time[i] = E2K_GET_DSREG(clkr);
		for (i = 0; i < COUNT_CLK; i++)
			printk("0x%lx\n", time[i]);
	case RDMA_GET_MAX_CLKR:
	{
		u64 time[COUNT_CLK];
		u64 max_clk_all = 0;
		int count_rep_clk = 0;

#define COUNT_REP_CLK 100
		for (i = 0; i < COUNT_CLK; i++)
			time[i] = E2K_GET_DSREG(clkr);
		for (i = 0; i < COUNT_CLK; i++) {
			if (max_clk < time[i])

		if (max_clk_all < max_clk) {
			max_clk_all = max_clk;
			printk("0x%lx - max_clk_all\n", max_clk_all);

		if (count_rep_clk < COUNT_REP_CLK)
	case RDMA_CLEAN_RDC_COUNT:
		intr_rdc_count[instance] = 0;

		pd = &rdma_sti->rw_states_d[READER];

			ERROR_MSG("rdma_ioctl: CLEAN_RDC: (%d,%d): "
				  "Unexpected channel\n", instance, channel);

		dev_sem = &pd->dev_rdma_sem;
		dev_sem->num_obmen = 0;
		dev_sem->irq_count_rdma = 0;
		dbg_ioctl("CLEAN_RDC: intr_rdc_count[%d]: %x "
			  "dev_sem->num_obmen: %d\n", instance,
			  intr_rdc_count[instance], dev_sem->num_obmen);
	case RDMA_TIMER_FOR_READ:
		dbg_ioctl("cmd = RDMA_TIMER_FOR_READ, "
			  "reqlen (mksec) = 0x%x\n",
			  MIN_min(TIMER_FOR_READ_MAX, parm.reqlen));
		parm.acclen = (&rdma_sti->rw_states_d[READER])->timer_for_read;
		(&rdma_sti->rw_states_d[READER])->timer_for_read =
			MAX_max(TIMER_FOR_READ_MIN, MIN_min(TIMER_FOR_READ_MAX,
		parm.reqlen = (&rdma_sti->rw_states_d[READER])->timer_for_read;

	case RDMA_TIMER_FOR_WRITE:
		dbg_ioctl("cmd = RDMA_TIMER_FOR_WRITE, "
			  "reqlen (mksec) = 0x%x\n",
			  MIN_min(TIMER_FOR_WRITE_MAX, parm.reqlen));
		parm.acclen = (&rdma_sti->rw_states_d[WRITER])->timer_for_write;
		(&rdma_sti->rw_states_d[WRITER])->timer_for_write =
			MAX_max(TIMER_FOR_WRITE_MIN,
				MIN_min(TIMER_FOR_WRITE_MAX, parm.reqlen));
		parm.reqlen = (&rdma_sti->rw_states_d[WRITER])->timer_for_write;
	case RDMA_IOC_ALLOCB:
		DEBUG_MSG("cmd = RDMA_IOC_ALLOCB, "
		chd = &rdma_sti->dma_chans[channel];
		if (chd->allocs != RCS_EMPTY) {
			ERROR_MSG("rdma_ioctl: RDMA_IOC_ALLOCB: "
				  "WRONGLY finished: chd->allocs: %i\n",
			parm.err_no = RDMA_E_ALLOC;
			parm.acclen = chd->allocs;

		parm.acclen = init_chan(chd, parm.reqlen, parm.rwmode);
		if (parm.acclen < -1) {
			ERROR_MSG("rdma_ioctl: RDMA_IOC_ALLOCB: "
				  "WRONGLY finished: chd->allocs: %i\n",
			res = -1; parm.err_no = -parm.acclen;

		if (parm.acclen < 0) {
			ERROR_MSG("rdma_ioctl: RDMA_IOC_ALLOCB: "
				  "WRONGLY finished: RDMA_E_NOBUF\n");
			res = -1; parm.err_no = RDMA_E_NOBUF;

		parm.rwmode = chd->full;
		DEBUG_MSG("rdma_ioctl: phys: 0x%x full: 0x%08x\n", chd->dma,
		stat_rdma.cur_clock = jiffies;
		if (copy_to_user((caddr_t)arg, (caddr_t)&stat_rdma,
				 sizeof(struct stat_rdma))) {
			ERROR_MSG("rdma_ioctl: RDMA_GET_STAT: "
				  "copy_to_user failed\n");
	case RDMA_GET_EVENT:
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&mu_fix_event, flags);
		if (copy_to_user((caddr_t)arg, (caddr_t)(&rdma_event),
				 sizeof(rdma_event_t))) {
			raw_spin_unlock_irqrestore(&mu_fix_event, flags);
			ERROR_MSG("rdma_ioctl: RDMA_GET_EVENT: "
				  "copy_to_user failed\n");

		raw_spin_unlock_irqrestore(&mu_fix_event, flags);
		memset(&stat_rdma, 0, sizeof(struct stat_rdma));

		atl = RDR_rdma(SHIFT_CAM(0));

		event_ioctl(0, RDMA_GET_CAM_EVENT, 0, atl);
	case RDMA_IS_CAM_YES:
	{
		int ret_time_dwait = 0;
		dev_rdma_sem_t *dev_sem;

		event_ioctl(0, RDMA_IS_CAM_YES_EVENT, 1, 0);
		pcam = &rdma_sti->ralive;
		dev_sem = &pcam->dev_rdma_sem;

		atl = RDR_rdma(SHIFT_CAM(0));

			goto end_RDMA_IS_CAM_YES;

		raw_spin_lock_irq(&dev_sem->lock);
		dev_sem->irq_count_rdma = 0;

		ret_time_dwait = wait_for_irq_rdma_sem(dev_sem, IO_TIMEOUT);

		raw_spin_unlock_irq(&dev_sem->lock);
		parm.acclen = RDR_rdma(SHIFT_CAM(0));
		if (ret_time_dwait == -2) {
			parm.err_no = -RDMA_E_SIGNAL;

		if (ret_time_dwait == -1) {
			parm.err_no = -RDMA_E_TIMER;

		if (ret_time_dwait > 0) {
			parm.err_no = ret_time_dwait;

end_RDMA_IS_CAM_YES:
		event_ioctl(0, RDMA_IS_CAM_YES_EVENT, 0, 0);
	case RDMA_IS_CAM_NO:
	{
		int ret_time_dwait = 0;
		dev_rdma_sem_t *dev_sem;

		event_ioctl(0, RDMA_IS_CAM_NO_EVENT, 1, 0);
		dbg_ioctl("RDMA_IS_CAM_NO\n");
		pcam = &rdma_sti->talive;
		dev_sem = &pcam->dev_rdma_sem;
		atl = RDR_rdma(SHIFT_CAM(0));

			goto end_RDMA_IS_CAM_NO;

		raw_spin_lock_irq(&dev_sem->lock);
		dev_sem->irq_count_rdma = 0;

		ret_time_dwait = wait_for_irq_rdma_sem(dev_sem, IO_TIMEOUT);

		raw_spin_unlock_irq(&dev_sem->lock);
		parm.acclen = RDR_rdma(SHIFT_CAM(0));
		if (ret_time_dwait == -2) {
			parm.err_no = -RDMA_E_SIGNAL;

		if (ret_time_dwait == -1) {
			parm.err_no = -RDMA_E_TIMER;

		if (ret_time_dwait > 0) {
			parm.err_no = ret_time_dwait;

		parm.clkr = E2K_GET_DSREG(clkr);
		parm.clkr1 = pcam->clkr;
		parm.reqlen = pcam->int_cnt;

		event_ioctl(0, RDMA_IS_CAM_NO_EVENT, 0, 0);
	case RDMA_UNSET_CAM:
		dbg_ioctl("RDMA_UNSET_CAM(%d)\n", instance);
		atl = RDR_rdma(SHIFT_CAM(0));
		state_cam = RDMA_UNSET_CAM;
		event_ioctl(0, RDMA_UNSET_CAM_EVENT, 1, atl);
		parm.clkr = E2K_GET_DSREG(clkr);

		/* dbg_ioctl("SHIFT_CAM(%d): 0x%08x\n", instance, atl); */
		WRR_rdma(SHIFT_CAM(0), 0);
		event_ioctl(atl, RDMA_UNSET_CAM_EVENT, 0,
			    RDR_rdma(SHIFT_CAM(instance)));
);
1385 atl
= RDR_rdma(SHIFT_CAM(0));
1386 state_cam
= RDMA_SET_CAM
;
1387 event_ioctl(0, RDMA_SET_CAM_EVENT
, 1, atl
);
1388 parm
.clkr
= E2K_GET_DSREG(clkr
);
1390 WRR_rdma(SHIFT_CAM(0), tr_atl
);
1391 event_ioctl(atl
, RDMA_SET_CAM_EVENT
, 0,
1392 RDR_rdma(SHIFT_CAM(instance
)));
1399 dbg_ioctl("RDMA_SET_ATL(%d): reqlen: 0x%x mksec: %d\n",
1400 instance
, parm
.reqlen
, parm
.reqlen
*10);
1401 event_ioctl(0, RDMA_SET_ATL_EVENT
, 1, parm
.reqlen
);
1402 atl
= RDR_rdma(SHIFT_CAM(instance
));
1403 dbg_ioctl("SHIFT_CAM(%d): 0x%08x\n", instance
, atl
);
1404 tr_atl
= ATL_B
| (parm
.reqlen
& ATL
);
1405 WRR_rdma(SHIFT_CAM(instance
), tr_atl
);
1406 atl
= RDR_rdma(SHIFT_CAM(instance
));
1407 event_ioctl(0, RDMA_SET_ATL_EVENT
, 0, atl
);
	/****************************************************************/
	/* 1. On the primary side RDMA_SET_RAlive is issued.		*/
	/*    It sets xspi->ralive->stat = 1 and starts waiting.	*/
	/* 2. When GP3 arrives, without waking up, it sets		*/
	/*    xspi->ralive->stat = 2 and RAlive.			*/
	/*    This means the standby is present.			*/
	/* 3. When GP3 disappears, without waking up, it sets		*/
	/*    xspi->ralive->stat = 1 and clears RAlive.			*/
	/*    This means there is no standby.				*/
	/* Waking up is possible only via a signal.			*/
	/* 1. On the standby side RDMA_SET_TAlive is issued.		*/
	/*    It sets xspi->talive->stat = 1 and TAlive and		*/
	/*    starts waiting.						*/
	/*    If the primary is running, GP3 exchange begins.		*/
	/*    If the exchange stops or never starts, the wait		*/
	/*    returns, setting xspi->talive->stat = 0 and clearing	*/
	/*    This means the primary is absent.				*/
	/* The state query is the GET_STAT_ALIVE command.		*/
	/* parm.reqlen returns the pulse send interval.			*/
	/* parm.acclen returns the state:				*/
	/*	MAIN_REZERV_YES - primary, standby present		*/
	/*	MAIN_REZERV_NOT - primary, no standby			*/
	/*	REZERV_MAIN_YES - standby, primary present		*/
	/*	REZERV_MAIN_NOT - undefined state			*/
	/****************************************************************/
	case GET_STAT_ALIVE:
	{
		rw_state_p pcamr, pcamt;

		event_ioctl(0, GET_STAT_ALIVE_EVENT, 1, 0);
		dbg_ioctl("GET_STAT_ALIVE(%d)\n", instance);
		atl = RDR_rdma(SHIFT_CAM(instance));
		parm.reqlen = atl & ATL;
		pcamt = &rdma_sti->talive;
		pcamr = &rdma_sti->ralive;
		dbg_ioctl("SHIFT_CAM(%d): 0x%08x\n", instance, atl);
		if (atl && (pcamr->stat == 1)) {
			parm.acclen = MAIN_REZERV_NOT;

		if (atl && (pcamr->stat == 2)) {
			parm.acclen = MAIN_REZERV_YES;

		if (atl && (pcamt->stat == 1)) {
			parm.acclen = REZERV_MAIN_YES;

			parm.acclen = REZERV_MAIN_NOT;
		event_ioctl(atl, GET_STAT_ALIVE_EVENT, 0, parm.acclen);
	//#define MAIN_REZERV_YES 1	// primary, standby present
	//#define MAIN_REZERV_NOT 2	// primary, no standby
	//#define REZERV_MAIN_YES 3	// standby, primary present
	//#define REZERV_MAIN_NOT 4	// undefined state
	case RDMA_SET_TAlive:
	{
		int ret_time_dwait = 0;
		dev_rdma_sem_t *dev_sem;

		event_ioctl(0, RDMA_SET_TAlive_EVENT, 1, 0);
		dbg_ioctl("RDMA_SET_TAlive(%d)\n", instance);
		pcam = &rdma_sti->talive;
		dev_sem = &pcam->dev_rdma_sem;

		raw_spin_lock_irq(&dev_sem->wait_head.lock);

			dbg_ioctl("RDMA_SET_TAlive(%d): ERROR pcam->stat: %d\n",
				  instance, pcam->stat);
			parm.err_no = RDMA_E_INVAL;
			goto end_set_talive;

		atl = RDR_rdma(SHIFT_CAM(instance));
		dbg_ioctl("SHIFT_CAM(%d): 0x%08x\n", instance, atl);
		dev_sem->irq_count_rdma = 0;
		// mcg_cs = RDR_rdma(SHIFT_MSG_CS(instance));
		// WRR_rdma(SHIFT_MSG_CS(instance), 1);
		// WRR_rdma(SHIFT_MSG_CS(instance), msg_cs_dmrcl);
		WRR_rdma(SHIFT_CAM(instance), atl | TAlive);

		ret_time_dwait = wait_for_irq_rdma_sem(dev_sem, IO_TIMEOUT);
		if (ret_time_dwait == -2) {

			parm.err_no = RDMA_E_SIGNAL;
			parm.reqlen = RDR_rdma(SHIFT_CAM(instance));
			parm.rwmode = ret_time_dwait;
			parm.acclen = ret_time_dwait;

			raw_spin_unlock_irq(&dev_sem->wait_head.lock);

		if (pcam->stat == ES_MSF_Ev)
			parm.err_no = RDMA_E_MSF_WRD;
		else
			parm.err_no = RDMA_E_INVOP;

		parm.reqlen = RDR_rdma(SHIFT_CAM(instance));
		parm.acclen = ret_time_dwait;

		raw_spin_unlock_irq(&dev_sem->wait_head.lock);
		event_ioctl(ret_time_dwait, RDMA_SET_TAlive_EVENT, 0,
	case RDMA_SET_RAlive:
	{
		dev_rdma_sem_t *dev_sem;

		event_ioctl(0, RDMA_SET_RAlive_EVENT, 1, 0);
		dbg_ioctl("RDMA_SET_RAlive(%d)\n", instance);
		pcam = &rdma_sti->ralive;
		dev_sem = &pcam->dev_rdma_sem;
		raw_spin_lock_irq(&dev_sem->wait_head.lock);

			dbg_ioctl("RDMA_SET_RAlive(%d): ERROR pcam->stat: %d\n",
				  instance, pcam->stat);

			goto wait_set_ralive;
			// goto end_set_ralive;

		dbg_ioctl("RDMA_SET_RAlive(%d): pcam->int_ac == 0, change to 1\n",
		atl = RDR_rdma(SHIFT_CAM(instance));
		// mcg_cs = RDR_rdma(SHIFT_MSG_CS(instance));
		// WRR_rdma(SHIFT_MSG_CS(instance), 1);
		// WRR_rdma(SHIFT_MSG_CS(instance), msg_cs_dmrcl);
		WRR_rdma(SHIFT_CAM(instance), atl & ATL);

		dev_sem->irq_count_rdma = 0;
		ret_time_dwait = wait_for_irq_rdma_sem(dev_sem, IO_TIMEOUT);
		if (ret_time_dwait == -2) {

			parm.reqlen = RDR_rdma(SHIFT_CAM(instance));
			parm.acclen = ret_time_dwait;
			raw_spin_unlock_irq(&dev_sem->wait_head.lock);

		dev_sem->irq_count_rdma = 0;

		atl = RDR_rdma(SHIFT_CAM(instance));
		dbg_ioctl("SHIFT_CAM(%d): 0x%08x\n", instance, atl);
		raw_spin_unlock_irq(&dev_sem->wait_head.lock);
		event_ioctl(ret_time_dwait, RDMA_SET_RAlive_EVENT, 0, atl);
	case RDMA_UNSET_TAlive:
		event_ioctl(0, RDMA_UNSET_TAlive_EVENT, 1, 0);
		dbg_ioctl("RDMA_UNSET_TAlive(%d)\n", instance);
		atl = RDR_rdma(SHIFT_CAM(instance));
		dbg_ioctl("SHIFT_CAM(%d): 0x%08x\n", instance, atl);
		WRR_rdma(SHIFT_CAM(instance), atl & ~TAlive);
		event_ioctl(atl, RDMA_UNSET_TAlive_EVENT, 0,
			    RDR_rdma(SHIFT_CAM(instance)));

	case RDMA_UNSET_RAlive:
		event_ioctl(0, RDMA_UNSET_RAlive_EVENT, 1, 0);
		dbg_ioctl("RDMA_UNSET_RAlive(%d)\n", instance);
		atl = RDR_rdma(SHIFT_CAM(instance));
		dbg_ioctl("SHIFT_CAM(%d): 0x%08x\n", instance, atl);
		WRR_rdma(SHIFT_CAM(instance), atl & ~RAlive);
		event_ioctl(atl, RDMA_UNSET_RAlive_EVENT, 0,
			    RDR_rdma(SHIFT_CAM(instance)));
		ERROR_MSG("rdma_ioctl(%d, %d): unexpected default operation, "
			  "cmd: %i\n", instance, channel, cmd);

		parm.err_no = RDMA_E_INVOP;

	rval = copy_to_user((caddr_t)arg, (caddr_t)&parm,
			    sizeof(rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_ioctl(%d, %d, %d): copy_to_user failed "
			  "size: %ld rval: %ld\n", instance, channel, cmd,
			  sizeof(rdma_ioc_parm_t), rval);
	}

	DEBUG_MSG("rdma_ioctl(%d, %d): NORMAL_END: acclen=%x *****\n\n",
		  instance, channel, parm.acclen);
	DEBUG_MSG("rdma_ioctl FINISH\n");

	ERROR_MSG("rdma_ioctl: FAIL\n");
	DEBUG_MSG("rdma_ioctl FINISH\n");
	return -EINVAL;	/* !? return l > 0 == return -1 !? */
#ifdef CONFIG_COMPAT
static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	ret = rdma_ioctl(f, cmd, arg);

static long rdma_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case RDMA_IOC_DUMPREG0:
	case RDMA_IOC_DUMPREG1:
	case RDMA_CLEAN_TDC_COUNT:
	case RDMA_GET_MAX_CLKR:
	case RDMA_CLEAN_RDC_COUNT:
	case RDMA_TIMER_FOR_READ:
	case RDMA_TIMER_FOR_WRITE:
	case RDMA_IOC_ALLOCB:
	case RDMA_GET_EVENT:
	case RDMA_IS_CAM_YES:
	case RDMA_IS_CAM_NO:
	case RDMA_WAKEUP_WRITER:
	case RDMA_WAKEUP_READER:
	case RDMA_IOC_GET_ID:
	case RDMA_IOC_RESET_DMA:
	case RDMA_IOC_SET_MODE_RFSM:
	case RDMA_IOC_SET_MODE_EXIT_GP0:
		return do_ioctl(f, cmd, arg);
	default:
		return -ENOIOCTLCMD;
static ssize_t rdma_read(struct file *filp, char *buf, size_t size, loff_t *pos)
{
	rdma_state_inst_t *rdma_sti;
	rdma_ioc_parm_t PRM;

	DEBUG_MSG("rdma_read: START\n");
	minor = get_file_minor(filp);

	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	rdma_sti = &rdma_state->rdma_sti[instance];
	rval = copy_from_user((caddr_t)&PRM, (caddr_t)buf,
			      sizeof(rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_read(%d, %d): copy_from_user failed size: %ld "
			  "rval: %ld\n", instance, channel,
			  sizeof(rdma_ioc_parm_t), rval);
	}

	ret = read_buf(rdma_sti, buf, size, instance, channel, &PRM);
	PRM.clkr = E2K_GET_DSREG(clkr);
	rval = copy_to_user((caddr_t)buf, (caddr_t)&PRM,
			    sizeof(rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_read(%d, %d): copy_to_user failed size: %ld "
			  "rval: %ld\n", instance, channel,
			  sizeof(rdma_ioc_parm_t), rval);
	}
	DEBUG_MSG("rdma_read: FINISH\n");
static ssize_t rdma_write(struct file *filp, const char *buf, size_t size,
			  loff_t *pos)
{
	rdma_state_inst_t *rdma_sti;
	rdma_ioc_parm_t PRM;

	DEBUG_MSG("rdma_write: START\n");
	minor = get_file_minor(filp);

	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	rdma_sti = &rdma_state->rdma_sti[instance];
	rval = copy_from_user((caddr_t)&PRM, (caddr_t)buf,
			      sizeof(rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_write(%d, %d): copy_from_user failed size: %ld "
			  "rval: %ld\n", instance, channel,
			  sizeof(rdma_ioc_parm_t), rval);
	}

	ret = write_buf(rdma_sti, buf, size, instance, channel, &PRM);
	PRM.clkr = E2K_GET_DSREG(clkr);
	rval = copy_to_user((caddr_t)buf, (caddr_t)&PRM,
			    sizeof(rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_write(%d, %d): copy_to_user failed size: %ld "
			  "rval: %ld\n", instance, channel,
			  sizeof(rdma_ioc_parm_t), rval);
	}
	DEBUG_MSG("rdma_write: FINISH\n");
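/*
 * rdma_remap_page_tbl() maps a table-mode DMA buffer into user space:
 * it walks the rdma_tbl_32_struct_t entries behind va, converts each
 * element's low address to a pfn and remap_pfn_range()s it at the
 * current vm_start cursor. For VM_IO vmas, caching is disabled via
 * _PAGE_CD_DIS | _PAGE_PWT in the page protection.
 */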
int rdma_remap_page_tbl(void *va, size_t sz, struct vm_area_struct *vma)
{
	unsigned long sz_pha;
	unsigned long vm_end;
	unsigned long vm_start;
	unsigned long vm_pgoff;
	rdma_tbl_32_struct_t *ptbl;

	DEBUG_MSG("rdma_remap_page_tbl: START\n");
	if (!sz) return -EINVAL;
	if (vma->vm_pgoff) {
		ERROR_MSG("rdma_remap_page_tbl: vma->vm_pgoff: 0x%lx\n",

	size = (long)PAGE_ALIGN(sz);
	vm_end = vma->vm_end;
	vm_start = vma->vm_start;
	vm_pgoff = vma->vm_pgoff;

	if ((vm_end - vm_start) < size) {
		size = vm_end - vm_start;
		DEBUG_MSG("rdma_remap_page_tbl: vm_end(%lx) - vm_start(%lx) < "
			  "size(%lx)\n", vm_end, vm_start, size);
	}

	vma->vm_flags |= (VM_READ | VM_WRITE | VM_RESERVED);

	if (vma->vm_flags & VM_IO)
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) |
					     _PAGE_CD_DIS | _PAGE_PWT);

	for (ptbl = (rdma_tbl_32_struct_t *)va; ptbl; ptbl++) {
		rdma_addr_struct_t pxx;

		pxx.addr = (unsigned long)ptbl;
		DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x ptbl\n",
			  pxx.fields.haddr, pxx.fields.laddr);
		pxx.addr = ptbl->laddr;
		DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x ptbl->addr\n",
			  pxx.fields.haddr, pxx.fields.laddr);
		pha = (unsigned long)ptbl->laddr;
		pxx.addr = (unsigned long)phys_to_virt(pha);
		DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x __va(ptbl->addr)\n",
			  pxx.fields.haddr, pxx.fields.laddr);

		DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x __fa(ptbl->addr)\n",
			  pxx.fields.haddr, pxx.fields.laddr);

		if (remap_pfn_range(vma, vm_start, (pha >> PAGE_SHIFT), sz_pha,
				    vma->vm_page_prot)) {
			ERROR_MSG("rdma_remap_page_tbl: FAIL remap_pfn_range\n");

		DEBUG_MSG("rdma_remap_page_tbl: vm_start: %lx vm_end: %lx "
			  "sz_pha: %lx\n", vm_start, vm_end, sz_pha);
		if (vm_start >= vm_end) {
			DEBUG_MSG("rdma_remap_page_tbl: "
				  "vm_start(%lx) >= vm_end(%lx)\n", vm_start, vm_end);

	DEBUG_MSG("rdma_remap_page_tbl: FINISH\n");
int rdma_remap_page(void *va, size_t sz,
		    struct vm_area_struct *vma)
{
	unsigned long vm_end;
	unsigned long vm_start;
	unsigned long vm_pgoff;

	DEBUG_MSG("rdma_remap_page: START\n");
	if (!sz) return -EINVAL;
	pha = virt_to_phys(va);
	size = (long)PAGE_ALIGN((pha & ~PAGE_MASK) + sz);
	// if ((vma->vm_pgoff << PAGE_SHIFT) > size) return -ENXIO;
	pha += (vma->vm_pgoff << PAGE_SHIFT);
	vm_end = vma->vm_end;
	vm_start = vma->vm_start;
	vm_pgoff = vma->vm_pgoff;

	if ((vm_end - vm_start) < size)
		size = vm_end - vm_start;

	// vma->vm_flags |= (VM_READ | VM_WRITE | VM_SHM);
	vma->vm_flags |= (VM_READ | VM_WRITE | VM_RESERVED);

	if (vma->vm_flags & VM_IO)
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) |
					     _PAGE_CD_DIS | _PAGE_PWT);

	if (remap_pfn_range(vma, vm_start, (pha >> PAGE_SHIFT), size,
			    vma->vm_page_prot)) {
		ERROR_MSG("rdma_remap_page: FAIL remap_pfn_range\n");

	DEBUG_MSG("rdma_remap_page: FINISH\n");
static int rdma_mmap(struct file *file, struct vm_area_struct *vma)
{
	rdma_state_inst_t *rdma_sti;

	DEBUG_MSG("rdma_mmap: START\n");
	minor = get_file_minor(file);

	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	rdma_sti = &rdma_state->rdma_sti[instance];
	chd = &rdma_sti->dma_chans[channel];
	if (chd->allocs != RCS_ALLOCED) {
		ERROR_MSG("rdma_mmap: chd->allocs != RCS_ALLOCED\n");

	rval = rdma_remap_page_tbl((void *)chd->vdma_tm, chd->real_size,

	rval = rdma_remap_page((void *)chd->prim_buf_addr,
			       chd->real_size, vma);

		ERROR_MSG("rdma: rdma_mmap ddi_remap_page FAIL\n");

	chd->allocs = RCS_MAPPED;
	DEBUG_MSG("rdma_mmap: minor: %d\n", minor);
	DEBUG_MSG("rdma_mmap: FINISH\n");
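/*
 * get_file_minor() recovers the minor number from the file's dentry
 * and validates the major against rdma_state->major. The minor packs
 * the link instance and channel; DEV_inst()/DEV_chan() split it apart.
 */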
int get_file_minor(struct file *file)
{
	struct dentry *f_dentry_rdma;
	struct inode *d_inode;

	f_dentry_rdma = file->f_dentry;
	DEBUG_MSG("get_file_minor: START f_dentry_rdma: %p file->f_dentry: %p\n",
		  f_dentry_rdma, file->f_dentry);
	if (!f_dentry_rdma) {
		ERROR_MSG("get_file_minor: file->f_dentry is NULL\n");

	d_inode = f_dentry_rdma->d_inode;

		ERROR_MSG("get_file_minor: f_dentry->d_inode is NULL\n");

	major = MAJOR(d_inode->i_rdev);
	if (major != rdma_state->major) {
		ERROR_MSG("get_file_minor: major(%d) != rdma_state->major(%d)\n",
			  major, rdma_state->major);

	DEBUG_MSG("get_file_minor: FINISH\n");
	return MINOR(d_inode->i_rdev);
}
	rdma_reg_VID = ADDR_VID;		/* RDMA VID */
	rdma_reg_CH0_IDT = ADDR_CH0_IDT;	/* RDMA ID/Type */
	rdma_reg_CS = ADDR_CS;			/* RDMA Control/Status 000028a0 */
	rdma_reg_CH1_IDT = ADDR_CH1_IDT;	/* RDMA ID/Type */
	rdma_reg_DD_ID_0 = ADDR_DD_ID(0);	/* Data Destination ID */
	rdma_reg_DMD_ID_0 = ADDR_DMD_ID(0);	/* Data Message Destination ID */
	rdma_reg_N_IDT_0 = ADDR_N_IDT(0);	/* Neighbour ID/Type */
	rdma_reg_ES_0 = ADDR_ES(0);		/* Event Status */
	rdma_reg_IRQ_MC_0 = ADDR_IRQ_MC(0);	/* Interrupt Mask Control */
	rdma_reg_DMA_TCS_0 = ADDR_DMA_TCS(0);	/* DMA Tx Control/Status */
	rdma_reg_DMA_TSA_0 = ADDR_DMA_TSA(0);	/* DMA Tx Start Address */
	rdma_reg_DMA_TBC_0 = ADDR_DMA_TBC(0);	/* DMA Tx Byte Counter */
	rdma_reg_DMA_RCS_0 = ADDR_DMA_RCS(0);	/* DMA Rx Control/Status */
	rdma_reg_DMA_RSA_0 = ADDR_DMA_RSA(0);	/* DMA Rx Start Address */
	rdma_reg_DMA_RBC_0 = ADDR_DMA_RBC(0);	/* DMA Rx Byte Counter */
	rdma_reg_MSG_CS_0 = ADDR_MSG_CS(0);	/* Messages Control/Status */
	rdma_reg_TDMSG_0 = ADDR_TDMSG(0);	/* Tx Data_Messages Buffer */
	rdma_reg_RDMSG_0 = ADDR_RDMSG(0);	/* Rx Data_Messages Buffer */
	rdma_reg_CAM_0 = ADDR_CAM(0);		/* CAM - channel alive management */

	rdma_reg_DD_ID_1 = ADDR_DD_ID(1);	/* Data Destination ID */
	rdma_reg_DMD_ID_1 = ADDR_DMD_ID(1);	/* Data Message Destination ID */
	rdma_reg_N_IDT_1 = ADDR_N_IDT(1);	/* Neighbour ID/Type */
	rdma_reg_ES_1 = ADDR_ES(1);		/* Event Status */
	rdma_reg_IRQ_MC_1 = ADDR_IRQ_MC(1);	/* Interrupt Mask Control */
	rdma_reg_DMA_TCS_1 = ADDR_DMA_TCS(1);	/* DMA Tx Control/Status */
	rdma_reg_DMA_TSA_1 = ADDR_DMA_TSA(1);	/* DMA Tx Start Address */
	rdma_reg_DMA_TBC_1 = ADDR_DMA_TBC(1);	/* DMA Tx Byte Counter */
	rdma_reg_DMA_RCS_1 = ADDR_DMA_RCS(1);	/* DMA Rx Control/Status */
	rdma_reg_DMA_RSA_1 = ADDR_DMA_RSA(1);	/* DMA Rx Start Address */
	rdma_reg_DMA_RBC_1 = ADDR_DMA_RBC(1);	/* DMA Rx Byte Counter */
	rdma_reg_MSG_CS_1 = ADDR_MSG_CS(1);	/* Messages Control/Status */
	rdma_reg_TDMSG_1 = ADDR_TDMSG(1);	/* Tx Data_Messages Buffer */
	rdma_reg_RDMSG_1 = ADDR_RDMSG(1);	/* Rx Data_Messages Buffer */
	rdma_reg_CAM_1 = ADDR_CAM(1);		/* CAM - channel alive management */
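/*
 * init_rdma_sti() initializes the per-link software state: the instance
 * mutex, the talive/ralive alive-management states, and both
 * rw_states_m (message) and rw_states_d (data reader/writer) pairs,
 * each with its own spinlock, condition variable and timer defaults.
 */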
void init_rdma_sti(int instance)
{
	rw_state_t *pd, *pm;
	dev_rdma_sem_t *dev_sem;
	rdma_state_inst_t *rdma_sti = &rdma_state->rdma_sti[instance];

	printk("%ld - raw_spinlock_t\n", sizeof(raw_spinlock_t));
	printk("%ld - spinlock_t\n", sizeof(spinlock_t));
	rdma_sti->instance = instance;
	mutex_init(&rdma_sti->mu);
	memset(&rdma_event, 0, sizeof(struct rdma_event));
	/* spin_lock_init(&mu_fix_event); */
	memset(&stat_rdma, 0, sizeof(struct stat_rdma));
	pm = &rdma_sti->talive;
	mutex_init(&pm->mu);
	raw_spin_lock_init(&pm->mu_spin);

	pm->timer = TIMER_MIN;
	dev_sem = &pm->dev_rdma_sem;
	raw_spin_lock_init(&dev_sem->lock);
	cv_init(&dev_sem->cond_var);
	dev_sem->irq_count_rdma = 0;
	pm = &rdma_sti->ralive;
	mutex_init(&pm->mu);
	raw_spin_lock_init(&pm->mu_spin);

	pm->timer = TIMER_MIN;
	dev_sem = &pm->dev_rdma_sem;
	raw_spin_lock_init(&dev_sem->lock);
	cv_init(&dev_sem->cond_var);
	dev_sem->irq_count_rdma = 0;
	for (i = 0; i < 2; i++) {
		pm = &rdma_sti->rw_states_m[i];
		mutex_init(&pm->mu);
		raw_spin_lock_init(&pm->mu_spin);

		pm->timer = TIMER_MIN;
		dev_sem = &pm->dev_rdma_sem;
		raw_spin_lock_init(&dev_sem->lock);
		cv_init(&dev_sem->cond_var);
		dev_sem->irq_count_rdma = 0;
		pd = &rdma_sti->rw_states_d[i];
		mutex_init(&pd->mu);
		raw_spin_lock_init(&pd->mu_spin);
		dev_sem = &pd->dev_rdma_sem;
		raw_spin_lock_init(&dev_sem->lock);
		cv_init(&dev_sem->cond_var);
		dev_sem->irq_count_rdma = 0;

		pd->clock_receive_trwd = 0;
		pd->clock_begin_read = 0;
		pd->clock_end_read_old = 0;
		pd->clock_begin_read_old = 0;
		pd->trwd_send_count = 0;
		pd->ready_send_count = 0;
		pd->trwd_rec_count = 0;
		pd->ready_rec_count = 0;

		pd->timer_read = TIMER_MIN;
		pd->timer_write = TIMER_MIN;
		pd->timer_for_read = TIMER_FOR_READ_MIN;
		pd->timer_for_write = TIMER_FOR_WRITE_MIN;
	}
}
void read_regs_rdma(void)
{
	printk("0x%08x - 0x0 SHIFT_VID\n", RDR_rdma(SHIFT_VID));
	printk("0x%08x - 0x4 SHIFT_CH0_IDT\n", RDR_rdma(SHIFT_CH0_IDT));
	printk("0x%08x - 0x8 SHIFT_CS\n", RDR_rdma(SHIFT_CS));
	printk("0x%08x - 0xc SHIFT_CH1_IDT\n", RDR_rdma(SHIFT_CH1_IDT));
	printk("0x%08x 0x100 - SHIFT_DD_ID\n", RDR_rdma(SHIFT_DD_ID(0)));
	printk("0x%08x 0x104 - SHIFT_DMD_ID\n", RDR_rdma(SHIFT_DMD_ID(0)));
	printk("0x%08x 0x108 - SHIFT_N_IDT\n", RDR_rdma(SHIFT_N_IDT(0)));
	printk("0x%08x 0x10c - SHIFT_ES\n", RDR_rdma(SHIFT_ES(0)));
	printk("0x%08x 0x110 - SHIFT_IRQ_MC\n", RDR_rdma(SHIFT_IRQ_MC(0)));
	printk("0x%08x 0x114 - SHIFT_DMA_TCS\n", RDR_rdma(SHIFT_DMA_TCS(0)));
	printk("0x%08x 0x118 - SHIFT_DMA_TSA\n", RDR_rdma(SHIFT_DMA_TSA(0)));
	printk("0x%08x 0x11c - SHIFT_DMA_TBC\n", RDR_rdma(SHIFT_DMA_TBC(0)));
	printk("0x%08x 0x120 - SHIFT_DMA_RCS\n", RDR_rdma(SHIFT_DMA_RCS(0)));
	printk("0x%08x 0x124 - SHIFT_DMA_RSA\n", RDR_rdma(SHIFT_DMA_RSA(0)));
	printk("0x%08x 0x128 - SHIFT_DMA_RBC\n", RDR_rdma(SHIFT_DMA_RBC(0)));
	printk("0x%08x 0x12c - SHIFT_MSG_CS\n", RDR_rdma(SHIFT_MSG_CS(0)));
	printk("0x%08x 0x130 - SHIFT_TDMSG\n", RDR_rdma(SHIFT_TDMSG(0)));
	printk("0x%08x 0x134 - SHIFT_RDMSG\n", RDR_rdma(SHIFT_RDMSG(0)));
	printk("0x%08x 0x138 - SHIFT_CAM\n", RDR_rdma(SHIFT_CAM(0)));
}
void test_reg_rdma(void)
{
	WRR_rdma(SHIFT_TDMSG(0), 0xabcd);
}
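/*
 * free_chan() releases a channel's DMA resources: for table-mode
 * allocations it frees every rdma_tbl_32_struct_t element and then the
 * table itself; a plain buffer (chd->real_size != 0) is freed through
 * rdma_mem_free() as well.
 */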
void free_chan(dma_chan_t *chd)
{
	DEBUG_MSG("free_chan: START\n");
	if (chd->allocs > RCS_ALLOCED_B) {
		rdma_tbl_32_struct_t *peltbl;

		for (peltbl = (rdma_tbl_32_struct_t *)chd->vdma_tm,
		     rest = chd->real_size; rest > 0; peltbl++) {
			rdma_mem_free(peltbl->sz,
				      (dma_addr_t)peltbl->laddr,
				      (unsigned long)__va(peltbl->laddr));

		rdma_mem_free(chd->size_tm, chd->fdma_tm,
			      (unsigned long)chd->vdma_tm);

		if (chd->real_size) {
			rdma_mem_free(chd->real_size, chd->dma,
				      (unsigned long)chd->prim_buf_addr);

		chd->prim_buf_addr = 0;

	DEBUG_MSG("free_chan: FINISH\n");
void rdma_mem_free(size_t size, dma_addr_t dev_memory, unsigned long dma_memory)
{
	struct page *map, *mapend;

	order = get_order(size);
	mem = (caddr_t)dma_memory;
	DEBUG_MSG("rdma_mem_free: START\n");
	if (!version_mem_alloc) {
		mapend = virt_to_page(mem + (PAGE_SIZE << order) - 1);
		for (map = virt_to_page(mem); map <= mapend; map++) {
			ClearPageReserved(map);
		}
		pci_unmap_single((struct pci_dev *)rdma_state->dev_rdma,
				 dev_memory, size, PCI_DMA_FROMDEVICE);
		free_pages(dma_memory, order);
	} else {
		dma_free_coherent(NULL, size, mem, dev_memory);
	}
	DEBUG_MSG("rdma_mem_free: FINISH va: 0x%lx, fa: 0x%x size: 0x%lx\n",
		  dma_memory, dev_memory, size);
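/*
 * rdma_mem_alloc() implements the two allocation strategies selected by
 * the version_mem_alloc module parameter: the old path takes pages from
 * __get_free_pages(), marks them reserved and maps them for the device
 * with pci_map_single(); the new path obtains a coherent buffer from
 * dma_alloc_coherent(). Either way *real_size is rounded up to whole
 * pages (PAGE_SIZE << order), which callers must use when freeing.
 */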
int rdma_mem_alloc(size_t size, dma_addr_t *mem, size_t *real_size,
		   unsigned long *dma_memory)
{
	struct page *map, *mapend;

	DEBUG_MSG("rdma_mem_alloc: START\n");
	order = get_order(size);
	if (!version_mem_alloc) {
		*dma_memory = __get_free_pages(GFP_KERNEL | GFP_DMA, order);
		mapend = virt_to_page((*dma_memory) + (PAGE_SIZE << order) - 1);
		for (map = virt_to_page((*dma_memory)); map <= mapend; map++)
			SetPageReserved(map);
		*mem = pci_map_single((struct pci_dev *)rdma_state->dev_rdma,
				      (void *)*dma_memory, size,
				      PCI_DMA_FROMDEVICE);
	} else {
		*dma_memory = (unsigned long)dma_alloc_coherent(
				NULL, size, mem, GFP_KERNEL);
	}
	if (!(*dma_memory)) {
		ERROR_MSG("rdma_mem_alloc: cannot bind DMA address order: %d "
			  "size: 0x%lx\n", order, size);

	*real_size = PAGE_SIZE << order;
	DEBUG_MSG("rdma_mem_alloc: FINISH va: 0x%lx fa: 0x%x size: 0x%lx "
		  "real_size: 0x%lx\n",
		  *dma_memory, *mem, size, *real_size);
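/*
 * init_chan() allocates a channel's DMA buffer. In table mode (tm;
 * forced off on e3m via tmp_tm = 0) it builds an rdma_tbl_32_struct_t
 * scatter table out of PAGE_SIZE elements until reqlen bytes are
 * covered and points chd->dma at the table's bus address; in single
 * mode one contiguous buffer (reqlen <= 0x800000) is allocated. On
 * success the channel becomes RCS_ALLOCED and real_size is returned;
 * on failure the channel is reset to RCS_EMPTY.
 */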
int init_chan(dma_chan_t *chd, int reqlen, int tm)
{
	char *err_msg = NULL;
	rdma_tbl_32_struct_t *peltbl;
	rdma_addr_struct_t pxx;
	int tmp_tm = 0;	/* Disable for e3m */

	DEBUG_MSG("init_chan: START\n");

		ERROR_MSG("init_chan: chd->allocs already %d\n", chd->allocs);

	if (reqlen > 0x800000) {
		ERROR_MSG("init_chan: buffer size too large; "
			  "the buffer must be <= 0x800000\n");

	chd->allocs = RCS_ALLOCED_B;
	DEBUG_MSG("init_chan: try alloc 0x%x\n", reqlen);

		DEBUG_MSG("init_chan: table mode PAGE_SIZE: %x\n", PAGE_SIZE);
		DEBUG_MSG("init_chan: try alloc for tm size: 0x%x\n",
		if (rdma_mem_alloc(SIZE_TBL32_RDMA,
				   (dma_addr_t *)&chd->fdma_tm, &chd->size_tm,
				   (unsigned long *)&chd->vdma_tm)) {
			err_msg = "rdma_mem_alloc for tm";

		pxx.addr = (unsigned long)chd->vdma_tm;
		DEBUG_MSG("init_chan: 0x%08x%08x vdma_tm\n", pxx.fields.haddr,
		pxx.addr = chd->fdma_tm;
		DEBUG_MSG("init_chan: 0x%08x%08x fdma_tm\n", pxx.fields.haddr,

		DEBUG_MSG("init_chan: reqlen: 0x%08x"
			  " rest: 0x%08x\n", reqlen, rest);

		for (peltbl = (rdma_tbl_32_struct_t *)chd->vdma_tm; rest > 0;
			unsigned long addr;	/* element address */

			if (rdma_mem_alloc(PAGE_SIZE /*SIZE_EL_TBL64_RDMA*/,
					   (dma_addr_t *)&peltbl->laddr, &size_el,
					   (unsigned long *)&addr)) {
				err_msg = "rdma_mem_alloc for tm";

			pxx.addr = (unsigned long)peltbl;
			DEBUG_MSG("init_chan: 0x%08x%08x peltbl\n",
				  pxx.fields.haddr, pxx.fields.laddr);
			pxx.addr = peltbl->laddr;
			DEBUG_MSG("init_chan: 0x%08x%08x peltbl->addr\n",
				  pxx.fields.haddr, pxx.fields.laddr);

			peltbl->sz = (unsigned int)size_el;
			DEBUG_MSG("init_chan: peltbl->sz: 0x%08x "
				  "rest: 0x%08x\n", peltbl->sz, rest);
			chd->real_size += size_el;

		chd->dma = (unsigned int)chd->fdma_tm;
	} else {
		DEBUG_MSG("init_chan: single mode PAGE_SIZE: %x\n", PAGE_SIZE);
		if (rdma_mem_alloc((unsigned long)reqlen,
				   (dma_addr_t *)&chd->dma_busa, &chd->real_size,
				   (unsigned long *)&chd->prim_buf_addr)) {
			err_msg = "rdma_mem_alloc";

		chd->dma = chd->dma_busa;
		pxx.addr = chd->dma;
		DEBUG_MSG("init_chan: 0x%08x%08x chd->dma\n",
			  pxx.fields.haddr, pxx.fields.laddr);

	chd->full = (uint_t)chd->dma;
	chd->allocs = RCS_ALLOCED;
	DEBUG_MSG("init_chan: FINISH chd->real_size: %lx\n", chd->real_size);
	return chd->real_size;

	chd->allocs = RCS_EMPTY;
	ERROR_MSG("init_chan: %s FAILED ****\n", err_msg);
module_init(rdma_init);
module_exit(rdma_cleanup);

MODULE_LICENSE("GPL");