1 #include <linux/module.h>
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/string.h>
7 #include <linux/slab.h>
8 #include <linux/delay.h>
10 #include <linux/init.h>
11 #include <linux/proc_fs.h>
13 #include <asm/uaccess.h>
14 #include <linux/pci.h>
15 #include <linux/mcst/ddi.h>
16 #include <asm-l/bootinfo.h>
17 #include <linux/mcst/rdma_user_intf.h>
18 #include <asm/setup.h>
21 #ifndef LINUX_2_33_DBG
22 #include <asm/mpspec.h>
27 #include <asm/sic_regs.h>
28 #include <asm/sic_regs_access.h>
29 #include <asm/e2k_sic.h>
30 #include <asm/uaccess.h>
32 #ifndef LINUX_2_33_DBG
33 #include <asm/iolinkmask.h>
34 #include <linux/topology.h>
36 #include "rdma_regs.h"
38 #include "rdma_error.h"
40 #define NUM_NODE_RDMA(num_link_rdma) (int)(num_link_rdma/NODE_NUMIOLINKS)
41 #define NUM_LINK_IN_NODE_RDMA(num_link_rdma)\
42 (num_link_rdma - ((int)(num_link_rdma/NODE_NUMIOLINKS))*NODE_NUMIOLINKS)
47 #define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
50 MODULE_LICENSE("GPL");
54 static int atl_v
= TR_ATL_B
;
55 module_param(atl_v
, int, 0);
56 MODULE_PARM_DESC(atl_v
, "Changes the value of ATL (alive timer limit) reg CAM.");
58 /* Struct for class rdma in sysfs */
59 static struct class *rdma_class
;
61 /*********************************************************************/
62 /* Enable RFSM - rfsm. */
63 /* rfsm = ENABLE_RFSM - RFSM disable (default). */
64 /* rfsm = DMA_RCS_RFSM - RFSM enable. */
65 /*********************************************************************/
66 #define CLEAR_RFSM DISABLE_RFSM
67 unsigned int rfsm
= CLEAR_RFSM
;
69 /*********************************************************************/
70 /* Enable exit GP0 - enable_exit_gp0. */
71 /* enable_exit_gp0 = 0 - disable (default). */
72 /* enable_exit_gp0 = 1 - RFSM enable. */
73 /*********************************************************************/
74 unsigned int enable_exit_gp0
= DISABLE_EXIT_GP0
;
76 extern int rdma_present
;
79 unsigned int count_read_sm_max
= 800;
80 unsigned int intr_rdc_count
[MAX_NUMIOLINKS
];
81 unsigned int msg_cs_dmrcl
;
82 unsigned int state_cam
= 0;
83 unsigned long time_ID_REQ
;
84 unsigned long time_ID_ANS
;
85 unsigned int state_GP0
;
86 link_id_t rdma_link_id
;
89 static int do_ioctl(struct file
*f
, unsigned cmd
, unsigned long arg
);
90 static long rdma_compat_ioctl(struct file
*f
, unsigned cmd
,
93 static long rdma_ioctl(struct file
*filp
, unsigned int cmd
,
95 /* static int rdma_ioctl(struct inode *inode, struct file *filp,
96 unsigned int cmd, unsigned long arg); */
97 static ssize_t
rdma_read(struct file
*, char *, size_t, loff_t
*);
98 static ssize_t
rdma_write(struct file
*, const char *, size_t, loff_t
*);
99 static int rdma_open(struct inode
*inode
, struct file
*file
);
100 static int rdma_close(struct inode
*inode
, struct file
*file
);
101 static int rdma_mmap(struct file
*file
, struct vm_area_struct
*vma
);
102 void test_send_msg_rdma(unsigned int i
, unsigned int msg
);
103 int get_file_minor(struct file
*file
);
105 void free_chan(dma_chan_t
*chd
);
106 void rdma_mem_free(size_t size
, dma_addr_t dev_memory
,
107 unsigned long dma_memory
);
108 void init_rdma_sti(int instance
);
109 void read_regs_rdma(int);
110 int rdma_mem_alloc(int node
, size_t size
, dma_addr_t
*mem
,
111 size_t *real_size
, unsigned long *dma_memory
);
112 int init_chan(dma_chan_t
*chd
, int reqlen
, int tm
);
113 int write_buf(rdma_state_inst_t
*xsp
, const char *buf
,
114 unsigned int size
, int instance
, int channel
,
115 rdma_ioc_parm_t
*parm
);
116 int read_buf(rdma_state_inst_t
*xsp
, const char *buf
, int size
,
117 int instance
, int channel
, rdma_ioc_parm_t
*parm
);
118 int rdma_remap_page(void *va
, size_t sz
,
119 struct vm_area_struct
*vma
);
120 int rdma_remap_page_tbl(void *va
, size_t sz
,
121 struct vm_area_struct
*vma
);
122 long wait_time_rdma(struct rdma_reg_state
*rdma_reg_state
,
123 signed long timeout
);
124 int rdma_check_buf(unsigned long addr_buf
, unsigned int cnst
,
125 unsigned int need_free_page
, char *prefix
);
127 int mk_unlink(char *filename
);
128 int mk_rm_dir(char *dir
);
129 int mk_mkdir(char *pathname
, int mode
);
130 int mk_mknod(char *filename
, int mode
, dev_t dev
);
131 unsigned long join_curr_clock( void );
132 unsigned int RDR_rdma(unsigned int reg
, unsigned int node
);
133 void WRR_rdma(unsigned int reg
, unsigned int node
, unsigned int val
);
135 int create_dev_rdma(int major
);
136 int remove_dev_rdma(int major
);
139 DEFINE_RAW_SPINLOCK(mu_fix_event
);
141 static struct file_operations rdma_fops
= {
142 .owner
= THIS_MODULE
,
145 .unlocked_ioctl
= rdma_ioctl
,
147 .compat_ioctl
= rdma_compat_ioctl
,
151 .release
= rdma_close
,
155 #define NBSR_INF_CFG 0x7088 /* 4 Node Configuration Information */
156 #define IO_HAB_FLAG 0x00000080
157 #define E90_IO_CSR_ch_on 0x80000000
158 #define E90_RDMA_CS_ch_on 0x80000000
159 #define IOHUB_IOL_MODE 0 /* controller is IO HUB */
160 #define RDMA_IOL_MODE 1 /* controller is RDMA */
161 #define IOHUB_ONLY_IOL_ABTYPE 1 /* abonent has only IO HUB controller */
162 #define RDMA_ONLY_IOL_ABTYPE 2 /* abonent has only RDMA controller */
163 #define RDMA_IOHUB_IOL_ABTYPE 3 /* abonent has RDMA and IO HUB controller */
164 #define E90_IOL_CSR_abtype_mask 0x007f0000
166 #define numa_node_id() e90s_cpu_to_node(raw_smp_processor_id())
167 #undef num_possible_rdmas
168 #define num_possible_rdmas() node_rdma_num
169 #undef num_online_rdmas
170 #define num_online_rdmas() node_online_rdma_num
172 #define for_each_rdma(node) \
173 for (node = 0; node < MAX_NUMIOLINKS; node++) \
174 if (!((node_rdma_map >> node) & 0x00000001)) \
176 #undef for_each_online_rdma
177 #define for_each_online_rdma(node) \
178 for (node = 0; node < MAX_NUMIOLINKS; node++ ) \
179 if (!((node_online_rdma_map >> node) & 0x00000001)) \
181 #undef SIC_io_reg_offset /* FIXME: defined at e90s.h */
182 #define SIC_io_reg_offset(io_link, reg) ((reg) + 0x1000 * (io_link))
184 static inline unsigned int
185 sic_read_node_iolink_nbsr_reg(int node_id
, unsigned int io_link
, int reg_offset
)
187 unsigned int reg_value
;
189 reg_value
= __raw_readl(BASE_NODE0
+ node_id
* NODE_OFF
+
190 SIC_io_reg_offset(io_link
, reg_offset
));
195 sic_write_node_iolink_nbsr_reg(int node_id
, int io_link
,
196 unsigned int reg_offset
, unsigned int reg_value
)
198 __raw_writel(reg_value
, BASE_NODE0
+ node_id
* NODE_OFF
+
199 SIC_io_reg_offset(io_link
, reg_offset
));
/*
 * Read an NBSR register of the node the calling CPU belongs to
 * (numa_node_id() is remapped above to e90s_cpu_to_node()).
 */
static inline unsigned int
sic_read_nbsr_reg(int reg_offset)
{
	return (sic_read_node_nbsr_reg(numa_node_id(), reg_offset));
}
/*
 * Write an NBSR register of the node the calling CPU belongs to;
 * mirror of sic_read_nbsr_reg().
 * NOTE(review): return-type line missing in the damaged source;
 * 'static inline void' reconstructed to match the read-side helper.
 */
static inline void
sic_write_nbsr_reg(int reg_offset, unsigned int reg_value)
{
	sic_write_node_nbsr_reg(numa_node_id(), reg_offset, reg_value);
}
217 unsigned int node_rdma_map
= 0;
218 unsigned int node_online_rdma_map
= 0;
219 int node_rdma_num
= 0;
220 int node_online_rdma_num
= 0;
/*
 * Probe every node's IO link on E90S and classify it as an IO HUB or an
 * RDMA controller, filling the node_{iohub,rdma}_map bitmaps and the
 * corresponding *_num counters declared above.  Online (cable present,
 * channel-on bit set) links additionally go into the *_online_* maps.
 * NOTE(review): this block is damaged — braces, the switch statement
 * skeleton and several lines are missing from the visible text; comments
 * below describe only what the surviving lines demonstrate.
 */
223 void init_node_e90s( void )
225 /* Until no support NUMA for sparc V9 in kernel*/
226 unsigned int node_iohub_map
= 0;
227 unsigned int node_online_iohub_map
= 0;
228 int node_iohub_num
= 0;
229 int node_online_iohub_num
= 0;
230 unsigned int node_mask
= 0, cpu_mask
= 0, i
;
/* Collect a bitmask of online CPUs, then derive which nodes are
 * populated: each node contributes E90S_MAX_NR_NODE_CPUS bits. */
235 for_each_online_cpu(node
) {
236 cpu_mask
= cpu_mask
| (1 << node
);
238 for (i
= 0; i
< MAX_NUMIOLINKS
; i
++ ) {
239 if ((cpu_mask
>> E90S_MAX_NR_NODE_CPUS
*i
) & 0x0000000f)
240 node_mask
= node_mask
| (1 << i
);
242 for (i
= 0; i
< MAX_NUMIOLINKS
; i
++ )
244 if ((node_mask
>> i
) & 0x00000001)
247 #define DBG_REG_RDMA 0
/* Debug dump of the link's NBSR configuration registers. */
249 reg
= RDR_rdma( NBSR_INT_CFG
, node
);
250 printk("NBSR_INT_CFG: %x \n", reg
);
251 reg
= RDR_rdma(NBSR_INF_CFG
, node
);
252 printk("NBSR_INF_CFG: %x \n", reg
);
253 reg
= RDR_rdma(NBSR_NODE_CFG
, node
);
254 printk("NBSR_NODE_CFG: %x \n", reg
);
255 reg
= RDR_rdma(SHIFT_IO_CSR
,node
);
256 printk("SHIFT_IO_CSR: %x \n", reg
);
257 reg
= RDR_rdma(SHIFT_CS
, node
);
258 printk("SHIFT_CS: %x \n", reg
);
/* NODE_CFG's IO_HAB_FLAG selects IO HUB vs RDMA mode for this link. */
261 reg
= RDR_rdma(NBSR_NODE_CFG
, node
);
262 printk("Node #%d IO LINK is", node
);
264 if ((reg
& IO_HAB_FLAG
) == IOHUB_IOL_MODE
) {
265 node_iohub_map
= node_iohub_map
| (1 << node
);
267 printk(" IO HUB controller");
/* Channel-on bit of IO_CSR marks the link as online. */
269 RDR_rdma(SHIFT_IO_CSR
, node
);
270 if (reg
& E90_IO_CSR_ch_on
) {
271 node_online_iohub_map
= node_online_iohub_map
|
273 node_online_iohub_num
++;
/* Otherwise the link is an RDMA controller. */
280 node_rdma_map
= node_rdma_map
| (1 << node
);
282 printk(" RDMA controller");
283 reg
= RDR_rdma(SHIFT_CS
, node
);
284 if (reg
& E90_RDMA_CS_ch_on
) {
285 node_online_rdma_map
= node_online_rdma_map
|
287 node_online_rdma_num
++;
/* Report what kind of abonent sits on the far end of the link
 * (bits 16.. of NBSR_INF_CFG). */
296 reg
= RDR_rdma( NBSR_INF_CFG
, node
);
297 int ab_type
= (reg
& E90_IOL_CSR_abtype_mask
) >> 16 ;
299 printk(" connected to");
301 case IOHUB_ONLY_IOL_ABTYPE
:
302 printk(" IO HUB controller");
304 case RDMA_ONLY_IOL_ABTYPE
:
305 printk(" RDMA controller");
307 case RDMA_IOHUB_IOL_ABTYPE
:
308 printk(" IO HUB/RDMA controller");
311 printk(" unknown controller");
/*
 * Write an NBSR register addressed by a flat RDMA link number:
 * NUM_NODE_RDMA() extracts the node, NUM_LINK_IN_NODE_RDMA() the link
 * within that node (see the macros at the top of the file).
 * NOTE(review): return-type line missing in the damaged source;
 * 'static inline void' reconstructed to match the read-side twin below.
 */
static inline void
sic_write_node_nbsr_reg_rdma(int node_id, unsigned int reg_offset,
			     unsigned int reg_value)
{
	sic_write_node_iolink_nbsr_reg(NUM_NODE_RDMA(node_id),
				       NUM_LINK_IN_NODE_RDMA(node_id),
				       reg_offset, reg_value);
}
/*
 * Read an NBSR register addressed by a flat RDMA link number;
 * mirror of sic_write_node_nbsr_reg_rdma().
 * NOTE(review): braces and return were lost in the damaged source;
 * returning 'reg_value' follows from the declared return type.
 */
static inline unsigned int
sic_read_node_nbsr_reg_rdma(int node_id, int reg_offset)
{
	unsigned int reg_value;

	reg_value = sic_read_node_iolink_nbsr_reg(NUM_NODE_RDMA(node_id),
						  NUM_LINK_IN_NODE_RDMA(node_id),
						  reg_offset);
	return reg_value;
}
/*
 * Return the current clock-cycle counter, used throughout this file to
 * timestamp events (fix_event) and broadcasts.
 * NOTE(review): only the CONFIG_E90S arm survives in the damaged text;
 * the #else branch and the return statement are missing — presumably
 * 'ret' is returned, but this must be confirmed against the original.
 */
340 unsigned long join_curr_clock( void )
343 #ifdef CONFIG_E90S /* E90S */
346 ret
= E2K_GET_DSREG(clkr
);
351 static inline void __raw_add_wait_queue_from_ddi(raw_wait_queue_head_t
*head
,
352 raw_wait_queue_t
*new)
354 list_add(&new->task_list
, &head
->task_list
);
356 static inline void __raw_remove_wait_queue_from_ddi(raw_wait_queue_head_t
*head
,
357 raw_wait_queue_t
*old
)
359 list_del(&old
->task_list
);
362 void raw_add_wait_queue_from_ddi(raw_wait_queue_head_t
*q
,
363 raw_wait_queue_t
*wait
)
367 raw_spin_lock_irqsave(&q
->lock
, flags
);
368 __raw_add_wait_queue_from_ddi(q
, wait
);
369 raw_spin_unlock_irqrestore(&q
->lock
, flags
);
372 void raw_remove_wait_queue_from_ddi(raw_wait_queue_head_t
*q
,
373 raw_wait_queue_t
*wait
)
377 raw_spin_lock_irqsave(&q
->lock
, flags
);
378 __raw_remove_wait_queue_from_ddi(q
, wait
);
379 raw_spin_unlock_irqrestore(&q
->lock
, flags
);
381 unsigned int rdc_byte
;
383 void WRR_rdma(unsigned int reg
, unsigned int node
, unsigned int val
)
385 /* sic_write_node_iolink_nbsr_reg(node, io_link, reg, val); */
386 sic_write_node_nbsr_reg_rdma(node
, reg
, val
);
387 fix_event(node
, WRR_EVENT
, reg
, val
);
390 EXPORT_SYMBOL(WRR_rdma
);
392 unsigned int RDR_rdma(unsigned int reg
, unsigned int node
)
396 /* val = sic_read_node_iolink_nbsr_reg(node, io_link, reg); */
397 val
= sic_read_node_nbsr_reg_rdma(node
, reg
);
398 fix_event(node
, RDR_EVENT
, reg
, val
);
402 EXPORT_SYMBOL(RDR_rdma
);
404 #if defined(TRACE_LATENCY) || defined(TRACE_LATENCY_MSG) || \
405 defined(TRACE_LATENCY_SM)
406 void user_trace_stop_my(void)
408 #ifdef CONFIG_FUNCTION_TRACER
413 void user_trace_start_my(void)
415 #ifdef CONFIG_FUNCTION_TRACER
421 unsigned int allign_dma(unsigned int n
)
423 if (n
&(ALLIGN_RDMA
-1)) {
425 n
= n
&(~(ALLIGN_RDMA
-1));
430 int MCG_CS_SEND_ALL_MSG
=
431 (MSG_CS_SD_Msg
| MSG_CS_SGP0_Msg
| MSG_CS_SGP1_Msg
|
432 MSG_CS_SGP2_Msg
| MSG_CS_SGP3_Msg
| MSG_CS_SL_Msg
|
433 MSG_CS_SUL_Msg
| MSG_CS_SIR_Msg
);
434 int MSG_CS_MSF_ALL
= MSG_CS_DMPS_Err
| MSG_CS_MPCRC_Err
| MSG_CS_MPTO_Err
|
436 unsigned int count_loop_send_msg_max
= 10;
437 unsigned int count_wait_rdm_max
= 64;
439 dev_rdma_sem_t
*msg_snd_dev
[2];
446 do_gettimeofday(&tv
);
447 val
= tv
.tv_sec
* 1000000000LL + tv
.tv_usec
* 1000LL;
451 extern int wake_up_state(struct task_struct
*p
, unsigned int state
);
453 static void __raw_wake_up_common_from_ddi(raw_wait_queue_head_t
*q
)
455 struct list_head
*tmp
, *next
;
456 raw_wait_queue_t
*curr
;
458 list_for_each_safe(tmp
, next
, &q
->task_list
) {
459 curr
= list_entry(tmp
, raw_wait_queue_t
, task_list
);
460 //wake_up_state(curr->task, TASK_UNINTERRUPTIBLE |
461 // TASK_INTERRUPTIBLE);
462 wake_up_process(curr
->task
);
466 void __raw_wake_up_from_ddi(raw_wait_queue_head_t
*q
)
470 raw_spin_lock_irqsave(&q
->lock
, flags
);
471 __raw_wake_up_common_from_ddi(q
);
472 raw_spin_unlock_irqrestore(&q
->lock
, flags
);
475 int ddi_cv_broadcast_from_ddi(kcondvar_t
*cvp
)
477 __raw_wake_up_from_ddi(cvp
);
481 int rdma_cv_broadcast_rdma(void* dev_rdma_sem
, unsigned int instance
)
483 rdma_addr_struct_t p_xxb
;
485 dev_rdma_sem_t
*dev
= dev_rdma_sem
;
486 dev
->irq_count_rdma
++;
487 dev
->time_broadcast
= join_curr_clock();
488 p_xxb
.addr
= (unsigned long)dev
;
489 fix_event(instance
, RDMA_BROADCAST
, p_xxb
.fields
.laddr
,
490 dev
->irq_count_rdma
);
491 ddi_cv_broadcast_from_ddi(&dev
->cond_var
);
495 /* Convert mksec to HZ */
497 drv_usectohz_from_ddi(register clock_t mksec
)
500 struct timespec rqtp
;
502 rqtp
.tv_nsec
= ((mksec
% 1000000L) * 1000L);
503 rqtp
.tv_sec
= mksec
/ 1000000L;
504 DEBUG_MSG("drv_usectohz: start, mksec = 0x%lx\n", mksec
);
505 DEBUG_MSG("drv_usectohz: rqtp.tv_nsec = 0x%lx, rqtp.tv_sec = 0x%lx\n",
506 rqtp
.tv_nsec
, rqtp
.tv_sec
);
507 clock
= timespec_to_jiffies(&rqtp
);
/*
 * Timed wait on a DDI condition variable: enqueue the current task,
 * drop the caller's spinlock if it is held, sleep until broadcast or
 * until the absolute jiffies deadline 'tim', then re-take the lock.
 * NOTE(review): the function's return-type line, braces and the tail
 * (the signal_pending() handling and return value) are missing from the
 * damaged text; comments describe only the surviving lines.
 */
511 ddi_cv_spin_timedwait_from_ddi(kcondvar_t
*cvp
, raw_spinlock_t
*lock
, long tim
)
513 unsigned long expire
;
515 int raw_spin_locking_done
= 0;
516 struct task_struct
*tsk
= current
;
517 DECLARE_RAW_WAIT_QUEUE(wait
);
/* 'tim' is an absolute jiffies deadline; convert to a relative timeout. */
518 expire
= tim
- jiffies
;
519 tsk
->state
= TASK_INTERRUPTIBLE
;
520 raw_add_wait_queue_from_ddi(cvp
, &wait
);
/* Release the caller's spinlock (if held) before sleeping,
 * re-acquired symmetrically after wakeup below. */
521 raw_spin_locking_done
= raw_spin_is_locked(lock
);
522 if(raw_spin_locking_done
)
523 spin_mutex_exit(lock
);
525 fix_event(0, WAIT_TRY_SCHTO_EVENT
,
526 (unsigned int)expire
, 0);
/* schedule_timeout() returns the remaining jiffies (0 on timeout). */
527 expire
= schedule_timeout(expire
);
528 raw_remove_wait_queue_from_ddi(cvp
, &wait
);
529 tsk
->state
= TASK_RUNNING
;
530 if(raw_spin_locking_done
)
531 spin_mutex_enter(lock
);
/* Interrupted-by-signal path; its body is lost in the damaged text. */
533 if (signal_pending(current
)) {
/*
 * Wait (up to 'usec_timeout', expressed in scheduler ticks as used
 * below) for the interrupt handler to broadcast on a device RDMA
 * semaphore.  Expects dev->lock to be held on entry; traces latency
 * between the broadcast timestamp and the wakeup via fix_event().
 * NOTE(review): this block is damaged — braces, several statement tails
 * and the return paths are missing; comments describe only the
 * surviving lines.
 */
542 int wait_for_irq_rdma_sem(void* dev_rdma_sem
, signed long usec_timeout
,
543 unsigned int instance
)
545 unsigned int time_current
;
546 unsigned int delta_time
;
547 dev_rdma_sem_t
*dev
= dev_rdma_sem
;
548 rdma_addr_struct_t p_xxb
;
550 signed long timeout_tick
;
/* Sanity check: this helper relies on the caller holding dev->lock. */
552 if (!raw_spin_is_locked(&dev
->lock
)) {
553 printk("wait_for_irq_rdma_sem: spin is NOT locked:dev: %p\n",
/* Fast path: the interrupt already fired (counted broadcast pending). */
557 if (dev
->irq_count_rdma
) {
558 printk("wait_for_irq_rdma_sem(%p): dev->irq_count_rdma: %u"
559 "num_obmen: %u\n", &dev
->lock
, dev
->irq_count_rdma
,
560 (unsigned int)dev
->num_obmen
);
/* Compute elapsed clocks since the broadcast, handling counter
 * wrap-around; bit 31 flags the wrapped case in the trace. */
562 if (dev
->time_broadcast
) {
563 time_current
= join_curr_clock();
564 if (time_current
> dev
->time_broadcast
) {
565 delta_time
= (unsigned int)(time_current
-
566 dev
->time_broadcast
);
568 delta_time
= (unsigned int)(time_current
+
569 (~0U - dev
->time_broadcast
));
571 delta_time
|= (1<<31);
572 fix_event(instance
, WAIT_RET_SCHT0_EVENT
, delta_time
,
574 fix_event(instance
, WAIT_RET_SCHT0_EVENT
,
577 dev
->time_broadcast
= 0;
/* Slow path: compute the absolute deadline and sleep on the condvar. */
581 p_xxb
.addr
= usec_timeout
;
582 fix_event(instance
, WAIT_TRY_SCHTO_EVENT
,
583 p_xxb
.fields
.laddr
, dev
->num_obmen
);
584 timeout_tick
= (unsigned long)jiffies
;
585 timeout_tick
+= usec_timeout
;
586 ret
= ddi_cv_spin_timedwait_from_ddi(&dev
->cond_var
, &dev
->lock
,
/* After wakeup: same wrap-aware latency computation as above. */
589 if (dev
->time_broadcast
) {
590 time_current
= join_curr_clock();
591 if (time_current
> dev
->time_broadcast
) {
592 delta_time
= (unsigned int)(time_current
-
593 dev
->time_broadcast
);
595 delta_time
= (unsigned int)(time_current
+
596 (~0U - dev
->time_broadcast
));
598 fix_event(instance
, WAIT_RET_SCHT1_EVENT
, ret
, dev
->num_obmen
);
599 dev
->time_broadcast
= 0;
/* Woken without a pending broadcast: trace as SCHT2. */
601 fix_event(dev
->irq_count_rdma
, WAIT_RET_SCHT2_EVENT
, ret
,
608 rdma_event_t rdma_event
;
609 int rdma_event_init
= 0;
611 #include "get_event_rdma.c"
/*
 * Append one entry to the global circular event trace 'rdma_event'
 * under mu_fix_event: records a timestamp (join_curr_clock), the event
 * code, the originating channel/link and two event-specific values.
 * The write cursor wraps at SIZE_EVENT.  No-op until rdma_event_init
 * is set.
 * NOTE(review): the 'flags' declaration and the body of the early-out
 * branch are missing from the damaged text.
 */
613 void fix_event_proc(unsigned int channel
, unsigned int event
,
614 unsigned int val1
, unsigned int val2
)
616 struct event_cur
*event_cur
;
/* Tracing not yet initialized — drop the event. */
619 if (!rdma_event_init
)
621 raw_spin_lock_irqsave(&mu_fix_event
, flags
);
622 event_cur
= &rdma_event
.event
[rdma_event
.event_cur
];
623 event_cur
->clkr
= join_curr_clock();
624 event_cur
->event
= event
;
625 event_cur
->channel
= channel
;
626 event_cur
->val1
= val1
;
627 event_cur
->val2
= val2
;
/* Advance and wrap the circular-buffer cursor. */
628 rdma_event
.event_cur
++;
629 if (SIZE_EVENT
== rdma_event
.event_cur
) {
630 rdma_event
.event_cur
= 0;
632 raw_spin_unlock_irqrestore(&mu_fix_event
, flags
);
636 DECLARE_WAIT_QUEUE_HEAD(wqh_1
);
638 #include "rdma_intr.c"
639 #include "rdma_read_buf.c"
640 #include "rdma_write_buf.c"
641 #include "rdma_send_msg.c"
643 struct rdma_state
*rdma_state
;
647 struct rdma_reg_state rdma_reg_state
[MAX_NUMIOLINKS
];
/*
 * Module init: verify the machine has a full SIC with RDMA links,
 * register the character device, allocate and zero the global
 * rdma_state, program each online link's ID/CS/DMA registers, unmask
 * interrupts, send an identification message (MSG_CS_SIR_Msg) on every
 * link and create the /dev nodes.
 * NOTE(review): this block is damaged — braces, return statements and
 * whole spans (e.g. original lines 779-798, the irq_mc setup) are
 * missing; comments describe only the surviving lines.
 */
649 static int __init
rdma_init(void)
654 size_t size_rdma_state
;
655 rdma_addr_struct_t p_xxb
;
656 DEBUG_MSG("rdma_init: START\n");
657 DEBUG_MSG("rdma_init: %lx - raw_spinlock_t\n", sizeof (raw_spinlock_t
));
658 DEBUG_MSG("rdma_init: %lx - spinlock_t\n", sizeof (spinlock_t
));
/* Optional debug dump of entry-point addresses. */
660 #if RDMA_PRN_ADDR_FUN
661 printk("ADDR_FUN: %p - static rdma_ioctl\n", rdma_ioctl
);
662 printk("ADDR_FUN: %p - static rdma_read\n", rdma_read
);
663 printk("ADDR_FUN: %p - static rdma_write\n", rdma_write
);
664 printk("ADDR_FUN: %p - static rdma_open\n", rdma_open
);
665 printk("ADDR_FUN: %p - static rdma_close\n", rdma_close
);
666 printk("ADDR_FUN: %p - static rdma_mmap\n", rdma_mmap
);
667 printk("ADDR_FUN: %p - get_file_minor\n", get_file_minor
);
668 printk("ADDR_FUN: %p - free_chan\n", free_chan
);
669 printk("ADDR_FUN: %p - rdma_mem_free\n", rdma_mem_free
);
670 printk("ADDR_FUN: %p - init_rdma_sti\n", init_rdma_sti
);
671 printk("ADDR_FUN: %p - read_regs_rdma\n", read_regs_rdma
);
672 printk("ADDR_FUN: %p - rdma_mem_alloc\n", rdma_mem_alloc
);
673 printk("ADDR_FUN: %p - init_chan\n", init_chan
);
674 printk("ADDR_FUN: %p - write_buf\n", write_buf
);
675 printk("ADDR_FUN: %p - read_buf\n", read_buf
);
676 printk("ADDR_FUN: %p - rdma_remap_page\n", rdma_remap_page
);
/* Hardware capability / availability checks; each ERROR_MSG path
 * aborts the load (bodies partly lost). */
679 if (!HAS_MACHINE_E2K_FULL_SIC
) {
680 ERROR_MSG("rdma_init: sorry, I am worked on e3s/e90s/e2s\n");
681 DEBUG_MSG("rdma_init: FINISH\n");
687 ERROR_MSG("rdma_init: RDMA registers busy. \n");
693 if (!num_possible_rdmas()) {
694 ERROR_MSG("rdma_init: hard rdma is absent\n");
698 if (!num_online_rdmas()) {
699 ERROR_MSG("rdma_init: RDMA does not support hot plugging."
700 "Connect the cable and reboot machine.\n");
/* Platform banner: E90S vs E3S/CUBIC/E2S, loopback support note. */
706 INFO_MSG("RDMA: I am worked on E90S, NODE_NUMIOLINKS: %d"
707 "MAX_NUMIOLINKS: %d\n ", NODE_NUMIOLINKS
, MAX_NUMIOLINKS
);
708 INFO_MSG("E90S. Loopback mode is not implemented.\n");
710 INFO_MSG("I am worked on E3S/CUBIC/E2S, NODE_NUMIOLINKS: %d "
711 "MAX_NUMIOLINKS: %d\n", NODE_NUMIOLINKS
, MAX_NUMIOLINKS
);
712 if (IS_MACHINE_E3S
) {
713 INFO_MSG("E3S. Loopback mode is not implemented.\n");
715 if (IS_MACHINE_ES2
) {
716 INFO_MSG("CUBIC. Loopback mode is not implemented.\n");
718 if (IS_MACHINE_E2S
) {
719 INFO_MSG("E2S. Loopback mode implemented.\n");
720 INFO_MSG("E2S. IS_MACHINE_E2S: %d IS_MACHINE_E2S: %x.\n",
721 IS_MACHINE_E2S
, IS_MACHINE_E2S
);
/* Register the char device (dynamic major) and trace module start. */
724 node
= numa_node_id();
725 fix_event(node
, RDMA_INIT
, START_EVENT
, 0);
726 major
= register_chrdev(0, board_name
, &rdma_fops
);
728 ERROR_MSG("rdma_init: There isn't free major\n");
731 DEBUG_MSG("rdma_init: major: %d\n", major
);
732 DEBUG_MSG("rdma_init: I am on %d numa_node_id\n", node
);
733 DEBUG_MSG("rdma_init: %lx: sizeof (nodemask_t)\n", sizeof (nodemask_t
));
/* Hook the shared interrupt dispatcher to this driver's handler. */
735 rdma_interrupt_p
= rdma_interrupt
;
/* Allocate and zero the global driver state; roll back chrdev on OOM. */
737 size_rdma_state
= sizeof (struct rdma_state
);
738 rdma_state
= (struct rdma_state
*)kmalloc(size_rdma_state
, GFP_KERNEL
);
739 if (rdma_state
== (struct rdma_state
*)NULL
) {
740 ERROR_MSG("rdma_init: rdma_state == NULL\n");
741 unregister_chrdev(major
, board_name
);
745 memset(rdma_state
, 0, size_rdma_state
);
746 DEBUG_MSG("rdma_init: sizeof (struct rdma_state): %x\n",
748 rdma_state
->size_rdma_state
= size_rdma_state
;
749 rdma_state
->major
= major
;
/* Program each online link's identifier from the base MAC address. */
750 for_each_online_rdma(i
) {
751 WRR_rdma(SHIFT_CH_IDT
, i
, (l_base_mac_addr
[3] + i
) |
752 ((l_base_mac_addr
[4] + i
) << 8));
/* Per-link hardware bring-up: CS setup, TX/RX DMA reset. */
755 for_each_online_rdma(i
) {
757 cs
= RDR_rdma(SHIFT_CS
, i
);
760 WRR_rdma(SHIFT_CS
, i
, cs
| CS_DSM
| E2S_CS_PTOCL
);
762 WRR_rdma(SHIFT_CS
, i
, cs
| CS_DSM
);
764 WRR_rdma(SHIFT_CS
, i
, cs
| CS_DSM
);
766 printk("SHIFT_CS: %x\n", RDR_rdma(SHIFT_CS
, i
));
768 WRR_rdma(SHIFT_DMA_TCS
, i
, DMA_TCS_Tx_Rst
);
769 WRR_rdma(SHIFT_DMA_TCS
, i
,
770 RDR_rdma(SHIFT_DMA_TCS
, i
) | RCode_64
| DMA_TCS_DRCL
);
771 #define COUNT_RESET_RCS 10
773 for (count
= 1; count
< COUNT_RESET_RCS
; count
++)
774 WRR_rdma(SHIFT_DMA_RCS
, i
, DMA_RCS_Rx_Rst
);
775 WRR_rdma(SHIFT_DMA_RCS
, i
, RDR_rdma(SHIFT_DMA_RCS
, i
) | WCode_64
);
/* Program the alive-timer limit from the atl_v module parameter. */
777 tr_atl
= ATL_B
| (atl_v
& ATL
);
778 printk("Reg CAM ATL: %x\n", tr_atl
);
/* Unmask RDMA interrupts on every online link (irq_mc is built in a
 * span lost from this text). */
799 for_each_online_rdma(i
) {
800 WRR_rdma(SIC_rdma_irq_mc
, i
,irq_mc
);
/* Send the identification request on every link; log failures. */
803 msg_cs_dmrcl
= MSG_CS_DMRCL
;
804 for_each_online_rdma(i
) {
805 rdma_state_inst_t
*xsp
;
808 p_xxb
.addr
= (unsigned long)&rdma_state
->rdma_sti
[i
];
809 DEBUG_MSG("rdma_init:link:%d rdma_state->rdma_sti:0x%08x%08x\n",
810 i
, p_xxb
.fields
.haddr
, p_xxb
.fields
.laddr
);
811 xsp
= &rdma_state
->rdma_sti
[i
];
812 ret
= send_msg(xsp
, 0, i
, MSG_CS_SIR_Msg
, 0);
814 ERROR_MSG("rdma_init: FAIL send MSG_CS_SIR_Msg from"
815 "link: %x ret: %d\n", i
, ret
);
818 printk("rdma_init: FAIL send MSG_CS_SIR_Msg"
819 "from link: %x. SM is absent\n", i
);
/* Create device nodes; non-fatal if it fails. */
823 if (create_dev_rdma(major
))
824 printk("rdma_init: Error creating devices. "
825 "Create a device manually.");
829 DEBUG_MSG("rdma_init: FINISH\n");
830 fix_event(node
, RDMA_INIT
, RETURN_EVENT
, 0);
835 long wait_time_rdma(struct rdma_reg_state
*rdma_reg_state
, signed long timeout
)
837 DECLARE_WAITQUEUE(wait
, current
);
840 add_wait_queue(&rdma_reg_state
->wqh_d
, &wait
);
841 set_task_state(current
, TASK_INTERRUPTIBLE
);
842 ret
= schedule_timeout(timeout
);
843 __set_current_state(TASK_RUNNING
);
844 remove_wait_queue(&rdma_reg_state
->wqh_d
, &wait
);
848 unsigned char bus_number_rdma
, devfn_rdma
;
/*
 * Module exit: mask interrupts on every online link, detach the
 * interrupt handler, remove the device nodes and unregister the char
 * device.  Mirrors rdma_init() in reverse.
 * NOTE(review): local declarations, braces and the closing lines are
 * missing from the damaged text.
 */
850 static void rdma_cleanup(void)
853 DEBUG_MSG("rdma_cleanup: START\n");
854 DEBUG_MSG("rdma_cleanup: rdma_state->major %d \n",
855 (int)rdma_state
->major
);
856 major
= (int)rdma_state
->major
;
/* Mask all RDMA interrupts before tearing anything down. */
857 for_each_online_rdma(i
) {
858 WRR_rdma(SIC_rdma_irq_mc
, i
, 0x0);
860 rdma_interrupt_p
= (void *) NULL
;
862 remove_dev_rdma(rdma_state
->major
);
864 unregister_chrdev(rdma_state
->major
, board_name
);
869 DEBUG_MSG("rdma_cleanup: FINISH\n");
/*
 * Release handler: clear this channel's bit in the per-link 'opened'
 * mask and reset the read/write exchange state and timers under the
 * instance mutex.  Minor number encodes (instance, channel) via
 * DEV_inst()/DEV_chan().
 * NOTE(review): braces, error-path bodies and the return are missing
 * from the damaged text.
 */
873 static int rdma_close(struct inode
*inode
, struct file
*file
)
879 rdma_state_inst_t
*rdma_sti
;
881 DEBUG_MSG("rdma_close: START\n");
882 minor
= get_file_minor(file
);
884 ERROR_MSG("rdma_close: minor < 0: %d \n",
888 instance
= DEV_inst(minor
);
889 channel
= DEV_chan(minor
);
890 DEBUG_MSG("rdma_close: instance: %d channel: %d\n", instance
, channel
);
891 rdma_sti
= &rdma_state
->rdma_sti
[instance
];
892 mutex_enter(&rdma_sti
->mu
);
/* Mark the channel as no longer open. */
893 rdma_sti
->opened
&= ~(1 << channel
);
895 chd
= &rdma_sti
->dma_chans
[channel
];
898 /* to properly complete the exchange */
/* Reset both directions' (READER/WRITER) exchange counters and
 * clocks, and restore the timers to their minimum defaults. */
900 for (i = 0; i < 2; i++){
901 pd = &rdma_sti->rw_states_d[i];
903 pd->clock_receive_trwd = 0;
904 pd->clock_begin_read = 0;
905 pd->clock_end_read_old = 0;
906 pd->clock_begin_read_old = 0;
907 pd->trwd_send_count = 0;
908 pd->ready_send_count = 0;
909 pd->trwd_rec_count = 0;
910 pd->ready_rec_count = 0;
913 pd->timer_read = TIMER_MIN;
914 pd->timer_write = TIMER_MIN;
915 pd->timer_for_read = TIMER_FOR_READ_MIN;
916 pd->timer_for_write = TIMER_FOR_WRITE_MIN;
919 DEBUG_MSG("rdma_close: opened.minor.instance.channel: 0x%x.%d.%d.%d\n",
920 rdma_sti
->opened
, minor
, instance
, channel
);
921 mutex_exit(&rdma_sti
->mu
);
922 DEBUG_MSG("rdma_close: FINISH\n");
/*
 * Open handler: validate the file/minor, check the requested instance
 * is an online RDMA link, then claim the channel bit in the per-link
 * 'opened' mask under the instance mutex.  A second open of the same
 * channel fails with EBUSY (first-open check).
 * NOTE(review): braces, error-path bodies and returns are missing from
 * the damaged text.
 */
926 static int rdma_open(struct inode
*inode
, struct file
*file
)
928 int minor
, file_eys
= 0, i
;
932 rdma_state_inst_t
*rdma_sti
;
934 DEBUG_MSG("rdma_open: START\n");
935 if (file
== (struct file
*)NULL
) {
936 ERROR_MSG("rdma_open: file is NULL\n");
939 minor
= get_file_minor(file
);
941 ERROR_MSG("rdma_open: minor(%d) < 0\n", minor
);
/* The instance must be one of the online RDMA links. */
944 instance
= DEV_inst(minor
);
945 for_each_online_rdma(i
)
949 ERROR_MSG("rdma_open:instance %d not support RDMA\n", instance
);
952 channel
= DEV_chan(minor
);
953 DEBUG_MSG("rdma_open: instance: %d channel: %d\n", instance
, channel
);
954 rdma_sti
= &rdma_state
->rdma_sti
[instance
];
955 mutex_enter(&rdma_sti
->mu
);
/* Exclusive per-channel open: reject if the bit is already set. */
956 firstopen
= (((1 << channel
) & rdma_sti
->opened
) == 0);
957 if (firstopen
== 0) {
958 ERROR_MSG("rdma_open: device EBUSY: minor: %d inst: %d "
959 "channel: %d\n", minor
, instance
, channel
);
960 mutex_exit(&rdma_sti
->mu
);
963 rdma_sti
->opened
|= (1 << channel
);
964 DEBUG_MSG("rdma_open: opened.minor.instance.channel: 0x%x.%d.%d.%d\n",
965 rdma_sti
->opened
, minor
, instance
, channel
);
966 mutex_exit(&rdma_sti
->mu
);
967 DEBUG_MSG("rdma_open FINISH\n");
971 /*static int rdma_ioctl(struct inode *inode, struct file *filp,
972 unsigned int cmd, unsigned long arg) */
973 static long rdma_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
980 rdma_state_inst_t
*rdma_sti
;
981 rdma_ioc_parm_t parm
;
983 dev_rdma_sem_t
*dev_sem
;
986 DEBUG_MSG("rdma_ioctl: START cmd %x\n", cmd
);
987 minor
= get_file_minor(filp
);
989 ERROR_MSG("rdma_ioctl: minor(%d) < 0 cmd: %x\n",
993 instance
= DEV_inst(minor
);
994 channel
= DEV_chan(minor
);
995 rdma_sti
= &rdma_state
->rdma_sti
[instance
];
997 case RDMA_IOC_GET_neighbour_map
:
999 if (copy_to_user((void __user
*)arg
, &node_online_neighbour_map
,
1000 sizeof (nodemask_t
)) == -1) {
1001 ERROR_MSG("rdma_ioctl: RDMA_IOC_GET_neighbour_map: "
1002 "copy_to_user failed\n");
1008 case RDMA_IOC_GET_ID
:
1012 rdma_state_inst_t *xsp;
1014 for_each_online_rdma(i) {
1015 xsp = &rdma_state->rdma_sti[i];
1016 ret = send_msg(xsp, 0, i, MSG_CS_SIR_Msg, 0);
1018 ERROR_MSG("rdma_ioctl: FAIL send MSG_CS_SIR_Msg"
1019 "from link: %x ret: %d\n", i, ret);
1020 } else if (ret == 0) {
1021 printk("rdma_ioctl: FAIL send MSG_CS_SIR_Msg"
1022 "from link: %x. " "SM is absent\n", i);
1027 rdma_link_id
.count_links
= MAX_NUMIOLINKS
;
1028 for_each_online_rdma(i
) {
1029 rdma_link_id
.link_id
[i
][0] = 1;
1030 rdma_link_id
.link_id
[i
][1] = RDR_rdma(SHIFT_CH_IDT
, i
);
1031 rdma_link_id
.link_id
[i
][2] = RDR_rdma(SHIFT_N_IDT
, i
);
1032 if (copy_to_user((void __user
*)arg
, &rdma_link_id
,
1033 sizeof(link_id_t
)) == -1) {
1034 ERROR_MSG("rdma_ioctl:RDMA_IOC_GET_ID:"
1035 "copy_to_user failed\n");
1043 case RDMA_IOC_RESET_DMA
:
1045 reset_link_t reset_link
;
1046 rw_state_p pd
= NULL
;
1047 dev_rdma_sem_t
*dev_sem
;
1048 rdma_state_inst_t
*xsp
;
1050 xsp
= &rdma_state
->rdma_sti
[instance
];
1052 rval
= copy_from_user(&reset_link
, (void __user
*)arg
,
1053 sizeof (reset_link_t
));
1055 ERROR_MSG("rdma_ioctl(%d, %d, %x): copy_from_user"
1056 "failed size: %lx rval: %lx\n",
1057 instance
, channel
, cmd
,
1058 sizeof (reset_link_t
), rval
);
1061 if (reset_link
.tcs_reset
== 1) {
1062 /* Enable exit gp0 */
1063 if (enable_exit_gp0
) {
1064 int ret_send_msg
, j
;
1065 for (j
= 0; j
< 10; j
++) {
1066 ret_send_msg
= send_msg(xsp
, 0,
1070 if (ret_send_msg
> 0)
1072 if (ret_send_msg
< 0) {
1073 ERROR_MSG("rdma_ioctl:"
1074 "FAIL send MSG_CS_SGP0_Msg "
1075 "from link: %x ret: %d\n",
1076 instance
, ret_send_msg
);
1077 } else if (ret_send_msg
== 0) {
1078 DEBUG_MSG("rdma_ioctl: FAIL send"
1080 "from link: %x. SM is absent: %x "
1082 instance
, ret_send_msg
,
1083 RDR_rdma(SHIFT_MSG_CS
, instance
));
1088 if (reset_link
.rcs_reset
== 1) {
1089 /* Enable exit gp0 */
1090 if (enable_exit_gp0
) {
1091 pd
= &rdma_sti
->rw_states_d
[READER
];
1092 dev_sem
= &pd
->dev_rdma_sem
;
1093 raw_spin_lock_irq(&dev_sem
->lock
);
1095 raw_spin_unlock_irq(&dev_sem
->lock
);
1098 reset_link
.tcs
= RDR_rdma(SHIFT_DMA_TCS
, instance
);
1099 reset_link
.rcs
= RDR_rdma(SHIFT_DMA_RCS
, instance
);
1100 rval
= copy_to_user((reset_link_t __user
*)arg
, &reset_link
,
1101 sizeof (reset_link
));
1107 DEBUG_MSG("rdma_ioctl: minor: %d\n", minor
);
1108 DEBUG_MSG("rdma_ioctl: sizeof (rdma_ioc_parm_t): %x,"
1109 "sizeof (parm): %x\n", sizeof (rdma_ioc_parm_t
),
1111 rval
= copy_from_user(&parm
, (void __user
*)arg
,
1112 sizeof (rdma_ioc_parm_t
));
1114 ERROR_MSG("rdma_ioctl(%d, %d, %x): copy_from_user failed size:"
1115 "%lx rval: %lx\n", instance
, channel
, cmd
,
1116 sizeof (rdma_ioc_parm_t
), rval
);
1120 parm
.err_no
= res
= 0;
1122 case RDMA_IOC_RESET_TCS
:
1124 #define COUNT_RESET_TCS 100
1125 #define DELAY_RESET_TCS 10
1126 unsigned tcs
, es
, i
;
1128 for (i
= 0; i
< COUNT_RESET_TCS
; i
++) {
1129 WRR_rdma(SHIFT_DMA_TCS
, instance
, DMA_TCS_Tx_Rst
);
1130 mdelay(DELAY_RESET_TCS
);
1131 tcs
= RDR_rdma(SHIFT_DMA_TCS
, instance
);
1132 es
= RDR_rdma(SHIFT_ES
, instance
);
1134 WRR_rdma(SHIFT_DMA_TCS
, instance
, RCode_64
| DMA_TCS_DRCL
);
1135 tcs
= RDR_rdma(SHIFT_DMA_TCS
, instance
);
1140 case RDMA_IOC_RESET_RCS
:
1141 { unsigned rcs
, es
, i
;
1142 #define COUNT_RESET_RCS 10
1143 for (i
= 0; i
< COUNT_RESET_RCS
; i
++) {
1144 WRR_rdma(SHIFT_DMA_RCS
, instance
, DMA_RCS_Rx_Rst
);
1145 rcs
= RDR_rdma(SHIFT_DMA_RCS
, instance
);
1146 es
= RDR_rdma(SHIFT_ES
, instance
);
1148 WRR_rdma(SHIFT_DMA_RCS
, instance
, WCode_64
);
1149 rcs
= RDR_rdma(SHIFT_DMA_RCS
, instance
);
1154 case RDMA_IOC_SET_MODE_LOOP
:
1156 int rdma_loopback_mode
;
1158 if (IS_MACHINE_E2S
) {
1159 if (parm
.reqlen
== DISABLE_LOOP
) {
1160 WRR_rdma(SHIFT_CS
, instance
,
1161 RDR_rdma(SHIFT_CS
, instance
) & ~E2S_CS_LOOP
);
1163 WRR_rdma(SHIFT_CS
, instance
,
1164 RDR_rdma(SHIFT_CS
, instance
) | E2S_CS_LOOP
);
1166 rdma_loopback_mode
= RDR_rdma(SHIFT_CS
, instance
) &
1169 /* INFO_MSG("Loopback mode not release.\n"); */
1170 rdma_loopback_mode
= 0;
1173 /* INFO_MSG("Loopback mode not release.\n");*/
1174 rdma_loopback_mode
= 0;
1177 parm
.acclen
= rdma_loopback_mode
;
1180 case RDMA_IOC_SET_MODE_RFSM
:
1182 if (parm
.reqlen
== DISABLE_RFSM
) {
1185 rfsm
= DMA_RCS_RFSM
;
1190 case RDMA_IOC_SET_MODE_EXIT_GP0
:
1192 if (parm
.reqlen
== DISABLE_EXIT_GP0
) {
1193 enable_exit_gp0
= DISABLE_EXIT_GP0
;
1195 enable_exit_gp0
= ENABLE_EXIT_GP0
;
1197 parm
.acclen
= enable_exit_gp0
;
1200 case RDMA_IOC_DUMPREG0
:
1201 case RDMA_IOC_DUMPREG1
:
1202 read_regs_rdma(instance
);
1208 if ((parm
.reqlen
== 0x900) ||
1209 ((parm
.reqlen
>= 0x2000) && (parm
.reqlen
<= 0x2004)) ||
1210 ((parm
.reqlen
>= 0x3000) && (parm
.reqlen
<= 0x3088))) {
1211 /* sic_write_node_nbsr_reg(instance, parm.reqlen,
1213 WRR_rdma( parm
.reqlen
, instance
, parm
.acclen
);
1218 if ((parm
.reqlen
== 0x900) ||
1219 ((parm
.reqlen
>= 0x700) && (parm
.reqlen
<= 0x704)) ||
1220 ((parm
.reqlen
>= 0x800) && (parm
.reqlen
<= 0x888))) {
1221 /* sic_write_node_nbsr_reg(instance, parm.reqlen,
1223 WRR_rdma( parm
.reqlen
, instance
, parm
.acclen
);
1234 if ((parm
.reqlen
<= 0x900) ||
1235 ((parm
.reqlen
>= 0x2000) && (parm
.reqlen
<= 0x2004)) ||
1236 ((parm
.reqlen
>= 0x3000) && (parm
.reqlen
<= 0x3088))) {
1237 /* parm.acclen = sic_read_node_nbsr_reg(instance,
1239 parm
.acclen
= RDR_rdma(parm
.reqlen
, instance
);
1244 if ((parm
.reqlen
== 0x900) ||
1245 ((parm
.reqlen
>= 0x700) && (parm
.reqlen
<= 0x704)) ||
1246 ((parm
.reqlen
>= 0x800) && (parm
.reqlen
<= 0x888))) {
1247 /* sic_write_node_nbsr_reg(instance, parm.reqlen,
1249 WRR_rdma( parm
.reqlen
, instance
, parm
.acclen
);
1257 case RDMA_WAKEUP_WRITER
:
1259 dev_rdma_sem_t
*dev_sem
;
1262 pd
= &rdma_sti
->rw_states_d
[WRITER
];
1263 dev_sem
= &pd
->dev_rdma_sem
;
1264 raw_spin_lock_irq(&dev_sem
->lock
);
1265 rdma_cv_broadcast_rdma(&pd
->dev_rdma_sem
, instance
);
1266 raw_spin_unlock_irq(&dev_sem
->lock
);
1270 case RDMA_WAKEUP_READER
:
1272 dev_rdma_sem_t
*dev_sem
;
1275 pd
= &rdma_sti
->rw_states_d
[READER
];
1276 dev_sem
= &pd
->dev_rdma_sem
;
1277 raw_spin_lock_irq(&dev_sem
->lock
);
1278 rdma_cv_broadcast_rdma(&pd
->dev_rdma_sem
, instance
);
1279 raw_spin_unlock_irq(&dev_sem
->lock
);
1283 case RDMA_CLEAN_TDC_COUNT
:
1290 pd
= &rdma_sti
->rw_states_d
[WRITER
];
1293 ERROR_MSG("rdma_ioctl: CLEAN_TDC: (%d,%d):"
1294 "Unexpected channel\n", instance
, channel
);
1297 dev_sem
= &pd
->dev_rdma_sem
;
1298 dev_sem
->num_obmen
= 0;
1299 dev_sem
->irq_count_rdma
= 0;
1300 dbg_ioctl("CLEAN_TDC: %d dev_sem->num_obmen: %x\n",
1301 instance
, dev_sem
->num_obmen
);
1304 #define COUNT_CLK 10
1307 u64 time
[COUNT_CLK
];
1310 for (i
= 0; i
< COUNT_CLK
; i
++)
1311 time
[i
] = join_curr_clock();
1312 for (i
= 0; i
< COUNT_CLK
; i
++)
1313 printk("0x%llx\n", time
[i
]);
1316 case RDMA_GET_MAX_CLKR
:
1318 u64 time
[COUNT_CLK
];
1320 u64 max_clk_all
= 0;
1322 int count_rep_clk
= 0;
1324 #define COUNT_REP_CLK 100
1326 for (i
= 0; i
< COUNT_CLK
; i
++)
1327 time
[i
] = join_curr_clock();
1328 for (i
= 0; i
< COUNT_CLK
; i
++) {
1329 if (max_clk
< time
[i
])
1332 if (max_clk_all
< max_clk
) {
1333 max_clk_all
= max_clk
;
1334 printk("0x%llx - max_clk_all\n", max_clk_all
);
1336 if (count_rep_clk
< COUNT_REP_CLK
)
1342 case RDMA_CLEAN_RDC_COUNT
:
1344 intr_rdc_count
[instance
] = 0;
1350 pd
= &rdma_sti
->rw_states_d
[READER
];
1353 ERROR_MSG("rdma_ioctl: CLEAN_RDC: (%d,%d):"
1354 "Unexpected channel\n", instance
, channel
);
1357 dev_sem
= &pd
->dev_rdma_sem
;
1358 dev_sem
->num_obmen
= 0;
1359 dev_sem
->irq_count_rdma
= 0;
1360 dbg_ioctl("CLEAN_RDC: intr_rdc_count[%d]: %u "
1361 "dev_sem->num_obmen: %x\n", instance
,
1362 intr_rdc_count
[instance
], dev_sem
->num_obmen
);
1366 case RDMA_TIMER_FOR_READ
:
1367 dbg_ioctl("cmd = RDMA_TIMER_FOR_READ, "
1368 "reqlen (mksec) = 0x%x\n",
1369 MIN_min(TIMER_FOR_READ_MAX
, parm
.reqlen
));
1370 parm
.acclen
= (&rdma_sti
->rw_states_d
[READER
])->timer_for_read
;
1371 (&rdma_sti
->rw_states_d
[READER
])->timer_for_read
=
1372 MAX_max(TIMER_FOR_READ_MIN
, MIN_min(TIMER_FOR_READ_MAX
,
1374 parm
.reqlen
= (&rdma_sti
->rw_states_d
[READER
])->timer_for_read
;
1377 case RDMA_TIMER_FOR_WRITE
:
1378 dbg_ioctl("cmd = RDMA_TIMER_FOR_WRITE, "
1379 "reqlen (mksec) = 0x%x\n",
1380 MIN_min(TIMER_FOR_WRITE_MAX
, parm
.reqlen
));
1381 parm
.acclen
= (&rdma_sti
->rw_states_d
[WRITER
])->timer_for_write
;
1382 (&rdma_sti
->rw_states_d
[WRITER
])->timer_for_write
=
1383 MAX_max(TIMER_FOR_WRITE_MIN
,MIN_min(TIMER_FOR_WRITE_MAX
,
1385 parm
.reqlen
= (&rdma_sti
->rw_states_d
[WRITER
])->timer_for_write
;
1388 case RDMA_IOC_ALLOCB
:
1389 DEBUG_MSG("rdma_ioctl: cmd = RDMA_IOC_ALLOCB, "
1392 chd
= &rdma_sti
->dma_chans
[channel
];
1393 chd
->node_for_memory
= NUM_NODE_RDMA(instance
);
1394 if (chd
->allocs
!= RCS_EMPTY
) {
1395 ERROR_MSG("rdma_ioctl: RDMA_IOC_ALLOCB: "
1396 "WRONGLY finish: channel : %d "
1397 "chd->allocs: %i\n", channel
, chd
->allocs
);
1399 parm
.err_no
= RDMA_E_ALLOC
;
1400 parm
.acclen
= chd
->allocs
;
1403 parm
.acclen
= init_chan(chd
, parm
.reqlen
, parm
.rwmode
);
1404 if (parm
.acclen
< -1) {
1405 ERROR_MSG("rdma_ioctl: RDMA_IOC_ALLOCB: channel : %d "
1406 "WRONGLY finish: parm.acclen: %d\n",
1407 channel
, parm
.acclen
);
1408 res
= -1; parm
.err_no
= -parm
.acclen
;
1411 if (parm
.acclen
< 0) {
1412 ERROR_MSG("rdma_ioctl: RDMA_IOC_ALLOCB: "
1413 "WRONGLY finish: RDMA_E_NOBUF\n");
1414 res
= -1; parm
.err_no
= RDMA_E_NOBUF
;
1417 parm
.rwmode
= chd
->full
;
1418 DEBUG_MSG("rdma_ioctl: phys: 0x%llx full: 0x%08x\n", chd
->dma
,
1422 rdma_sti
->stat_rdma
.cur_clock
= jiffies
;
1423 if (copy_to_user((void __user
*)arg
, &rdma_sti
->stat_rdma
,
1424 sizeof (struct stat_rdma
)) == -1) {
1425 ERROR_MSG("rdma_ioctl: copy_to_user failed\n");
1429 case RDMA_GET_EVENT
:
1436 memset(&rdma_sti
->stat_rdma
, 0, sizeof (struct stat_rdma
));
1439 case RDMA_IS_CAM_YES
:
1442 int ret_time_dwait
= 0;
1443 dev_rdma_sem_t
*dev_sem
;
1446 event_ioctl(instance
, RDMA_IS_CAM_YES_EVENT
, 1, 0);
1447 pcam
= &rdma_sti
->ralive
;
1448 dev_sem
= &pcam
->dev_rdma_sem
;
1450 atl
= RDR_rdma(SHIFT_CAM
, instance
);
1454 goto end_RDMA_IS_CAM_YES
;
1456 raw_spin_lock_irq(&dev_sem
->lock
);
1457 dev_sem
->irq_count_rdma
= 0;
1459 ret_time_dwait
= wait_for_irq_rdma_sem(dev_sem
, IO_TIMEOUT
,
1462 raw_spin_unlock_irq(&dev_sem
->lock
);
1463 parm
.acclen
= RDR_rdma(SHIFT_CAM
, instance
);
1464 if (ret_time_dwait
== -2) {
1465 parm
.err_no
= -RDMA_E_SIGNAL
;
1467 if (ret_time_dwait
== -1) {
1468 parm
.err_no
= -RDMA_E_TIMER
;
1470 if (ret_time_dwait
> 0) {
1471 parm
.err_no
= ret_time_dwait
;
1474 end_RDMA_IS_CAM_YES
:
1475 event_ioctl(0, RDMA_IS_CAM_YES_EVENT
, 0, 0);
1478 case RDMA_IS_CAM_NO
:
1481 int ret_time_dwait
= 0;
1482 dev_rdma_sem_t
*dev_sem
;
1485 event_ioctl(instance
, RDMA_IS_CAM_NO_EVENT
, 1, 0);
1486 pcam
= &rdma_sti
->talive
;
1487 dev_sem
= &pcam
->dev_rdma_sem
;
1488 atl
= RDR_rdma(SHIFT_CAM
, instance
);
1492 goto end_RDMA_IS_CAM_NO
;
1494 raw_spin_lock_irq(&dev_sem
->lock
);
1495 dev_sem
->irq_count_rdma
= 0;
1497 ret_time_dwait
= wait_for_irq_rdma_sem(dev_sem
, IO_TIMEOUT
,
1500 raw_spin_unlock_irq(&dev_sem
->lock
);
1501 parm
.acclen
= RDR_rdma(SHIFT_CAM
, instance
);
1502 if (ret_time_dwait
== -2) {
1503 parm
.err_no
= -RDMA_E_SIGNAL
;
1505 if (ret_time_dwait
== -1) {
1506 parm
.err_no
= -RDMA_E_TIMER
;
1508 if (ret_time_dwait
> 0) {
1509 parm
.err_no
= ret_time_dwait
;
1513 parm
.clkr
= join_curr_clock();
1514 parm
.clkr1
= pcam
->clkr
;
1515 parm
.reqlen
= pcam
->int_cnt
;
1517 event_ioctl(0, RDMA_IS_CAM_NO_EVENT
, 0, 0);
1524 tr_atl
= ATL_B
| (parm
.reqlen
& ATL
);
1525 WRR_rdma(SHIFT_CAM
, instance
, tr_atl
);
1526 atl
= RDR_rdma(SHIFT_CAM
, instance
);
1531 ERROR_MSG("rdma_ioctl(%d, %d): default operation NOT EXPECTED"
1532 "cmd: %x\n", instance
, channel
, cmd
);
1534 parm
.err_no
= RDMA_E_INVOP
;
1537 rval
= copy_to_user((rdma_ioc_parm_t __user
*)arg
, &parm
,
1538 sizeof (rdma_ioc_parm_t
));
1540 ERROR_MSG("rdma_ioctl(%d, %d, %x): copy_to_user failed"
1541 "size: %lx rval: %lx\n", instance
, channel
, cmd
,
1542 sizeof (rdma_ioc_parm_t
), rval
);
1546 DEBUG_MSG("rdma_ioctl(%d, %d): NORMAL_END: acclen=%x *****\n\n",
1547 instance
, channel
, parm
.acclen
);
1548 DEBUG_MSG("rdma_ioctl: FINISH\n");
1552 ERROR_MSG("rdma_ioctl: FAIL\n");
1553 DEBUG_MSG("rdma_ioctl: FINISH\n");
1554 return -EINVAL
; /* !? return l>0 == return -1 !?*/
1559 #ifdef CONFIG_COMPAT
1560 static int do_ioctl(struct file
*f
, unsigned cmd
, unsigned long arg
)
1563 ret
= rdma_ioctl(f
, cmd
, arg
);
1564 /* ret = rdma_ioctl(f->f_dentry->d_inode, f, cmd, arg); */
/*
 * rdma_compat_ioctl() - 32-bit compat ioctl entry point.
 *
 * Whitelists the commands whose argument layout is identical for 32- and
 * 64-bit userspace and forwards them to do_ioctl(); anything else gets
 * -ENOIOCTLCMD so the VFS falls back / fails cleanly.
 *
 * NOTE(review): this block is a garbled extraction — statements are split
 * across lines, original line numbers are fused into the text, and several
 * original lines (function brace, switch header, a few case labels,
 * default label, closing braces) were elided. Code left byte-identical.
 */
1568 static long rdma_compat_ioctl(struct file
*f
, unsigned cmd
, unsigned long arg
)
/* Elided here: function open brace and "switch (cmd) {" (orig. 1569-1571). */
1572 case RDMA_IOC_DUMPREG0
:
1573 case RDMA_IOC_DUMPREG1
:
1576 case RDMA_IOC_GET_neighbour_map
:
1577 case RDMA_CLEAN_TDC_COUNT
:
1579 case RDMA_GET_MAX_CLKR
:
1580 case RDMA_CLEAN_RDC_COUNT
:
1581 case RDMA_TIMER_FOR_READ
:
1582 case RDMA_TIMER_FOR_WRITE
:
1583 case RDMA_IOC_ALLOCB
:
1585 case RDMA_GET_EVENT
:
1587 case RDMA_IS_CAM_YES
:
1588 case RDMA_IS_CAM_NO
:
1590 case RDMA_WAKEUP_WRITER
:
1591 case RDMA_WAKEUP_READER
:
1592 case RDMA_IOC_GET_ID
:
1593 case RDMA_IOC_RESET_DMA
:
1594 case RDMA_IOC_SET_MODE_RFSM
:
1595 case RDMA_IOC_SET_MODE_EXIT_GP0
:
1596 case RDMA_IOC_RESET_TCS
:
1597 case RDMA_IOC_RESET_RCS
:
1598 case RDMA_IOC_SET_MODE_LOOP
:
/* All whitelisted commands share the same forwarding call. */
1599 return do_ioctl(f
, cmd
, arg
);
/* Elided here: "default:" label (orig. 1600); unknown cmds fall through. */
1601 return -ENOIOCTLCMD
;
1606 /* ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); */
/*
 * rdma_read() - file_operations .read handler.
 *
 * Visible flow: validate filp, resolve minor -> (instance, channel), copy
 * an rdma_ioc_parm_t request header in from userspace, delegate the actual
 * transfer to read_buf(), stamp PRM.clkr with join_curr_clock(), and copy
 * the (updated) parameter block back to the caller's buffer.
 *
 * NOTE(review): garbled extraction — error-path return statements and
 * closing braces (orig. 1621-1622, 1627-1628, 1639-1642, 1652-1655) were
 * elided; code left byte-identical.
 */
1607 static ssize_t
rdma_read(struct file
*filp
, char __user
*buf
, size_t size
,
/* Elided: loff_t *pos parameter tail, open brace, int locals (orig. 1608-1613). */
1614 rdma_state_inst_t
*rdma_sti
;
1615 rdma_ioc_parm_t PRM
;
1618 DEBUG_MSG("rdma_read: START\n");
1619 if (filp
== (struct file
*)NULL
) {
1620 ERROR_MSG("rdma_read: filp is NULL\n");
1623 minor
= get_file_minor(filp
);
1624 DEBUG_MSG("rdma_read: minor: %d\n", minor
);
1626 ERROR_MSG("rdma_read: minor(%d) < 0\n", minor
);
1629 instance
= DEV_inst(minor
);
1630 channel
= DEV_chan(minor
);
1631 DEBUG_MSG("rdma_read: instance: %d channel: %d\n", instance
, channel
);
1632 rdma_sti
= &rdma_state
->rdma_sti
[instance
];
/* Pull the request descriptor from the user buffer. */
1633 rval
= copy_from_user(&PRM
, (rdma_ioc_parm_t __user
*)buf
,
1634 sizeof (rdma_ioc_parm_t
));
1636 ERROR_MSG("rdma_read(%d, %d): copy_from_user failed size: %lx"
1637 "rval: %lx\n", instance
, channel
,
1638 sizeof (rdma_ioc_parm_t
), rval
);
/* Actual DMA receive happens in read_buf(). */
1643 ret
= read_buf(rdma_sti
, buf
, size
, instance
, channel
, &PRM
);
1644 PRM
.clkr
= join_curr_clock();
/* Return the updated descriptor (status, clkr) to userspace. */
1646 rval
= copy_to_user((rdma_ioc_parm_t __user
*)buf
, &PRM
,
1647 sizeof (rdma_ioc_parm_t
));
1649 ERROR_MSG("rdma_read(%d, %d): copy_to_user failed size: %lx"
1650 "rval: %lx\n", instance
, channel
,
1651 sizeof (rdma_ioc_parm_t
), rval
);
1654 DEBUG_MSG("rdma_read: FINISH\n");
/*
 * rdma_write() - file_operations .write handler.
 *
 * Mirror of rdma_read(): resolve minor -> (instance, channel), copy the
 * rdma_ioc_parm_t descriptor in, delegate to write_buf(), stamp PRM.clkr,
 * and copy the descriptor back out.
 *
 * NOTE(review): garbled extraction — braces, locals and error-path
 * returns (orig. 1660-1664, 1671-1672, 1688-1689, 1698-1700) were elided;
 * code left byte-identical. The ERROR_MSG at orig. 1685 says "rdma_read"
 * — copy/paste slip in the original, kept verbatim here.
 */
1658 static ssize_t
rdma_write(struct file
*filp
, const char __user
*buf
,
1659 size_t size
, loff_t
*pos
)
1665 rdma_state_inst_t
*rdma_sti
;
1666 rdma_ioc_parm_t PRM
;
1669 DEBUG_MSG("rdma_write: START\n");
1670 minor
= get_file_minor(filp
);
1673 instance
= DEV_inst(minor
);
1674 channel
= DEV_chan(minor
);
1675 DEBUG_MSG("rdma_write: instance: %d channel: %d\n", instance
, channel
);
1676 rdma_sti
= &rdma_state
->rdma_sti
[instance
];
1677 DEBUG_MSG("rdma_write: &rdma_state->rdma_sti[%d]: %p\n", instance
,
/* Pull the request descriptor from the user buffer. */
1679 rval
= copy_from_user(&PRM
, (rdma_ioc_parm_t __user
*)buf
,
1680 sizeof(rdma_ioc_parm_t
));
1681 DEBUG_MSG("rdma_write: copy_from_user PRM: %p sizeof(PRM):%x"
1682 "sizeof(rdma_ioc_parm_t):%x\n", &PRM
, sizeof(PRM
),
1683 sizeof(rdma_ioc_parm_t
));
1685 ERROR_MSG("rdma_read(%d, %d): copy_from_user failed size: %lx"
1686 "rval: %lx\n", instance
, channel
,
1687 sizeof (rdma_ioc_parm_t
), rval
);
/* Actual DMA transmit happens in write_buf(). */
1690 ret
= write_buf(rdma_sti
, buf
, size
, instance
, channel
, &PRM
);
1691 PRM
.clkr
= join_curr_clock();
/* Return the updated descriptor to userspace. */
1692 rval
= copy_to_user((rdma_ioc_parm_t __user
*)buf
, &PRM
,
1693 sizeof (rdma_ioc_parm_t
));
1695 ERROR_MSG("rdma_write(%d, %d): copy_to_user failed size: %lx"
1696 "rval: %lx\n", instance
, channel
,
1697 sizeof (rdma_ioc_parm_t
), rval
);
/*
 * rdma_mmap() - file_operations .mmap handler.
 *
 * Maps the DMA buffer previously allocated for this (instance, channel)
 * into the caller's address space. Requires chd->allocs == RCS_ALLOCED;
 * table-mode channels go through rdma_remap_page_tbl() on the descriptor
 * table, single-buffer channels through rdma_remap_page() on the primary
 * buffer. On success the channel state advances to RCS_MAPPED.
 *
 * NOTE(review): garbled extraction — error returns and the branch that
 * selects table vs. single mode (orig. 1723-1725, 1727-1728, 1731-1735)
 * were elided; code left byte-identical.
 */
1704 static int rdma_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1710 rdma_state_inst_t
*rdma_sti
;
1713 DEBUG_MSG("rdma_mmap: START\n");
1714 minor
= get_file_minor(file
);
1717 instance
= DEV_inst(minor
);
1718 channel
= DEV_chan(minor
);
1719 rdma_sti
= &rdma_state
->rdma_sti
[instance
];
1720 chd
= &rdma_sti
->dma_chans
[channel
];
/* Refuse to map a channel whose buffer was never allocated. */
1721 if (chd
->allocs
!= RCS_ALLOCED
) {
1722 ERROR_MSG("rdma_mmap : chd->allocs != RCS_ALLOCED\n");
/* Table mode: remap the scatter table. */
1726 rval
= rdma_remap_page_tbl((void *)chd
->vdma_tm
, chd
->real_size
,
/* Single-buffer mode: remap the contiguous primary buffer. */
1729 rval
= rdma_remap_page((void *)chd
->prim_buf_addr
,
1730 chd
->real_size
, vma
);
1733 ERROR_MSG("rdma: rdma_mmap ddi_remap_page FAIL\n");
1736 chd
->allocs
= RCS_MAPPED
;
1737 DEBUG_MSG("rdma_mmap: minor: %d\n", minor
);
1738 DEBUG_MSG("rdma_mmap: FINISH\n");
/*
 * rdma_remap_page() - map one contiguous kernel buffer into a VMA.
 *
 * Converts @va to a physical address, page-aligns the span, honours
 * vma->vm_pgoff as an offset into the buffer, clamps the size to the VMA,
 * marks the VMA VM_READ|VM_WRITE|VM_RESERVED, disables caching for VM_IO
 * mappings (_PAGE_CD_DIS | _PAGE_PWT, e2k-specific), then calls
 * remap_pfn_range().
 *
 * NOTE(review): garbled extraction — declarations of pha/size and the
 * failure/success return statements (orig. 1743-1744, 1748-1749,
 * 1773-1777) were elided; code left byte-identical.
 */
1742 int rdma_remap_page(void *va
, size_t sz
, struct vm_area_struct
*vma
)
1745 unsigned long vm_end
;
1746 unsigned long vm_start
;
1747 unsigned long vm_pgoff
;
1750 DEBUG_MSG("rdma_remap_page: START\n");
1751 if (!sz
) return -EINVAL
;
1752 pha
= virt_to_phys(va
);
/* Align the mapping span to whole pages, including the sub-page offset. */
1753 size
= (long )PAGE_ALIGN((pha
& ~PAGE_MASK
) + sz
);
1754 if ((vma
->vm_pgoff
<< PAGE_SHIFT
) > size
) return -ENXIO
;
1755 pha
+= (vma
->vm_pgoff
<< PAGE_SHIFT
);
1756 vm_end
= vma
->vm_end
;
1757 vm_start
= vma
->vm_start
;
1758 vm_pgoff
= vma
->vm_pgoff
;
/* Never map more than the VMA can hold. */
1760 if ((vm_end
- vm_start
) < size
)
1761 size
= vm_end
- vm_start
;
1763 vma
->vm_flags
|= (VM_READ
| VM_WRITE
| VM_RESERVED
);
/* Uncached mapping for device-backed VMAs (e2k page attributes). */
1766 if (vma
->vm_flags
& VM_IO
)
1767 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
) |
1768 _PAGE_CD_DIS
| _PAGE_PWT
);
1770 if (remap_pfn_range(vma
, vm_start
, (pha
>> PAGE_SHIFT
), size
,
1771 vma
->vm_page_prot
)) {
1772 ERROR_MSG("rdma_remap_page: FAIL remap_pfn_range\n");
1775 DEBUG_MSG("rdma_remap_page: FINISH\n");
/*
 * rdma_remap_page_tbl() - map a table-mode DMA buffer into a VMA.
 *
 * Walks the rdma_tbl_64_struct_t descriptor table at @va; each entry
 * carries a physical address (addr) and a size (sz), stored little-endian
 * (cpu_to_le64 conversions below). Each chunk is remapped piecewise into
 * the VMA via remap_pfn_range() until the VMA is exhausted. Rejects a
 * non-zero vma->vm_pgoff and a zero size.
 *
 * NOTE(review): garbled extraction — the per-iteration vm_start advance,
 * the endianness-config conditionals, sz_pha initialisation from
 * ptbl->sz, the remap_pfn_range() call head (orig. 1840-1842) and the
 * return statements were elided; code left byte-identical.
 */
1779 int rdma_remap_page_tbl(void *va
, size_t sz
, struct vm_area_struct
*vma
)
1782 unsigned long sz_pha
;
1783 unsigned long vm_end
;
1784 unsigned long vm_start
;
1785 unsigned long vm_pgoff
;
1787 rdma_tbl_64_struct_t
*ptbl
;
1789 DEBUG_MSG("rdma_remap_page_tbl: START\n");
1790 if (!sz
) return -EINVAL
;
/* Offsets into a table-mode buffer are not supported. */
1791 if (vma
->vm_pgoff
) {
1792 ERROR_MSG("rdma_remap_page_tbl: vma->vm_pgoff: 0x%lx\n",
1796 size
= (long)PAGE_ALIGN(sz
);
1797 vm_end
= vma
->vm_end
;
1798 vm_start
= vma
->vm_start
;
1799 vm_pgoff
= vma
->vm_pgoff
;
1801 if ((vm_end
- vm_start
) < size
) {
1802 size
= vm_end
- vm_start
;
1803 DEBUG_MSG("rdma_remap_page_tbl: vm_end(%lx) - vm_start(%lx) < "
1804 "size(%lx)\n", vm_end
, vm_start
, size
);
1807 vma
->vm_flags
|= (VM_READ
| VM_WRITE
| VM_RESERVED
);
/* Uncached mapping for device-backed VMAs (e2k page attributes). */
1810 if (vma
->vm_flags
& VM_IO
)
1811 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
) |
1812 _PAGE_CD_DIS
| _PAGE_PWT
);
/* Walk the descriptor table entry by entry. */
1814 for (ptbl
= (rdma_tbl_64_struct_t
*)va
; ptbl
; ptbl
++) {
1815 rdma_addr_struct_t pxx
;
1816 pxx
.addr
= (unsigned long)ptbl
;
1817 DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x ptbl\n",
1818 pxx
.fields
.haddr
, pxx
.fields
.laddr
);
1819 pxx
.addr
= ptbl
->addr
;
1820 DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x ptbl->addr\n",
1821 pxx
.fields
.haddr
, pxx
.fields
.laddr
);
/* Table entries are stored little-endian; convert for the CPU. */
1823 pha
= (unsigned long)(cpu_to_le64(ptbl
->addr
));
1824 DEBUG_MSG("rdma_remap_page_tbl: pha cpu_to_le64(pha): %lx \n",
1827 pha
= (unsigned long)ptbl
->addr
;
1829 pxx
.addr
= (unsigned long)phys_to_virt(pha
);
1830 DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x __va(ptbl->addr)\n",
1831 pxx
.fields
.haddr
, pxx
.fields
.laddr
);
1833 DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x __fa(ptbl->addr)\n",
1834 pxx
.fields
.haddr
, pxx
.fields
.laddr
);
1837 sz_pha
= cpu_to_le64(sz_pha
);
1838 DEBUG_MSG("rdma_remap_page_tbl:"
1839 "sz_pha cpu_to_le64(sz_pha): %lx\n", sz_pha
);
/* Elided: remap_pfn_range(vma, vm_start, ...) call head (orig. 1840-1842). */
1843 (pha
>> PAGE_SHIFT
), sz_pha
, vma
->vm_page_prot
)) {
1844 ERROR_MSG("rdma_remap_page_tbl:FAIL remap_pfn_range\n");
1848 DEBUG_MSG("rdma_remap_page_tbl: vm_start: %lx vm_end: %lx "
1849 "sz_pha: %lx \n", vm_start
, vm_end
, sz_pha
);
/* Stop once the VMA is fully covered. */
1850 if (vm_start
>= vm_end
) {
1851 DEBUG_MSG("rdma_remap_page_tbl: "
1852 "vm_start(%lx) >= vm_end(%lx)\n", vm_start
,
1857 DEBUG_MSG("rdma_remap_page_tbl: FINISH\n");
/*
 * get_file_minor() - recover the minor device number from an open file.
 *
 * Follows file->f_dentry->d_inode (pre-3.x dentry layout) and returns
 * MINOR(d_inode->i_rdev). Callers (rdma_read/rdma_write/rdma_mmap) treat
 * a negative result as failure.
 *
 * NOTE(review): garbled extraction — the error-path return statements
 * after each ERROR_MSG (orig. 1870-1871, 1875-1876) were elided, so the
 * exact error value is not visible here; code left byte-identical.
 */
1861 int get_file_minor(struct file
*file
)
1864 struct dentry
*f_dentry_rdma
;
1865 struct inode
*d_inode
;
1867 f_dentry_rdma
= file
->f_dentry
;
1868 if (!f_dentry_rdma
) {
1869 ERROR_MSG( "get_file_minor: file->f_dentry is NULL\n");
1872 d_inode
= f_dentry_rdma
->d_inode
;
/* Elided: "if (!d_inode) {" guard (orig. 1873). */
1874 ERROR_MSG( "get_file_minor: f_dentry->d_inode is NULL\n");
1877 major
= MAJOR(d_inode
->i_rdev
);
1878 DEBUG_MSG("get_file_minor:d_inode->i_rdev: 0x%08u major: %d minor:%u\n",
1879 d_inode
->i_rdev
, major
, MINOR(d_inode
->i_rdev
));
1880 return MINOR(d_inode
->i_rdev
);
/*
 * init_rdma_sti() - initialise the per-link software state for @instance.
 *
 * Sets up rdma_state->rdma_sti[instance]: the instance mutex, the talive
 * and ralive alive-timer states, and the two (READER/WRITER) message and
 * data channel states — for each one a mutex, a spinlock, a condition
 * variable, a cleared irq counter, and default timer values.
 *
 * NOTE(review): garbled extraction — braces and a few blank/elided lines
 * (orig. 1885, 1887, 1891, 1901, 1910, 1920, 1933, 1942-1944, 1949, 1951)
 * are missing; code left byte-identical.
 */
1884 void init_rdma_sti(int instance
)
1886 rw_state_t
*pd
, *pm
;
1888 dev_rdma_sem_t
*dev_sem
;
1889 rdma_state_inst_t
*rdma_sti
= &rdma_state
->rdma_sti
[instance
];
1890 rdma_addr_struct_t p_xxb
;
1892 DEBUG_MSG("init_rdma_sti: START\n");
1893 p_xxb
.addr
= (unsigned long)rdma_sti
;
1894 DEBUG_MSG("init_rdma_sti: node: %d rdma_sti: 0x%08x%08x\n",
1895 instance
, p_xxb
.fields
.haddr
, p_xxb
.fields
.laddr
);
1896 rdma_sti
->instance
= instance
;
1897 mutex_init(&rdma_sti
->mu
);
/* talive: transmit-side alive-timer state. */
1898 pm
= &rdma_sti
->talive
;
1899 mutex_init(&pm
->mu
);
1900 raw_spin_lock_init(&pm
->mu_spin
);
1902 pm
->timer
= TIMER_MIN
;
1903 dev_sem
= &pm
->dev_rdma_sem
;
1904 raw_spin_lock_init(&dev_sem
->lock
);
1905 cv_init(&dev_sem
->cond_var
);
1906 dev_sem
->irq_count_rdma
= 0;
/* ralive: receive-side alive-timer state. */
1907 pm
= &rdma_sti
->ralive
;
1908 mutex_init(&pm
->mu
);
1909 raw_spin_lock_init(&pm
->mu_spin
);
1911 pm
->timer
= TIMER_MIN
;
1912 dev_sem
= &pm
->dev_rdma_sem
;
1913 raw_spin_lock_init(&dev_sem
->lock
);
1914 cv_init(&dev_sem
->cond_var
);
1915 dev_sem
->irq_count_rdma
= 0;
/* One message-channel (pm) and one data-channel (pd) state per direction. */
1916 for (i
= 0; i
< 2; i
++) {
1917 pm
= &rdma_sti
->rw_states_m
[i
];
1918 mutex_init(&pm
->mu
);
1919 raw_spin_lock_init(&pm
->mu_spin
);
1921 pm
->timer
= TIMER_MIN
;
1922 dev_sem
= &pm
->dev_rdma_sem
;
1923 raw_spin_lock_init(&dev_sem
->lock
);
1924 cv_init(&dev_sem
->cond_var
);
1925 dev_sem
->irq_count_rdma
= 0;
1926 pd
= &rdma_sti
->rw_states_d
[i
];
1927 mutex_init(&pd
->mu
);
1928 raw_spin_lock_init(&pd
->mu_spin
);
1929 dev_sem
= &pd
->dev_rdma_sem
;
1930 raw_spin_lock_init(&dev_sem
->lock
);
1931 cv_init(&dev_sem
->cond_var
);
1932 dev_sem
->irq_count_rdma
= 0;
/* Clear transfer statistics/clocks for the data channel. */
1934 pd
->clock_receive_trwd
= 0;
1935 pd
->clock_begin_read
= 0;
1936 pd
->clock_end_read_old
= 0;
1937 pd
->clock_begin_read_old
= 0;
1938 pd
->trwd_send_count
= 0;
1939 pd
->ready_send_count
= 0;
1940 pd
->trwd_rec_count
= 0;
1941 pd
->ready_rec_count
= 0;
/* Default timeouts. */
1945 pd
->timer_read
= TIMER_MIN
;
1946 pd
->timer_write
= TIMER_MIN
;
1947 pd
->timer_for_read
= TIMER_FOR_READ_MIN
;
1948 pd
->timer_for_write
= TIMER_FOR_WRITE_MIN
;
1950 DEBUG_MSG("init_rdma_sti: FINISH\n");
1954 void read_regs_rdma(int i
)
1956 printk("%d 0x%08x - 0x0 SHIFT_IOL_CSR\n", i
,
1957 RDR_rdma(SHIFT_IOL_CSR
, i
));
1958 printk("%d 0x%08x - 0x0 SHIFT_IO_CSR\n", i
,
1959 RDR_rdma(SHIFT_IO_CSR
, i
));
1960 printk("%d 0x%08x - 0x0 SHIFT_VID\n", i
,
1961 RDR_rdma(SHIFT_VID
, i
));
1962 printk("%d 0x%08x - 0x4 SHIFT_CH_IDT\n", i
,
1963 RDR_rdma(SHIFT_CH_IDT
, i
));
1964 printk("%d 0x%08x - 0x8 SHIFT_CS\n", i
,
1965 RDR_rdma(SHIFT_CS
, i
));
1966 printk("%d 0x%08x 0x00 - SHIFT_DD_ID\n", i
,
1967 RDR_rdma(SHIFT_DD_ID
, i
));
1968 printk("%d 0x%08x 0x04 - SHIFT_DMD_ID\n", i
,
1969 RDR_rdma(SHIFT_DMD_ID
, i
));
1970 printk("%d 0x%08x 0x08 - SHIFT_N_IDT\n", i
,
1971 RDR_rdma(SHIFT_N_IDT
, i
));
1972 printk("%d 0x%08x 0x0c - SHIFT_ES\n", i
,
1973 RDR_rdma(SHIFT_ES
, i
));
1974 printk("%d 0x%08x 0x10 - SHIFT_IRQ_MC\n", i
,
1975 RDR_rdma(SHIFT_IRQ_MC
, i
));
1976 printk("%d 0x%08x 0x14 - SHIFT_DMA_TCS\n", i
,
1977 RDR_rdma(SHIFT_DMA_TCS
, i
));
1978 printk("%d 0x%08x 0x18 - SHIFT_DMA_TSA\n", i
,
1979 RDR_rdma(SHIFT_DMA_TSA
, i
));
1980 printk("%d 0x%08x 0x1c - SHIFT_DMA_TBC\n", i
,
1981 RDR_rdma(SHIFT_DMA_TBC
, i
));
1982 printk("%d 0x%08x 0x20 - SHIFT_DMA_RCS\n", i
,
1983 RDR_rdma(SHIFT_DMA_RCS
, i
));
1984 printk("%d 0x%08x 0x24 - SHIFT_DMA_RSA\n", i
,
1985 RDR_rdma(SHIFT_DMA_RSA
, i
));
1986 printk("%d 0x%08x 0x28 - SHIFT_DMA_RBC\n", i
,
1987 RDR_rdma(SHIFT_DMA_RBC
, i
));
1988 printk("%d 0x%08x 0x2c - SHIFT_MSG_CS\n", i
,
1989 RDR_rdma(SHIFT_MSG_CS
, i
));
1990 printk("%d 0x%08x 0x30 - SHIFT_TDMSG\n", i
,
1991 RDR_rdma(SHIFT_TDMSG
, i
));
1992 printk("%d 0x%08x 0x34 - SHIFT_RDMSG\n", i
,
1993 RDR_rdma(SHIFT_RDMSG
, i
));
1994 printk("%d 0x%08x 0x38 - SHIFT_CAM\n", i
,
1995 RDR_rdma(SHIFT_CAM
, i
));
1998 void test_send_msg_rdma(unsigned int i
, unsigned int msg
)
2001 WRR_rdma(SHIFT_TDMSG
, i
, msg
);
/*
 * free_chan() - release all DMA memory owned by channel @chd.
 *
 * Table mode (allocs > RCS_ALLOCED_B): walk the rdma_tbl_64_struct_t
 * table, converting each entry's addr/sz from little-endian, free each
 * chunk, then free the table itself. Single mode: free the primary
 * buffer if real_size is set. Finally clears prim_buf_addr.
 *
 * NOTE(review): garbled extraction — the loop's rest-decrement, the
 * endianness conditionals, the else-branch structure and several field
 * resets (orig. 2029-2034, 2036-2037) were elided; code left
 * byte-identical.
 */
2005 void free_chan(dma_chan_t
*chd
)
2008 DEBUG_MSG("free_chan: START\n");
2009 if (chd
->allocs
> RCS_ALLOCED_B
) {
2011 rdma_tbl_64_struct_t
*peltbl
;
/* Walk the descriptor table until real_size bytes are accounted for. */
2012 for (peltbl
= (rdma_tbl_64_struct_t
*)chd
->vdma_tm
,
2013 rest
= chd
->real_size
; rest
> 0; peltbl
++) {
/* Entries are stored little-endian. */
2015 peltbl
->addr
= cpu_to_le64(peltbl
->addr
);
2016 peltbl
->sz
= cpu_to_le64(peltbl
->sz
);
2018 rdma_mem_free(peltbl
->sz
,
2019 (dma_addr_t
) peltbl
->addr
,
2020 (unsigned long) __va(peltbl
->addr
));
/* Release the table itself. */
2023 rdma_mem_free(chd
->size_tm
, chd
->fdma_tm
,
2024 (unsigned long)chd
->vdma_tm
);
/* Single-buffer mode: release the primary buffer. */
2026 if (chd
->real_size
) {
2027 rdma_mem_free(chd
->real_size
, chd
->dma
,
2028 (unsigned long)chd
->prim_buf_addr
);
2035 chd
->prim_buf_addr
= 0;
2038 DEBUG_MSG("free_chan: FINISH\n");
2041 void rdma_mem_free(size_t size
, dma_addr_t dev_memory
,
2042 unsigned long dma_memory
)
2046 struct page
*map
, *mapend
;
2048 DEBUG_MSG("rdma_mem_free: START\n");
2049 mem
= (caddr_t
)dma_memory
;
2050 order
= get_order(size
);
2051 mapend
= virt_to_page(mem
+ (PAGE_SIZE
<< order
) - 1);
2052 for (map
= virt_to_page(mem
); map
<= mapend
; map
++)
2053 ClearPageReserved(map
);
2054 free_pages(dma_memory
, order
);
2055 DEBUG_MSG("rdma_mem_free: FINISH va: 0x%lx, fa: 0x%llx size: 0x%lx\n",
2056 dma_memory
, dev_memory
, size
);
2059 unsigned long __get_free_pages_rdma(int node
, gfp_t gfp_mask
,
2064 page
= alloc_pages_node(node
, gfp_mask
, order
);
2066 return (unsigned long)NULL
;
2067 return (unsigned long) page_address(page
);
/*
 * rdma_mem_alloc() - allocate a page-aligned DMA buffer on @node.
 *
 * Outputs: *dma_memory = kernel virtual address, *mem = physical address
 * (__pa), *real_size = actually allocated size (PAGE_SIZE << order).
 * Pages are marked PG_reserved so they survive remap_pfn_range();
 * rdma_mem_free() is the matching release. Callers treat a non-zero
 * return as failure (see init_chan()).
 *
 * NOTE(review): garbled extraction — local declarations, braces and the
 * return statements (orig. 2072-2075, 2082-2083, 2092-2094) were elided;
 * code left byte-identical.
 */
2070 int rdma_mem_alloc(int node
, size_t size
, dma_addr_t
*mem
, size_t *real_size
,
2071 unsigned long *dma_memory
)
2074 struct page
*map
, *mapend
;
2076 DEBUG_MSG("rdma_mem_alloc: START\n");
2077 order
= get_order(size
);
2078 *dma_memory
= __get_free_pages_rdma(node
, GFP_KERNEL
, order
);
2079 if (!(*dma_memory
)) {
2080 ERROR_MSG("rdma_mem_alloc: Cannot bind DMA address order: %d"
2081 "size: 0x%lx\n", order
, size
);
/* Pin the pages so they may be remapped into userspace later. */
2084 mapend
= virt_to_page((*dma_memory
) + (PAGE_SIZE
<< order
) - 1);
2085 for (map
= virt_to_page((*dma_memory
)); map
<= mapend
; map
++)
2086 SetPageReserved(map
);
2088 *mem
= __pa(*dma_memory
);
2089 *real_size
= PAGE_SIZE
<< order
;
2090 DEBUG_MSG("rdma_mem_alloc: FINISH va: 0x%lx fa: 0x%llx size: 0x%lx"
2091 "real_size: 0x%lx\n", *dma_memory
, *mem
, size
, *real_size
);
/*
 * init_chan() - allocate the DMA buffer(s) for channel @chd.
 *
 * @reqlen: requested buffer size in bytes; @tm: non-zero selects table
 * mode (scatter list of chunks described by rdma_tbl_64_struct_t, entries
 * stored little-endian), zero selects a single contiguous buffer
 * (limited to 0x800000 bytes; size is page-aligned, with RFSM handling).
 * On success chd->allocs becomes RCS_ALLOCED and chd->real_size is
 * returned; on failure state is rolled back to RCS_EMPTY.
 *
 * NOTE(review): garbled extraction — loop conditions, several branch
 * headers, error-path returns and the mode-selecting if/else skeleton
 * were elided (gaps throughout orig. 2096-2226); code left
 * byte-identical.
 */
2095 int init_chan(dma_chan_t
*chd
, int reqlen
, int tm
)
2097 char *err_msg
= NULL
;
2098 rdma_tbl_64_struct_t
*peltbl
;
2099 signed int rest
, tmp_size
;
2100 rdma_addr_struct_t pxx
;
2103 DEBUG_MSG("init_chan: START\n");
2105 ERROR_MSG("init_chan: chd->allocs already %d\n", chd
->allocs
);
/* One 128-byte table element per page of the request, plus one spare. */
2108 #define SIZE_TLB_EL 128
2109 SIZE_TLB
= ((PAGE_ALIGN(reqlen
) / PAGE_SIZE
+ 1) * SIZE_TLB_EL
);
2111 chd
->allocs
= RCS_ALLOCED_B
;
2112 DEBUG_MSG("init_chan: try alloc 0x%x\n", reqlen
);
/* ---- Table mode ---- */
2114 DEBUG_MSG("init_chan: table mode PAGE_SIZE: %x\n", PAGE_SIZE
);
2115 DEBUG_MSG("init_chan: try alloc for tm size SIZE_TLB : 0x%x\n",
/* Elided: "if (rdma_mem_alloc" call head (orig. 2116-2117). */
2118 (chd
->node_for_memory
, SIZE_TLB
,
2119 (dma_addr_t
*)&chd
->fdma_tm
, &chd
->size_tm
,
2120 (unsigned long *)&chd
->vdma_tm
)) {
2121 err_msg
= "rdma_mem_alloc for tm";
2124 pxx
.addr
= (unsigned long)chd
->vdma_tm
;
2125 DEBUG_MSG("init_chan: 0x%08x%08x vdma_tm\n", pxx
.fields
.haddr
,
2127 pxx
.addr
= chd
->fdma_tm
;
2128 DEBUG_MSG("init_chan: 0x%08x%08x fdma_tm\n", pxx
.fields
.haddr
,
2131 /* rest = allign_dma((unsigned int)reqlen);
2132 rest = PAGE_ALIGN(reqlen); */
2134 DEBUG_MSG("init_chan: reqlen: 0x%08x rest: 0x%08x\n",
/* Allocate one chunk per table element until the request is covered. */
2137 for (peltbl
= (rdma_tbl_64_struct_t
*)chd
->vdma_tm
;
2141 unsigned long addr
; /* address */
/* Elided: "if (rdma_mem_alloc" call head (orig. 2142). */
2143 (chd
->node_for_memory
, SIZE_EL_TBL64_RDMA
,
2144 (dma_addr_t
*)&peltbl
->addr
, &size_el
,
2145 (unsigned long *)&addr
)) {
2146 err_msg
= "rdma_mem_alloc for tm element";
/* Partial success: keep what was allocated so far. */
2147 if (chd
->real_size
) {
2149 chd
->dma
= chd
->fdma_tm
;
2151 chd
->allocs
= RCS_ALLOCED
;
2156 pxx
.addr
= (unsigned long)peltbl
;
2157 DEBUG_MSG("init_chan: 0x%08x%08x peltbl\n",
2158 pxx
.fields
.haddr
, pxx
.fields
.laddr
);
/* Table entries are stored little-endian. */
2160 peltbl
->addr
= le64_to_cpu(peltbl
->addr
);
2162 pxx
.addr
= peltbl
->addr
;
2163 DEBUG_MSG("init_chan: 0x%08x%08x peltbl->addr\n",
2164 pxx
.fields
.haddr
, pxx
.fields
.laddr
);
2165 tmp_size
= ((rest
>= size_el
)?size_el
:
2166 (unsigned int)rest
);
2168 peltbl
->sz
= (unsigned long)size_el
;
2169 /* peltbl->sz = (unsigned long)tmp_size; */
2171 peltbl
->sz
= le64_to_cpu(peltbl
->sz
);
2174 /* DEBUG_MSG("init_chan: tmp_size: 0x%08x rest: 0x%08x\n",
2176 chd
->real_size
+= size_el
;
2177 /* chd->real_size += tmp_size; */
2180 chd
->dma
= chd
->fdma_tm
;
/* ---- Single-buffer mode ---- */
2184 DEBUG_MSG("init_chan: single mode PAGE_SIZE: %x\n", PAGE_SIZE
);
2186 if (reqlen
> 0x800000){
2187 ERROR_MSG("init_chan: The large size of the buffer. "
2188 "The buffer must be <= 0x0800000. "
2189 "Use table mode.\n");
2197 rfsm_size
= PAGE_ALIGN(reqlen
);
2199 rfsm_size
= PAGE_ALIGN(reqlen
);
2205 if (rdma_mem_alloc(chd
->node_for_memory
, (unsigned long)rfsm_size
,
2206 (dma_addr_t
*)&chd
->dma_busa
, &chd
->real_size
,
2207 (unsigned long *)&chd
->prim_buf_addr
)) {
2208 err_msg
= "rdma_mem_alloc";
2211 chd
->dma
= chd
->dma_busa
;
2212 pxx
.addr
= chd
->dma
;
2213 DEBUG_MSG("init_chan: 0x%08x%08x chd->dma\n", pxx
.fields
.haddr
,
2217 chd
->full
= (uint_t
)chd
->dma
;
2218 chd
->allocs
= RCS_ALLOCED
;
2219 DEBUG_MSG("init_chan: FINISH chd->real_size: %lx\n", chd
->real_size
);
2220 return chd
->real_size
;
/* Failure path: roll the channel state back and report the stage. */
2223 chd
->allocs
= RCS_EMPTY
;
2225 ERROR_MSG("init_chan: %s FAILED ****\n", err_msg
);
2229 /******************* create devices *************************/
/*
 * create_dev_rdma() - create /sys/class/rdma and one device node per
 * (online link, channel) pair.
 *
 * Node naming: "rdma_<link>_:<chan>", minor = link * RDMA_NODE_DEV + chan.
 *
 * NOTE(review): garbled extraction — locals (i, i_rdma, minor, nod[]),
 * the error return after class_create failure, and closing braces/return
 * were elided; code left byte-identical.
 */
2231 int create_dev_rdma(int major
)
2237 /* Create rdma nodes in /sysfs */
2238 rdma_class
= class_create(THIS_MODULE
, "rdma");
2239 if (IS_ERR(rdma_class
)) {
2240 pr_err("Error creating class: /sys/class/rdma.\n");
/* One node per channel on every online RDMA link. */
2242 for_each_online_rdma(i
) {
2243 /* for_each_rdma(i) { */
2244 for (i_rdma
= 0; i_rdma
< RDMA_NODE_DEV
; i_rdma
++) {
2245 minor
= i
* RDMA_NODE_DEV
+ i_rdma
;
2246 sprintf(nod
,"rdma_%d_:%d", i
, i_rdma
);
2247 pr_info("make node /sys/class/rdma/%s\n", nod
);
2248 if (device_create(rdma_class
, NULL
,
2249 MKDEV(major
, minor
), NULL
, nod
) == NULL
) {
2250 pr_err("create dev: %s a node: %d "
2251 "failed\n", nod
, i
);
/*
 * remove_dev_rdma() - tear down the device nodes and class created by
 * create_dev_rdma(): device_destroy() per (link, channel) minor, then
 * class_destroy() on rdma_class.
 *
 * NOTE(review): garbled extraction — locals, the outer per-link loop
 * header, closing braces and the return were elided; code left
 * byte-identical.
 */
2259 int remove_dev_rdma(int major
)
2265 /* Remove rdma nodes in /sysfs */
/* Elided: outer loop over online links (orig. 2266). */
2267 for (i_rdma
= 0; i_rdma
< RDMA_NODE_DEV
; i_rdma
++) {
2268 minor
= i
* RDMA_NODE_DEV
+ i_rdma
;
2269 (void) sprintf(nod
,"rdma_%d_:%d", i
, i_rdma
);
2270 device_destroy(rdma_class
, MKDEV(major
, minor
));
2273 class_destroy(rdma_class
);
/* Module entry/exit points; rdma_init/rdma_cleanup are defined elsewhere
 * in this file (outside the visible chunk). */
2277 module_init(rdma_init
);
2278 module_exit(rdma_cleanup
);
);