/* linux/elbrus.git: drivers/mcst/rdma_sic/rdma_sic.c */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <asm/apic.h>
#include <asm/uaccess.h>
#include <linux/pci.h>
#include <linux/mcst/ddi.h>
#include <asm-l/bootinfo.h>
#include <linux/mcst/rdma_user_intf.h>
#include <asm/setup.h>
#ifdef CONFIG_E90S
#include <asm/e90s.h>
#ifndef LINUX_2_33_DBG
#include <asm/mpspec.h>
#endif
#endif
#ifdef CONFIG_E2K
#include <asm/e2k.h>
#include <asm/sic_regs.h>
#include <asm/sic_regs_access.h>
#include <asm/e2k_sic.h>
#include <asm/uaccess.h>
#endif
#ifndef LINUX_2_33_DBG
#include <asm/iolinkmask.h>
#include <linux/topology.h>
#endif
#include "rdma_regs.h"
#include "rdma.h"
#include "rdma_error.h"
#define NUM_NODE_RDMA(num_link_rdma) (int)(num_link_rdma/NODE_NUMIOLINKS)
#define NUM_LINK_IN_NODE_RDMA(num_link_rdma) \
	(num_link_rdma - ((int)(num_link_rdma/NODE_NUMIOLINKS))*NODE_NUMIOLINKS)

#define DSF_NO 1

#ifndef VM_RESERVED
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif
MODULE_LICENSE("GPL");

/* Set ATL */
unsigned int tr_atl;
static int atl_v = TR_ATL_B;
module_param(atl_v, int, 0);
MODULE_PARM_DESC(atl_v, "Changes the value of ATL (alive timer limit) reg CAM.");
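/*
 * Usage sketch (assumed, not taken from the source): the alive-timer limit
 * can be overridden at load time, e.g. "insmod rdma_sic.ko atl_v=<limit>".
 * The value is masked with ATL and or'ed with ATL_B before being written to
 * reg CAM (see rdma_init() and RDMA_SET_ATL below).
 */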
/* Struct for class rdma in sysfs */
static struct class *rdma_class;
/*********************************************************************/
/* Enable RFSM - rfsm.                                               */
/* rfsm = DISABLE_RFSM - RFSM disabled (default).                    */
/* rfsm = DMA_RCS_RFSM - RFSM enabled.                               */
/*********************************************************************/
#define CLEAR_RFSM DISABLE_RFSM
unsigned int rfsm = CLEAR_RFSM;
/*********************************************************************/
/* Enable exit GP0 - enable_exit_gp0.                                */
/* enable_exit_gp0 = 0 - exit GP0 disabled (default).                */
/* enable_exit_gp0 = 1 - exit GP0 enabled.                           */
/*********************************************************************/
unsigned int enable_exit_gp0 = DISABLE_EXIT_GP0;
extern int rdma_present;
unsigned int e0regad;
unsigned int e1regad;
unsigned int count_read_sm_max = 800;
unsigned int intr_rdc_count[MAX_NUMIOLINKS];
unsigned int msg_cs_dmrcl;
unsigned int state_cam = 0;
unsigned long time_ID_REQ;
unsigned long time_ID_ANS;
unsigned int state_GP0;
link_id_t rdma_link_id;
#ifdef CONFIG_COMPAT
static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg);
static long rdma_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
#endif
static long rdma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
/* static int rdma_ioctl(struct inode *inode, struct file *filp,
	unsigned int cmd, unsigned long arg); */
static ssize_t rdma_read(struct file *, char *, size_t, loff_t *);
static ssize_t rdma_write(struct file *, const char *, size_t, loff_t *);
static int rdma_open(struct inode *inode, struct file *file);
static int rdma_close(struct inode *inode, struct file *file);
static int rdma_mmap(struct file *file, struct vm_area_struct *vma);
void test_send_msg_rdma(unsigned int i, unsigned int msg);
int get_file_minor(struct file *file);
void init_reg(void);
void free_chan(dma_chan_t *chd);
void rdma_mem_free(size_t size, dma_addr_t dev_memory,
		unsigned long dma_memory);
void init_rdma_sti(int instance);
void read_regs_rdma(int);
int rdma_mem_alloc(int node, size_t size, dma_addr_t *mem,
		size_t *real_size, unsigned long *dma_memory);
int init_chan(dma_chan_t *chd, int reqlen, int tm);
int write_buf(rdma_state_inst_t *xsp, const char *buf,
		unsigned int size, int instance, int channel,
		rdma_ioc_parm_t *parm);
int read_buf(rdma_state_inst_t *xsp, const char *buf, int size,
		int instance, int channel, rdma_ioc_parm_t *parm);
int rdma_remap_page(void *va, size_t sz, struct vm_area_struct *vma);
int rdma_remap_page_tbl(void *va, size_t sz, struct vm_area_struct *vma);
long wait_time_rdma(struct rdma_reg_state *rdma_reg_state,
		signed long timeout);
int rdma_check_buf(unsigned long addr_buf, unsigned int cnst,
		unsigned int need_free_page, char *prefix);

int mk_unlink(char *filename);
int mk_rm_dir(char *dir);
int mk_mkdir(char *pathname, int mode);
int mk_mknod(char *filename, int mode, dev_t dev);
unsigned long join_curr_clock(void);
unsigned int RDR_rdma(unsigned int reg, unsigned int node);
void WRR_rdma(unsigned int reg, unsigned int node, unsigned int val);

int create_dev_rdma(int major);
int remove_dev_rdma(int major);

DEFINE_RAW_SPINLOCK(mu_fix_event);
static struct file_operations rdma_fops = {
	.owner		= THIS_MODULE,
	.read		= rdma_read,
	.write		= rdma_write,
	.unlocked_ioctl	= rdma_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= rdma_compat_ioctl,
#endif
	.mmap		= rdma_mmap,
	.open		= rdma_open,
	.release	= rdma_close,
};
#ifdef CONFIG_E90S
#define NBSR_INF_CFG		0x7088	/* 4 Node Configuration Information */
#define IO_HAB_FLAG		0x00000080
#define E90_IO_CSR_ch_on	0x80000000
#define E90_RDMA_CS_ch_on	0x80000000
#define IOHUB_IOL_MODE		0	/* controller is IO HUB */
#define RDMA_IOL_MODE		1	/* controller is RDMA */
#define IOHUB_ONLY_IOL_ABTYPE	1	/* abonent has only IO HUB controller */
#define RDMA_ONLY_IOL_ABTYPE	2	/* abonent has only RDMA controller */
#define RDMA_IOHUB_IOL_ABTYPE	3	/* abonent has RDMA and IO HUB controller */
#define E90_IOL_CSR_abtype_mask	0x007f0000
#undef numa_node_id
#define numa_node_id()		e90s_cpu_to_node(raw_smp_processor_id())
#undef num_possible_rdmas
#define num_possible_rdmas()	node_rdma_num
#undef num_online_rdmas
#define num_online_rdmas()	node_online_rdma_num
#undef for_each_rdma
#define for_each_rdma(node) \
	for (node = 0; node < MAX_NUMIOLINKS; node++) \
		if (!((node_rdma_map >> node) & 0x00000001)) \
			continue; \
		else
#undef for_each_online_rdma
#define for_each_online_rdma(node) \
	for (node = 0; node < MAX_NUMIOLINKS; node++) \
		if (!((node_online_rdma_map >> node) & 0x00000001)) \
			continue; \
		else
#undef SIC_io_reg_offset	/* FIXME: defined at e90s.h */
#define SIC_io_reg_offset(io_link, reg) ((reg) + 0x1000 * (io_link))
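/*
 * NBSR (node bridge) register accessors: each IO link's register bank is
 * assumed to live at a 0x1000-byte stride from BASE_NODE0, per the
 * SIC_io_reg_offset() macro above.
 */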
static inline unsigned int
sic_read_node_iolink_nbsr_reg(int node_id, unsigned int io_link, int reg_offset)
{
	unsigned int reg_value;

	reg_value = __raw_readl(BASE_NODE0 + node_id * NODE_OFF +
				SIC_io_reg_offset(io_link, reg_offset));
	return reg_value;
}

static inline void
sic_write_node_iolink_nbsr_reg(int node_id, int io_link,
			       unsigned int reg_offset, unsigned int reg_value)
{
	__raw_writel(reg_value, BASE_NODE0 + node_id * NODE_OFF +
		     SIC_io_reg_offset(io_link, reg_offset));
}

#if 0
static inline unsigned int
sic_read_nbsr_reg(int reg_offset)
{
	return sic_read_node_nbsr_reg(numa_node_id(), reg_offset);
}

static inline void
sic_write_nbsr_reg(int reg_offset, unsigned int reg_value)
{
	sic_write_node_nbsr_reg(numa_node_id(), reg_offset, reg_value);
}
#endif
unsigned int node_rdma_map = 0;
unsigned int node_online_rdma_map = 0;
int node_rdma_num = 0;
int node_online_rdma_num = 0;
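/*
 * Probe every IO link on E90S: classify each one as IO HUB or RDMA from
 * NBSR_NODE_CFG, check the channel-on bit, and record the results in the
 * node_*_map/node_*_num globals that the for_each_*_rdma() iterators use.
 */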
void init_node_e90s(void)
{
	/* Until NUMA for sparc V9 is supported in the kernel */
	unsigned int node_iohub_map = 0;
	unsigned int node_online_iohub_map = 0;
	int node_iohub_num = 0;
	int node_online_iohub_num = 0;
	unsigned int node_mask = 0, cpu_mask = 0, i;
	int node;
	int link_on;
	unsigned int reg;

	for_each_online_cpu(node) {
		cpu_mask = cpu_mask | (1 << node);
	}
	for (i = 0; i < MAX_NUMIOLINKS; i++) {
		if ((cpu_mask >> E90S_MAX_NR_NODE_CPUS * i) & 0x0000000f)
			node_mask = node_mask | (1 << i);
	}
	for (i = 0; i < MAX_NUMIOLINKS; i++) {
		if ((node_mask >> i) & 0x00000001)
			node = i;
		else
			continue;
#define DBG_REG_RDMA 0
#if DBG_REG_RDMA
		reg = RDR_rdma(NBSR_INT_CFG, node);
		printk("NBSR_INT_CFG: %x \n", reg);
		reg = RDR_rdma(NBSR_INF_CFG, node);
		printk("NBSR_INF_CFG: %x \n", reg);
		reg = RDR_rdma(NBSR_NODE_CFG, node);
		printk("NBSR_NODE_CFG: %x \n", reg);
		reg = RDR_rdma(SHIFT_IO_CSR, node);
		printk("SHIFT_IO_CSR: %x \n", reg);
		reg = RDR_rdma(SHIFT_CS, node);
		printk("SHIFT_CS: %x \n", reg);
#endif
		link_on = 0;
		reg = RDR_rdma(NBSR_NODE_CFG, node);
		printk("Node #%d IO LINK is", node);

		if ((reg & IO_HAB_FLAG) == IOHUB_IOL_MODE) {
			node_iohub_map = node_iohub_map | (1 << node);
			node_iohub_num++;
			printk(" IO HUB controller");
			reg = RDR_rdma(SHIFT_IO_CSR, node);
			if (reg & E90_IO_CSR_ch_on) {
				node_online_iohub_map =
					node_online_iohub_map | (1 << node);
				node_online_iohub_num++;
				link_on = 1;
				printk(" ON");
			} else {
				printk(" OFF");
			}
		} else {
			node_rdma_map = node_rdma_map | (1 << node);
			node_rdma_num++;
			printk(" RDMA controller");
			reg = RDR_rdma(SHIFT_CS, node);
			if (reg & E90_RDMA_CS_ch_on) {
				node_online_rdma_map =
					node_online_rdma_map | (1 << node);
				node_online_rdma_num++;
				link_on = 1;
				printk(" ON");
			} else {
				printk(" OFF");
			}
		}
		if (link_on) {
			int ab_type;

			reg = RDR_rdma(NBSR_INF_CFG, node);
			ab_type = (reg & E90_IOL_CSR_abtype_mask) >> 16;
			printk(" connected to");
			switch (ab_type) {
			case IOHUB_ONLY_IOL_ABTYPE:
				printk(" IO HUB controller");
				break;
			case RDMA_ONLY_IOL_ABTYPE:
				printk(" RDMA controller");
				break;
			case RDMA_IOHUB_IOL_ABTYPE:
				printk(" IO HUB/RDMA controller");
				break;
			default:
				printk(" unknown controller");
				break;
			}
		}
		printk(" \n");
	}
}
#endif /* CONFIG_E90S */
static inline void
sic_write_node_nbsr_reg_rdma(int node_id, unsigned int reg_offset,
			     unsigned int reg_value)
{
	sic_write_node_iolink_nbsr_reg(NUM_NODE_RDMA(node_id),
				       NUM_LINK_IN_NODE_RDMA(node_id),
				       reg_offset, reg_value);
}

static inline unsigned int
sic_read_node_nbsr_reg_rdma(int node_id, int reg_offset)
{
	unsigned int reg_value;

	reg_value = sic_read_node_iolink_nbsr_reg(NUM_NODE_RDMA(node_id),
				NUM_LINK_IN_NODE_RDMA(node_id), reg_offset);
	return reg_value;
}
unsigned long join_curr_clock(void)
{
	unsigned long ret;

#ifdef CONFIG_E90S	/* E90S */
	ret = get_cycles();
#else			/* E3S */
	ret = E2K_GET_DSREG(clkr);
#endif
	return ret;
}
static inline void __raw_add_wait_queue_from_ddi(raw_wait_queue_head_t *head,
						 raw_wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

static inline void __raw_remove_wait_queue_from_ddi(raw_wait_queue_head_t *head,
						    raw_wait_queue_t *old)
{
	list_del(&old->task_list);
}

void raw_add_wait_queue_from_ddi(raw_wait_queue_head_t *q,
				 raw_wait_queue_t *wait)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__raw_add_wait_queue_from_ddi(q, wait);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}

void raw_remove_wait_queue_from_ddi(raw_wait_queue_head_t *q,
				    raw_wait_queue_t *wait)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__raw_remove_wait_queue_from_ddi(q, wait);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
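/*
 * WRR_rdma()/RDR_rdma() are the driver's only paths for touching RDMA link
 * registers: they wrap the NBSR accessors above and log every access into
 * the event ring via fix_event(), which is what makes the get_event_rdma
 * trace useful for post-mortem debugging.
 */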
unsigned int rdc_byte;

void WRR_rdma(unsigned int reg, unsigned int node, unsigned int val)
{
	/* sic_write_node_iolink_nbsr_reg(node, io_link, reg, val); */
	sic_write_node_nbsr_reg_rdma(node, reg, val);
	fix_event(node, WRR_EVENT, reg, val);
}
EXPORT_SYMBOL(WRR_rdma);

unsigned int RDR_rdma(unsigned int reg, unsigned int node)
{
	unsigned int val;

	/* val = sic_read_node_iolink_nbsr_reg(node, io_link, reg); */
	val = sic_read_node_nbsr_reg_rdma(node, reg);
	fix_event(node, RDR_EVENT, reg, val);
	return val;
}
EXPORT_SYMBOL(RDR_rdma);
#if defined(TRACE_LATENCY) || defined(TRACE_LATENCY_MSG) || \
	defined(TRACE_LATENCY_SM)
void user_trace_stop_my(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	tracing_stop();
#endif
}

void user_trace_start_my(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	tracing_start();
#endif
}
#endif
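/*
 * allign_dma() rounds a byte count up to the next ALLIGN_RDMA boundary
 * (the spelling follows the rest of the driver). For example, assuming
 * ALLIGN_RDMA == 256: allign_dma(300) -> 512, while allign_dma(256) stays
 * 256 because it is already aligned.
 */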
unsigned int allign_dma(unsigned int n)
{
	if (n & (ALLIGN_RDMA - 1)) {
		n += ALLIGN_RDMA;
		n = n & (~(ALLIGN_RDMA - 1));
	}
	return n;
}
int MCG_CS_SEND_ALL_MSG =
	(MSG_CS_SD_Msg | MSG_CS_SGP0_Msg | MSG_CS_SGP1_Msg |
	 MSG_CS_SGP2_Msg | MSG_CS_SGP3_Msg | MSG_CS_SL_Msg |
	 MSG_CS_SUL_Msg | MSG_CS_SIR_Msg);
int MSG_CS_MSF_ALL = MSG_CS_DMPS_Err | MSG_CS_MPCRC_Err | MSG_CS_MPTO_Err |
	MSG_CS_DMPID_Err;
unsigned int count_loop_send_msg_max = 10;
unsigned int count_wait_rdm_max = 64;

dev_rdma_sem_t *msg_snd_dev[2];
hrtime_t
rdma_gethrtime(void)
{
	struct timeval tv;
	hrtime_t val;

	do_gettimeofday(&tv);
	val = tv.tv_sec * 1000000000LL + tv.tv_usec * 1000LL;
	return val;
}
extern int wake_up_state(struct task_struct *p, unsigned int state);

static void __raw_wake_up_common_from_ddi(raw_wait_queue_head_t *q)
{
	struct list_head *tmp, *next;
	raw_wait_queue_t *curr;

	list_for_each_safe(tmp, next, &q->task_list) {
		curr = list_entry(tmp, raw_wait_queue_t, task_list);
		/* wake_up_state(curr->task, TASK_UNINTERRUPTIBLE |
				TASK_INTERRUPTIBLE); */
		wake_up_process(curr->task);
	}
}

void __raw_wake_up_from_ddi(raw_wait_queue_head_t *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__raw_wake_up_common_from_ddi(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
int ddi_cv_broadcast_from_ddi(kcondvar_t *cvp)
{
	__raw_wake_up_from_ddi(cvp);
	return 0;
}
int rdma_cv_broadcast_rdma(void *dev_rdma_sem, unsigned int instance)
{
	rdma_addr_struct_t p_xxb;
	dev_rdma_sem_t *dev = dev_rdma_sem;

	dev->irq_count_rdma++;
	dev->time_broadcast = join_curr_clock();
	p_xxb.addr = (unsigned long)dev;
	fix_event(instance, RDMA_BROADCAST, p_xxb.fields.laddr,
		  dev->irq_count_rdma);
	ddi_cv_broadcast_from_ddi(&dev->cond_var);
	return 0;
}
/* Convert microseconds to jiffies */
clock_t
drv_usectohz_from_ddi(register clock_t mksec)
{
	clock_t clock;
	struct timespec rqtp;

	rqtp.tv_nsec = (mksec % 1000000L) * 1000L;
	rqtp.tv_sec = mksec / 1000000L;
	DEBUG_MSG("drv_usectohz: start, mksec = 0x%lx\n", mksec);
	DEBUG_MSG("drv_usectohz: rqtp.tv_nsec = 0x%lx, rqtp.tv_sec = 0x%lx\n",
		  rqtp.tv_nsec, rqtp.tv_sec);
	clock = timespec_to_jiffies(&rqtp);
	return clock;
}
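/*
 * Sleep on a DDI-style condition variable until a jiffies deadline.
 * Return codes (as consumed by wait_for_irq_rdma_sem() below): 0 - woken
 * by a broadcast, -1 - timed out, -2 - interrupted by a signal. If the
 * caller holds the paired spinlock it is dropped for the sleep and
 * retaken afterwards.
 */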
int
ddi_cv_spin_timedwait_from_ddi(kcondvar_t *cvp, raw_spinlock_t *lock, long tim)
{
	unsigned long expire;
	int rval = 0;
	int raw_spin_locking_done = 0;
	struct task_struct *tsk = current;
	DECLARE_RAW_WAIT_QUEUE(wait);

	expire = tim - jiffies;
	tsk->state = TASK_INTERRUPTIBLE;
	raw_add_wait_queue_from_ddi(cvp, &wait);
	raw_spin_locking_done = raw_spin_is_locked(lock);
	if (raw_spin_locking_done)
		spin_mutex_exit(lock);

	fix_event(0, WAIT_TRY_SCHTO_EVENT, (unsigned int)expire, 0);
	expire = schedule_timeout(expire);
	raw_remove_wait_queue_from_ddi(cvp, &wait);
	tsk->state = TASK_RUNNING;
	if (raw_spin_locking_done)
		spin_mutex_enter(lock);
	if (expire) {
		if (signal_pending(current)) {
			rval = -2;
		}
	} else {
		rval = -1;
	}
	return rval;
}
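/*
 * Wait for an interrupt-side broadcast on a dev_rdma_sem. Returns 1 if the
 * broadcast already happened (irq_count_rdma != 0), -3 if the caller does
 * not hold dev->lock, otherwise the ddi_cv_spin_timedwait_from_ddi()
 * result. The delta_time bookkeeping only feeds the event trace.
 */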
int wait_for_irq_rdma_sem(void *dev_rdma_sem, signed long usec_timeout,
			  unsigned int instance)
{
	unsigned int time_current;
	unsigned int delta_time;
	dev_rdma_sem_t *dev = dev_rdma_sem;
	rdma_addr_struct_t p_xxb;
	int ret = 0;
	signed long timeout_tick;

	if (!raw_spin_is_locked(&dev->lock)) {
		printk("wait_for_irq_rdma_sem: spin is NOT locked:dev: %p\n",
		       dev);
		return -3;
	}
	if (dev->irq_count_rdma) {
		printk("wait_for_irq_rdma_sem(%p): dev->irq_count_rdma: %u "
		       "num_obmen: %u\n", &dev->lock, dev->irq_count_rdma,
		       (unsigned int)dev->num_obmen);
		delta_time = 0;
		if (dev->time_broadcast) {
			time_current = join_curr_clock();
			if (time_current > dev->time_broadcast) {
				delta_time = (unsigned int)(time_current -
						dev->time_broadcast);
			} else {
				delta_time = (unsigned int)(time_current +
						(~0U - dev->time_broadcast));
			}
			delta_time |= (1U << 31);
			fix_event(instance, WAIT_RET_SCHT0_EVENT, delta_time,
				  dev->num_obmen);
			fix_event(instance, WAIT_RET_SCHT0_EVENT,
				  dev->irq_count_rdma,
				  dev->num_obmen);
			dev->time_broadcast = 0;
		}
		return 1;
	}
	p_xxb.addr = usec_timeout;
	fix_event(instance, WAIT_TRY_SCHTO_EVENT,
		  p_xxb.fields.laddr, dev->num_obmen);
	timeout_tick = (unsigned long)jiffies;
	timeout_tick += usec_timeout;
	ret = ddi_cv_spin_timedwait_from_ddi(&dev->cond_var, &dev->lock,
					     timeout_tick);
	delta_time = 0;
	if (dev->time_broadcast) {
		time_current = join_curr_clock();
		if (time_current > dev->time_broadcast) {
			delta_time = (unsigned int)(time_current -
					dev->time_broadcast);
		} else {
			delta_time = (unsigned int)(time_current +
					(~0U - dev->time_broadcast));
		}
		fix_event(instance, WAIT_RET_SCHT1_EVENT, ret, dev->num_obmen);
		dev->time_broadcast = 0;
	} else {
		fix_event(dev->irq_count_rdma, WAIT_RET_SCHT2_EVENT, ret,
			  dev->num_obmen);
	}

	return ret;
}
rdma_event_t rdma_event;
int rdma_event_init = 0;

#include "get_event_rdma.c"
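/*
 * Append one record to the global event ring buffer. The buffer wraps at
 * SIZE_EVENT entries and is shared by all links, hence the mu_fix_event
 * spinlock; nothing is recorded until rdma_event_init is set in rdma_init().
 */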
void fix_event_proc(unsigned int channel, unsigned int event,
		    unsigned int val1, unsigned int val2)
{
	struct event_cur *event_cur;
	unsigned long flags;

	if (!rdma_event_init)
		return;
	raw_spin_lock_irqsave(&mu_fix_event, flags);
	event_cur = &rdma_event.event[rdma_event.event_cur];
	event_cur->clkr = join_curr_clock();
	event_cur->event = event;
	event_cur->channel = channel;
	event_cur->val1 = val1;
	event_cur->val2 = val2;
	rdma_event.event_cur++;
	if (SIZE_EVENT == rdma_event.event_cur) {
		rdma_event.event_cur = 0;
	}
	raw_spin_unlock_irqrestore(&mu_fix_event, flags);
	return;
}
DECLARE_WAIT_QUEUE_HEAD(wqh_1);

#include "rdma_intr.c"
#include "rdma_read_buf.c"
#include "rdma_write_buf.c"
#include "rdma_send_msg.c"

struct rdma_state *rdma_state;

int irq_mc;

struct rdma_reg_state rdma_reg_state[MAX_NUMIOLINKS];
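/*
 * Module init: verify the machine has a full SIC and at least one online
 * RDMA link, register the char device, program link IDs and DMA control
 * registers, unmask the interrupt set collected in irq_mc, and announce
 * ourselves to the peers with MSG_CS_SIR_Msg.
 */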
static int __init rdma_init(void)
{
	unsigned int i;
	int node;
	int major;
	size_t size_rdma_state;
	rdma_addr_struct_t p_xxb;

	DEBUG_MSG("rdma_init: START\n");
	DEBUG_MSG("rdma_init: %lx - raw_spinlock_t\n", sizeof (raw_spinlock_t));
	DEBUG_MSG("rdma_init: %lx - spinlock_t\n", sizeof (spinlock_t));

#if RDMA_PRN_ADDR_FUN
	printk("ADDR_FUN: %p - static rdma_ioctl\n", rdma_ioctl);
	printk("ADDR_FUN: %p - static rdma_read\n", rdma_read);
	printk("ADDR_FUN: %p - static rdma_write\n", rdma_write);
	printk("ADDR_FUN: %p - static rdma_open\n", rdma_open);
	printk("ADDR_FUN: %p - static rdma_close\n", rdma_close);
	printk("ADDR_FUN: %p - static rdma_mmap\n", rdma_mmap);
	printk("ADDR_FUN: %p - get_file_minor\n", get_file_minor);
	printk("ADDR_FUN: %p - free_chan\n", free_chan);
	printk("ADDR_FUN: %p - rdma_mem_free\n", rdma_mem_free);
	printk("ADDR_FUN: %p - init_rdma_sti\n", init_rdma_sti);
	printk("ADDR_FUN: %p - read_regs_rdma\n", read_regs_rdma);
	printk("ADDR_FUN: %p - rdma_mem_alloc\n", rdma_mem_alloc);
	printk("ADDR_FUN: %p - init_chan\n", init_chan);
	printk("ADDR_FUN: %p - write_buf\n", write_buf);
	printk("ADDR_FUN: %p - read_buf\n", read_buf);
	printk("ADDR_FUN: %p - rdma_remap_page\n", rdma_remap_page);
#endif

	if (!HAS_MACHINE_E2K_FULL_SIC) {
		ERROR_MSG("rdma_init: sorry, RDMA works only on e3s/e90s/e2s\n");
		DEBUG_MSG("rdma_init: FINISH\n");
		return -ENODEV;
	}
	if (!rdma_present) {
		rdma_present = 1;
	} else {
		ERROR_MSG("rdma_init: RDMA registers busy.\n");
		return -ENODEV;
	}
#ifdef CONFIG_E90S
	init_node_e90s();
#endif
	if (!num_possible_rdmas()) {
		ERROR_MSG("rdma_init: RDMA hardware is absent\n");
		rdma_present = 0;
		return -ENODEV;
	}
	if (!num_online_rdmas()) {
		ERROR_MSG("rdma_init: RDMA does not support hot plugging. "
			  "Connect the cable and reboot machine.\n");
		rdma_present = 0;
		return -ENODEV;
	}
	rdma_event_init = 1;
#ifdef CONFIG_E90S
	INFO_MSG("RDMA: running on E90S, NODE_NUMIOLINKS: %d "
		 "MAX_NUMIOLINKS: %d\n", NODE_NUMIOLINKS, MAX_NUMIOLINKS);
	INFO_MSG("E90S. Loopback mode is not implemented.\n");
#else /* E3S */
	INFO_MSG("RDMA: running on E3S/CUBIC/E2S, NODE_NUMIOLINKS: %d "
		 "MAX_NUMIOLINKS: %d\n", NODE_NUMIOLINKS, MAX_NUMIOLINKS);
	if (IS_MACHINE_E3S) {
		INFO_MSG("E3S. Loopback mode is not implemented.\n");
	}
	if (IS_MACHINE_ES2) {
		INFO_MSG("CUBIC. Loopback mode is not implemented.\n");
	}
	if (IS_MACHINE_E2S) {
		INFO_MSG("E2S. Loopback mode implemented.\n");
		INFO_MSG("E2S. IS_MACHINE_E2S: %d IS_MACHINE_E2S: %x.\n",
			 IS_MACHINE_E2S, IS_MACHINE_E2S);
	}
#endif
	node = numa_node_id();
	fix_event(node, RDMA_INIT, START_EVENT, 0);
	major = register_chrdev(0, board_name, &rdma_fops);
	if (major < 0) {
		ERROR_MSG("rdma_init: there is no free major\n");
		goto failed;
	}
	DEBUG_MSG("rdma_init: major: %d\n", major);
	DEBUG_MSG("rdma_init: I am on %d numa_node_id\n", node);
	DEBUG_MSG("rdma_init: %lx: sizeof (nodemask_t)\n", sizeof (nodemask_t));

	rdma_interrupt_p = rdma_interrupt;

	size_rdma_state = sizeof (struct rdma_state);
	rdma_state = (struct rdma_state *)kmalloc(size_rdma_state, GFP_KERNEL);
	if (rdma_state == (struct rdma_state *)NULL) {
		ERROR_MSG("rdma_init: rdma_state == NULL\n");
		unregister_chrdev(major, board_name);
		rdma_present = 0;
		return -EFAULT;
	}
	memset(rdma_state, 0, size_rdma_state);
	DEBUG_MSG("rdma_init: sizeof (struct rdma_state): %x\n",
		  size_rdma_state);
	rdma_state->size_rdma_state = size_rdma_state;
	rdma_state->major = major;
	for_each_online_rdma(i) {
		WRR_rdma(SHIFT_CH_IDT, i, (l_base_mac_addr[3] + i) |
			 ((l_base_mac_addr[4] + i) << 8));
		init_rdma_sti(i);
	}
	for_each_online_rdma(i) {
		unsigned int cs;
		int count;

		cs = RDR_rdma(SHIFT_CS, i);
#ifdef CONFIG_E2K
		if (IS_MACHINE_E2S)
			WRR_rdma(SHIFT_CS, i, cs | CS_DSM | E2S_CS_PTOCL);
		else
			WRR_rdma(SHIFT_CS, i, cs | CS_DSM);
#else
		WRR_rdma(SHIFT_CS, i, cs | CS_DSM);
#endif
		printk("SHIFT_CS: %x\n", RDR_rdma(SHIFT_CS, i));

		WRR_rdma(SHIFT_DMA_TCS, i, DMA_TCS_Tx_Rst);
		WRR_rdma(SHIFT_DMA_TCS, i,
			 RDR_rdma(SHIFT_DMA_TCS, i) | RCode_64 | DMA_TCS_DRCL);
#define COUNT_RESET_RCS 10
		for (count = 1; count < COUNT_RESET_RCS; count++)
			WRR_rdma(SHIFT_DMA_RCS, i, DMA_RCS_Rx_Rst);
		WRR_rdma(SHIFT_DMA_RCS, i, RDR_rdma(SHIFT_DMA_RCS, i) | WCode_64);
	}
	tr_atl = ATL_B | (atl_v & ATL);
	printk("Reg CAM ATL: %x\n", tr_atl);
	irq_mc =
		IRQ_RDM   |
		IRQ_RGP3M |
		IRQ_RGP2M |
		IRQ_RGP1M |
		IRQ_RGP0M |
		IRQ_RIAM  |
		IRQ_RIRM  |
		IRQ_RULM  |
		IRQ_RLM   |
		IRQ_MSF   |
#if DSF_NO
		/* IRQ_DSF | */
#else
		IRQ_DSF   |
#endif
		IRQ_TDC   |
		IRQ_RDC   |
		IRQ_CMIE;
	for_each_online_rdma(i) {
		WRR_rdma(SIC_rdma_irq_mc, i, irq_mc);
		/* read_regs_rdma(i); */
	}
	msg_cs_dmrcl = MSG_CS_DMRCL;
	for_each_online_rdma(i) {
		rdma_state_inst_t *xsp;
		int ret = 0;

		p_xxb.addr = (unsigned long)&rdma_state->rdma_sti[i];
		DEBUG_MSG("rdma_init:link:%d rdma_state->rdma_sti:0x%08x%08x\n",
			  i, p_xxb.fields.haddr, p_xxb.fields.laddr);
		xsp = &rdma_state->rdma_sti[i];
		ret = send_msg(xsp, 0, i, MSG_CS_SIR_Msg, 0);
		if (ret < 0) {
			ERROR_MSG("rdma_init: FAIL send MSG_CS_SIR_Msg from "
				  "link: %x ret: %d\n", i, ret);
		} else if (ret == 0) {
			printk("rdma_init: FAIL send MSG_CS_SIR_Msg "
			       "from link: %x. SM is absent\n", i);
		}
	}
#ifdef MODULE
	if (create_dev_rdma(major))
		printk("rdma_init: Error creating devices. "
		       "Create a device manually.");
#endif
	return 0;
failed:
	DEBUG_MSG("rdma_init: FINISH\n");
	fix_event(node, RDMA_INIT, RETURN_EVENT, 0);
	rdma_present = 0;
	return -ENODEV;
}
long wait_time_rdma(struct rdma_reg_state *rdma_reg_state, signed long timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	long ret;

	add_wait_queue(&rdma_reg_state->wqh_d, &wait);
	set_task_state(current, TASK_INTERRUPTIBLE);
	ret = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&rdma_reg_state->wqh_d, &wait);
	return ret;
}
unsigned char bus_number_rdma, devfn_rdma;

static void rdma_cleanup(void)
{
	int i, major;

	DEBUG_MSG("rdma_cleanup: START\n");
	DEBUG_MSG("rdma_cleanup: rdma_state->major %d \n",
		  (int)rdma_state->major);
	major = (int)rdma_state->major;
	for_each_online_rdma(i) {
		WRR_rdma(SIC_rdma_irq_mc, i, 0x0);
	}
	rdma_interrupt_p = (void *)NULL;
#ifdef MODULE
	remove_dev_rdma(rdma_state->major);
#endif
	unregister_chrdev(rdma_state->major, board_name);
	rdma_event_init = 0;
	kfree(rdma_state);
	if (rdma_present)
		rdma_present = 0;
	DEBUG_MSG("rdma_cleanup: FINISH\n");
	return;
}
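/*
 * Release a channel on close: the open bitmask is cleared, the DMA channel
 * buffers are freed, and the per-direction read/write bookkeeping is reset
 * so a later open starts from a clean exchange state.
 */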
static int rdma_close(struct inode *inode, struct file *file)
{
	int minor;
	int instance;
	int channel;
	int i;
	dma_chan_t *chd;
	rw_state_p pd;
	rdma_state_inst_t *rdma_sti;

	DEBUG_MSG("rdma_close: START\n");
	minor = get_file_minor(file);
	if (minor < 0) {
		ERROR_MSG("rdma_close: minor < 0: %d \n", minor);
		return minor;
	}
	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	DEBUG_MSG("rdma_close: instance: %d channel: %d\n", instance, channel);
	rdma_sti = &rdma_state->rdma_sti[instance];
	mutex_enter(&rdma_sti->mu);
	rdma_sti->opened &= ~(1 << channel);
	if (channel < 7) {
		chd = &rdma_sti->dma_chans[channel];
		free_chan(chd);
	}
	/* To properly complete the exchange */
	for (i = 0; i < 2; i++) {
		pd = &rdma_sti->rw_states_d[i];
		pd->trwd_was = 0;
		pd->clock_receive_trwd = 0;
		pd->clock_begin_read = 0;
		pd->clock_end_read_old = 0;
		pd->clock_begin_read_old = 0;
		pd->trwd_send_count = 0;
		pd->ready_send_count = 0;
		pd->trwd_rec_count = 0;
		pd->ready_rec_count = 0;
		/* pd->n_ready = 0; */
		pd->stat = 0;
		pd->timer_read = TIMER_MIN;
		pd->timer_write = TIMER_MIN;
		pd->timer_for_read = TIMER_FOR_READ_MIN;
		pd->timer_for_write = TIMER_FOR_WRITE_MIN;
	}

	DEBUG_MSG("rdma_close: opened.minor.instance.channel: 0x%x.%d.%d.%d\n",
		  rdma_sti->opened, minor, instance, channel);
	mutex_exit(&rdma_sti->mu);
	DEBUG_MSG("rdma_close: FINISH\n");
	return 0;
}
static int rdma_open(struct inode *inode, struct file *file)
{
	int minor, file_eys = 0, i;
	int instance;
	int firstopen = 0;
	int channel;
	rdma_state_inst_t *rdma_sti;

	DEBUG_MSG("rdma_open: START\n");
	if (file == (struct file *)NULL) {
		ERROR_MSG("rdma_open: file is NULL\n");
		return -EINVAL;
	}
	minor = get_file_minor(file);
	if (minor < 0) {
		ERROR_MSG("rdma_open: minor(%d) < 0\n", minor);
		return -EINVAL;
	}
	instance = DEV_inst(minor);
	for_each_online_rdma(i)
		if (i == instance)
			file_eys++;
	if (!file_eys) {
		ERROR_MSG("rdma_open:instance %d not support RDMA\n", instance);
		return -EINVAL;
	}
	channel = DEV_chan(minor);
	DEBUG_MSG("rdma_open: instance: %d channel: %d\n", instance, channel);
	rdma_sti = &rdma_state->rdma_sti[instance];
	mutex_enter(&rdma_sti->mu);
	firstopen = (((1 << channel) & rdma_sti->opened) == 0);
	if (firstopen == 0) {
		ERROR_MSG("rdma_open: device EBUSY: minor: %d inst: %d "
			  "channel: %d\n", minor, instance, channel);
		mutex_exit(&rdma_sti->mu);
		return -EBUSY;
	}
	rdma_sti->opened |= (1 << channel);
	DEBUG_MSG("rdma_open: opened.minor.instance.channel: 0x%x.%d.%d.%d\n",
		  rdma_sti->opened, minor, instance, channel);
	mutex_exit(&rdma_sti->mu);
	DEBUG_MSG("rdma_open FINISH\n");
	return 0;
}
/* static int rdma_ioctl(struct inode *inode, struct file *filp,
	unsigned int cmd, unsigned long arg) */
static long rdma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int minor;
	int instance;
	int channel;
	int res = 0;
	dma_chan_t *chd;
	rdma_state_inst_t *rdma_sti;
	rdma_ioc_parm_t parm;
	size_t rval;
	dev_rdma_sem_t *dev_sem;
	rw_state_p pd;
	/* long ret; */

	DEBUG_MSG("rdma_ioctl: START cmd %x\n", cmd);
	minor = get_file_minor(filp);
	if (minor < 0) {
		ERROR_MSG("rdma_ioctl: minor(%d) < 0 cmd: %x\n",
			  (int)minor, cmd);
		return minor;
	}
	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	rdma_sti = &rdma_state->rdma_sti[instance];
	switch (cmd) {
	case RDMA_IOC_GET_neighbour_map:
	{
		if (copy_to_user((void __user *)arg, &node_online_neighbour_map,
				 sizeof (nodemask_t))) {
			ERROR_MSG("rdma_ioctl: RDMA_IOC_GET_neighbour_map: "
				  "copy_to_user failed\n");
			return -EINVAL;
		}
		return 0;
		break;
	}
	case RDMA_IOC_GET_ID:
	{
		int i;
		rdma_state_inst_t *xsp;
		int ret;

		for_each_online_rdma(i) {
			xsp = &rdma_state->rdma_sti[i];
			ret = send_msg(xsp, 0, i, MSG_CS_SIR_Msg, 0);
			if (ret < 0) {
				ERROR_MSG("rdma_ioctl: FAIL send MSG_CS_SIR_Msg "
					  "from link: %x ret: %d\n", i, ret);
			} else if (ret == 0) {
				printk("rdma_ioctl: FAIL send MSG_CS_SIR_Msg "
				       "from link: %x. SM is absent\n", i);
			}
		}
		mdelay(30);

		rdma_link_id.count_links = MAX_NUMIOLINKS;
		for_each_online_rdma(i) {
			rdma_link_id.link_id[i][0] = 1;
			rdma_link_id.link_id[i][1] = RDR_rdma(SHIFT_CH_IDT, i);
			rdma_link_id.link_id[i][2] = RDR_rdma(SHIFT_N_IDT, i);
			if (copy_to_user((void __user *)arg, &rdma_link_id,
					 sizeof(link_id_t))) {
				ERROR_MSG("rdma_ioctl:RDMA_IOC_GET_ID:"
					  "copy_to_user failed\n");
				return -EINVAL;
			}
		}
		return 0;
		break;
	}
	/* Reset DMA */
	case RDMA_IOC_RESET_DMA:
	{
		reset_link_t reset_link;
		rw_state_p pd = NULL;
		dev_rdma_sem_t *dev_sem;
		rdma_state_inst_t *xsp;

		xsp = &rdma_state->rdma_sti[instance];

		rval = copy_from_user(&reset_link, (void __user *)arg,
				      sizeof (reset_link_t));
		if (rval) {
			ERROR_MSG("rdma_ioctl(%d, %d, %x): copy_from_user "
				  "failed size: %lx rval: %lx\n",
				  instance, channel, cmd,
				  sizeof (reset_link_t), rval);
			return -EINVAL;
		}
		if (reset_link.tcs_reset == 1) {
			/* Enable exit gp0 */
			if (enable_exit_gp0) {
				int ret_send_msg, j;

				for (j = 0; j < 10; j++) {
					ret_send_msg = send_msg(xsp, 0,
								instance,
								MSG_CS_SGP0_Msg,
								0);
					if (ret_send_msg > 0)
						break;
					if (ret_send_msg < 0) {
						ERROR_MSG("rdma_ioctl: "
							"FAIL send MSG_CS_SGP0_Msg "
							"from link: %x ret: %d\n",
							instance, ret_send_msg);
					} else if (ret_send_msg == 0) {
						DEBUG_MSG("rdma_ioctl: FAIL send"
							" MSG_CS_SGP0_Msg "
							"from link: %x. SM is absent: %x "
							"MSG_CS: %x \n",
							instance, ret_send_msg,
							RDR_rdma(SHIFT_MSG_CS, instance));
					}
				}
			}
		}
		if (reset_link.rcs_reset == 1) {
			/* Enable exit gp0 */
			if (enable_exit_gp0) {
				pd = &rdma_sti->rw_states_d[READER];
				dev_sem = &pd->dev_rdma_sem;
				raw_spin_lock_irq(&dev_sem->lock);
				pd->state_GP0 = 0;
				raw_spin_unlock_irq(&dev_sem->lock);
			}
		}
		reset_link.tcs = RDR_rdma(SHIFT_DMA_TCS, instance);
		reset_link.rcs = RDR_rdma(SHIFT_DMA_RCS, instance);
		rval = copy_to_user((reset_link_t __user *)arg, &reset_link,
				    sizeof (reset_link));
		return 0;
		break;
	}
	}

	DEBUG_MSG("rdma_ioctl: minor: %d\n", minor);
	DEBUG_MSG("rdma_ioctl: sizeof (rdma_ioc_parm_t): %x, "
		  "sizeof (parm): %x\n", sizeof (rdma_ioc_parm_t),
		  sizeof (parm));
	rval = copy_from_user(&parm, (void __user *)arg,
			      sizeof (rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_ioctl(%d, %d, %x): copy_from_user failed size: "
			  "%lx rval: %lx\n", instance, channel, cmd,
			  sizeof (rdma_ioc_parm_t), rval);
		return -EINVAL;
	}

	parm.err_no = res = 0;
	switch (cmd) {
	case RDMA_IOC_RESET_TCS:
	{
#define COUNT_RESET_TCS 100
#define DELAY_RESET_TCS 10
		unsigned tcs, es, i;

		for (i = 0; i < COUNT_RESET_TCS; i++) {
			WRR_rdma(SHIFT_DMA_TCS, instance, DMA_TCS_Tx_Rst);
			mdelay(DELAY_RESET_TCS);
			tcs = RDR_rdma(SHIFT_DMA_TCS, instance);
			es = RDR_rdma(SHIFT_ES, instance);
		}
		WRR_rdma(SHIFT_DMA_TCS, instance, RCode_64 | DMA_TCS_DRCL);
		tcs = RDR_rdma(SHIFT_DMA_TCS, instance);
		parm.acclen = tcs;
		break;
	}

	case RDMA_IOC_RESET_RCS:
	{
		unsigned rcs, es, i;
#define COUNT_RESET_RCS 10
		for (i = 0; i < COUNT_RESET_RCS; i++) {
			WRR_rdma(SHIFT_DMA_RCS, instance, DMA_RCS_Rx_Rst);
			rcs = RDR_rdma(SHIFT_DMA_RCS, instance);
			es = RDR_rdma(SHIFT_ES, instance);
		}
		WRR_rdma(SHIFT_DMA_RCS, instance, WCode_64);
		rcs = RDR_rdma(SHIFT_DMA_RCS, instance);
		parm.acclen = rcs;
		break;
	}
	case RDMA_IOC_SET_MODE_LOOP:
	{
		int rdma_loopback_mode;
#ifdef CONFIG_E2K
		if (IS_MACHINE_E2S) {
			if (parm.reqlen == DISABLE_LOOP) {
				WRR_rdma(SHIFT_CS, instance,
					 RDR_rdma(SHIFT_CS, instance) &
					 ~E2S_CS_LOOP);
			} else {
				WRR_rdma(SHIFT_CS, instance,
					 RDR_rdma(SHIFT_CS, instance) |
					 E2S_CS_LOOP);
			}
			rdma_loopback_mode = RDR_rdma(SHIFT_CS, instance) &
					     E2S_CS_LOOP;
		} else {
			/* INFO_MSG("Loopback mode not released.\n"); */
			rdma_loopback_mode = 0;
		}
#else
		/* INFO_MSG("Loopback mode not released.\n"); */
		rdma_loopback_mode = 0;
#endif
		parm.acclen = rdma_loopback_mode;
		break;
	}

	case RDMA_IOC_SET_MODE_RFSM:
	{
		if (parm.reqlen == DISABLE_RFSM) {
			rfsm = CLEAR_RFSM;
		} else {
			rfsm = DMA_RCS_RFSM;
		}
		parm.acclen = rfsm;
		break;
	}

	case RDMA_IOC_SET_MODE_EXIT_GP0:
	{
		if (parm.reqlen == DISABLE_EXIT_GP0) {
			enable_exit_gp0 = DISABLE_EXIT_GP0;
		} else {
			enable_exit_gp0 = ENABLE_EXIT_GP0;
		}
		parm.acclen = enable_exit_gp0;
		break;
	}
	case RDMA_IOC_DUMPREG0:
	case RDMA_IOC_DUMPREG1:
		read_regs_rdma(instance);
		break;

	case RDMA_IOC_WRR:
	{
#ifdef CONFIG_E90S
		if ((parm.reqlen == 0x900) ||
		    ((parm.reqlen >= 0x2000) && (parm.reqlen <= 0x2004)) ||
		    ((parm.reqlen >= 0x3000) && (parm.reqlen <= 0x3088))) {
			/* sic_write_node_nbsr_reg(instance, parm.reqlen,
				parm.acclen); */
			WRR_rdma(parm.reqlen, instance, parm.acclen);
		} else {
			return -EINVAL;
		}
#else
		if ((parm.reqlen == 0x900) ||
		    ((parm.reqlen >= 0x700) && (parm.reqlen <= 0x704)) ||
		    ((parm.reqlen >= 0x800) && (parm.reqlen <= 0x888))) {
			/* sic_write_node_nbsr_reg(instance, parm.reqlen,
				parm.acclen); */
			WRR_rdma(parm.reqlen, instance, parm.acclen);
		} else {
			return -EINVAL;
		}
#endif
		break;
	}

	case RDMA_IOC_RDR:
	{
#ifdef CONFIG_E90S
		if ((parm.reqlen <= 0x900) ||
		    ((parm.reqlen >= 0x2000) && (parm.reqlen <= 0x2004)) ||
		    ((parm.reqlen >= 0x3000) && (parm.reqlen <= 0x3088))) {
			/* parm.acclen = sic_read_node_nbsr_reg(instance,
				parm.reqlen); */
			parm.acclen = RDR_rdma(parm.reqlen, instance);
		} else {
			return -EINVAL;
		}
#else
		if ((parm.reqlen == 0x900) ||
		    ((parm.reqlen >= 0x700) && (parm.reqlen <= 0x704)) ||
		    ((parm.reqlen >= 0x800) && (parm.reqlen <= 0x888))) {
			parm.acclen = RDR_rdma(parm.reqlen, instance);
		} else {
			return -EINVAL;
		}
#endif
		break;
	}
	case RDMA_WAKEUP_WRITER:
	{
		dev_rdma_sem_t *dev_sem;
		rw_state_p pd;

		pd = &rdma_sti->rw_states_d[WRITER];
		dev_sem = &pd->dev_rdma_sem;
		raw_spin_lock_irq(&dev_sem->lock);
		rdma_cv_broadcast_rdma(&pd->dev_rdma_sem, instance);
		raw_spin_unlock_irq(&dev_sem->lock);
		break;
	}

	case RDMA_WAKEUP_READER:
	{
		dev_rdma_sem_t *dev_sem;
		rw_state_p pd;

		pd = &rdma_sti->rw_states_d[READER];
		dev_sem = &pd->dev_rdma_sem;
		raw_spin_lock_irq(&dev_sem->lock);
		rdma_cv_broadcast_rdma(&pd->dev_rdma_sem, instance);
		raw_spin_unlock_irq(&dev_sem->lock);
		break;
	}

	case RDMA_CLEAN_TDC_COUNT:
	{
		switch (channel) {
		case 0:
		case 1:
		case 2:
		case 3:
			pd = &rdma_sti->rw_states_d[WRITER];
			break;
		default:
			ERROR_MSG("rdma_ioctl: CLEAN_TDC: (%d,%d): "
				  "Unexpected channel\n", instance, channel);
			return -EIO;
		}
		dev_sem = &pd->dev_rdma_sem;
		dev_sem->num_obmen = 0;
		dev_sem->irq_count_rdma = 0;
		dbg_ioctl("CLEAN_TDC: %d dev_sem->num_obmen: %x\n",
			  instance, dev_sem->num_obmen);
		break;
	}
#define COUNT_CLK 10
	case RDMA_GET_CLKR:
	{
		u64 time[COUNT_CLK];
		int i;

		for (i = 0; i < COUNT_CLK; i++)
			time[i] = join_curr_clock();
		for (i = 0; i < COUNT_CLK; i++)
			printk("0x%llx\n", time[i]);
		break;
	}

	case RDMA_GET_MAX_CLKR:
	{
		u64 time[COUNT_CLK];
		u64 max_clk = 0;
		u64 max_clk_all = 0;
		int i;
		int count_rep_clk = 0;

#define COUNT_REP_CLK 100
rep_max_clk:
		for (i = 0; i < COUNT_CLK; i++)
			time[i] = join_curr_clock();
		for (i = 0; i < COUNT_CLK; i++) {
			if (max_clk < time[i])
				max_clk = time[i];
		}
		if (max_clk_all < max_clk) {
			max_clk_all = max_clk;
			printk("0x%llx - max_clk_all\n", max_clk_all);
			count_rep_clk++;
			if (count_rep_clk < COUNT_REP_CLK)
				goto rep_max_clk;
		}
		break;
	}
	case RDMA_CLEAN_RDC_COUNT:
	{
		intr_rdc_count[instance] = 0;
		switch (channel) {
		case 0:
		case 1:
		case 2:
		case 3:
			pd = &rdma_sti->rw_states_d[READER];
			break;
		default:
			ERROR_MSG("rdma_ioctl: CLEAN_RDC: (%d,%d): "
				  "Unexpected channel\n", instance, channel);
			return -EIO;
		}
		dev_sem = &pd->dev_rdma_sem;
		dev_sem->num_obmen = 0;
		dev_sem->irq_count_rdma = 0;
		dbg_ioctl("CLEAN_RDC: intr_rdc_count[%d]: %u "
			  "dev_sem->num_obmen: %x\n", instance,
			  intr_rdc_count[instance], dev_sem->num_obmen);
		break;
	}

	case RDMA_TIMER_FOR_READ:
		dbg_ioctl("cmd = RDMA_TIMER_FOR_READ, reqlen (mksec) = 0x%x\n",
			  MIN_min(TIMER_FOR_READ_MAX, parm.reqlen));
		parm.acclen = (&rdma_sti->rw_states_d[READER])->timer_for_read;
		(&rdma_sti->rw_states_d[READER])->timer_for_read =
			MAX_max(TIMER_FOR_READ_MIN, MIN_min(TIMER_FOR_READ_MAX,
				parm.reqlen));
		parm.reqlen = (&rdma_sti->rw_states_d[READER])->timer_for_read;
		break;

	case RDMA_TIMER_FOR_WRITE:
		dbg_ioctl("cmd = RDMA_TIMER_FOR_WRITE, reqlen (mksec) = 0x%x\n",
			  MIN_min(TIMER_FOR_WRITE_MAX, parm.reqlen));
		parm.acclen = (&rdma_sti->rw_states_d[WRITER])->timer_for_write;
		(&rdma_sti->rw_states_d[WRITER])->timer_for_write =
			MAX_max(TIMER_FOR_WRITE_MIN, MIN_min(TIMER_FOR_WRITE_MAX,
				parm.reqlen));
		parm.reqlen = (&rdma_sti->rw_states_d[WRITER])->timer_for_write;
		break;
	case RDMA_IOC_ALLOCB:
		DEBUG_MSG("rdma_ioctl: cmd = RDMA_IOC_ALLOCB, "
			  "reqlen = 0x%lx\n", (long)parm.reqlen);
		chd = &rdma_sti->dma_chans[channel];
		chd->node_for_memory = NUM_NODE_RDMA(instance);
		if (chd->allocs != RCS_EMPTY) {
			ERROR_MSG("rdma_ioctl: RDMA_IOC_ALLOCB: "
				  "WRONGLY finish: channel : %d "
				  "chd->allocs: %i\n", channel, chd->allocs);
			res = -1;
			parm.err_no = RDMA_E_ALLOC;
			parm.acclen = chd->allocs;
			break;
		}
		parm.acclen = init_chan(chd, parm.reqlen, parm.rwmode);
		if (parm.acclen < -1) {
			ERROR_MSG("rdma_ioctl: RDMA_IOC_ALLOCB: channel : %d "
				  "WRONGLY finish: parm.acclen: %d\n",
				  channel, parm.acclen);
			res = -1;
			parm.err_no = -parm.acclen;
			break;
		}
		if (parm.acclen < 0) {
			ERROR_MSG("rdma_ioctl: RDMA_IOC_ALLOCB: "
				  "WRONGLY finish: RDMA_E_NOBUF\n");
			res = -1;
			parm.err_no = RDMA_E_NOBUF;
			break;
		}
		parm.rwmode = chd->full;
		DEBUG_MSG("rdma_ioctl: phys: 0x%llx full: 0x%08x\n", chd->dma,
			  chd->full);
		break;

	case RDMA_GET_STAT:
		rdma_sti->stat_rdma.cur_clock = jiffies;
		if (copy_to_user((void __user *)arg, &rdma_sti->stat_rdma,
				 sizeof (struct stat_rdma))) {
			ERROR_MSG("rdma_ioctl: copy_to_user failed\n");
			return -EINVAL;
		}
		return 0;

	case RDMA_GET_EVENT:
	{
		get_event_rdma(1);
		return 0;
	}

	case RDMA_SET_STAT:
		memset(&rdma_sti->stat_rdma, 0, sizeof (struct stat_rdma));
		parm.acclen = 0;
		break;
	case RDMA_IS_CAM_YES:
	{
		unsigned int atl;
		int ret_time_dwait = 0;
		dev_rdma_sem_t *dev_sem;
		rw_state_p pcam;

		event_ioctl(instance, RDMA_IS_CAM_YES_EVENT, 1, 0);
		pcam = &rdma_sti->ralive;
		dev_sem = &pcam->dev_rdma_sem;
		ret_time_dwait = 0;
		atl = RDR_rdma(SHIFT_CAM, instance);
		if (atl) {
			parm.acclen = atl;
			parm.err_no = 0;
			goto end_RDMA_IS_CAM_YES;
		}
		raw_spin_lock_irq(&dev_sem->lock);
		dev_sem->irq_count_rdma = 0;
		pcam->stat = 1;
		ret_time_dwait = wait_for_irq_rdma_sem(dev_sem, IO_TIMEOUT,
						       instance);
		pcam->stat = 0;
		raw_spin_unlock_irq(&dev_sem->lock);
		parm.acclen = RDR_rdma(SHIFT_CAM, instance);
		if (ret_time_dwait == -2) {
			parm.err_no = -RDMA_E_SIGNAL;
		} else if (ret_time_dwait == -1) {
			parm.err_no = -RDMA_E_TIMER;
		} else if (ret_time_dwait > 0) {
			parm.err_no = ret_time_dwait;
		} else
			parm.err_no = 0;
end_RDMA_IS_CAM_YES:
		event_ioctl(0, RDMA_IS_CAM_YES_EVENT, 0, 0);
		break;
	}

	case RDMA_IS_CAM_NO:
	{
		unsigned int atl;
		int ret_time_dwait = 0;
		dev_rdma_sem_t *dev_sem;
		rw_state_p pcam;

		event_ioctl(instance, RDMA_IS_CAM_NO_EVENT, 1, 0);
		pcam = &rdma_sti->talive;
		dev_sem = &pcam->dev_rdma_sem;
		atl = RDR_rdma(SHIFT_CAM, instance);
		if (!atl) {
			parm.acclen = 0;
			parm.err_no = 0;
			goto end_RDMA_IS_CAM_NO;
		}
		raw_spin_lock_irq(&dev_sem->lock);
		dev_sem->irq_count_rdma = 0;
		pcam->stat = 1;
		ret_time_dwait = wait_for_irq_rdma_sem(dev_sem, IO_TIMEOUT,
						       instance);
		pcam->stat = 0;
		raw_spin_unlock_irq(&dev_sem->lock);
		parm.acclen = RDR_rdma(SHIFT_CAM, instance);
		if (ret_time_dwait == -2) {
			parm.err_no = -RDMA_E_SIGNAL;
		} else if (ret_time_dwait == -1) {
			parm.err_no = -RDMA_E_TIMER;
		} else if (ret_time_dwait > 0) {
			parm.err_no = ret_time_dwait;
		} else
			parm.err_no = 0;
end_RDMA_IS_CAM_NO:
		parm.clkr = join_curr_clock();
		parm.clkr1 = pcam->clkr;
		parm.reqlen = pcam->int_cnt;

		event_ioctl(0, RDMA_IS_CAM_NO_EVENT, 0, 0);
		break;
	}
	case RDMA_SET_ATL:
	{
		unsigned int atl;

		tr_atl = ATL_B | (parm.reqlen & ATL);
		WRR_rdma(SHIFT_CAM, instance, tr_atl);
		atl = RDR_rdma(SHIFT_CAM, instance);
		parm.acclen = atl;
		break;
	}

	default:
		ERROR_MSG("rdma_ioctl(%d, %d): default operation NOT EXPECTED "
			  "cmd: %x\n", instance, channel, cmd);
		res = -1;
		parm.err_no = RDMA_E_INVOP;
	}

	rval = copy_to_user((rdma_ioc_parm_t __user *)arg, &parm,
			    sizeof (rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_ioctl(%d, %d, %x): copy_to_user failed "
			  "size: %lx rval: %lx\n", instance, channel, cmd,
			  sizeof (rdma_ioc_parm_t), rval);
		return -EINVAL;
	}
	if (res == 0) {
		DEBUG_MSG("rdma_ioctl(%d, %d): NORMAL_END: acclen=%x *****\n\n",
			  instance, channel, parm.acclen);
		DEBUG_MSG("rdma_ioctl: FINISH\n");
		return 0;
	}

	ERROR_MSG("rdma_ioctl: FAIL\n");
	DEBUG_MSG("rdma_ioctl: FINISH\n");
	return -EINVAL; /* !? return l>0 == return -1 !? */
}
#ifdef CONFIG_COMPAT
static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	int ret;

	ret = rdma_ioctl(f, cmd, arg);
	/* ret = rdma_ioctl(f->f_dentry->d_inode, f, cmd, arg); */
	return ret;
}

static long rdma_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case RDMA_IOC_DUMPREG0:
	case RDMA_IOC_DUMPREG1:
	case RDMA_IOC_WRR:
	case RDMA_IOC_RDR:
	case RDMA_IOC_GET_neighbour_map:
	case RDMA_CLEAN_TDC_COUNT:
	case RDMA_GET_CLKR:
	case RDMA_GET_MAX_CLKR:
	case RDMA_CLEAN_RDC_COUNT:
	case RDMA_TIMER_FOR_READ:
	case RDMA_TIMER_FOR_WRITE:
	case RDMA_IOC_ALLOCB:
	case RDMA_GET_STAT:
	case RDMA_GET_EVENT:
	case RDMA_SET_STAT:
	case RDMA_IS_CAM_YES:
	case RDMA_IS_CAM_NO:
	case RDMA_SET_ATL:
	case RDMA_WAKEUP_WRITER:
	case RDMA_WAKEUP_READER:
	case RDMA_IOC_GET_ID:
	case RDMA_IOC_RESET_DMA:
	case RDMA_IOC_SET_MODE_RFSM:
	case RDMA_IOC_SET_MODE_EXIT_GP0:
	case RDMA_IOC_RESET_TCS:
	case RDMA_IOC_RESET_RCS:
	case RDMA_IOC_SET_MODE_LOOP:
		return do_ioctl(f, cmd, arg);
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
/* ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); */
static ssize_t rdma_read(struct file *filp, char __user *buf, size_t size,
			 loff_t *pos)
{
	int minor;
	int instance;
	int channel;
	int ret = 0;
	rdma_state_inst_t *rdma_sti;
	rdma_ioc_parm_t PRM;
	size_t rval;

	DEBUG_MSG("rdma_read: START\n");
	if (filp == (struct file *)NULL) {
		ERROR_MSG("rdma_read: filp is NULL\n");
		return 0;
	}
	minor = get_file_minor(filp);
	DEBUG_MSG("rdma_read: minor: %d\n", minor);
	if (minor < 0) {
		ERROR_MSG("rdma_read: minor(%d) < 0\n", minor);
		return -EINVAL;
	}
	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	DEBUG_MSG("rdma_read: instance: %d channel: %d\n", instance, channel);
	rdma_sti = &rdma_state->rdma_sti[instance];
	rval = copy_from_user(&PRM, (rdma_ioc_parm_t __user *)buf,
			      sizeof (rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_read(%d, %d): copy_from_user failed size: %lx "
			  "rval: %lx\n", instance, channel,
			  sizeof (rdma_ioc_parm_t), rval);
		return -EINVAL;
	}

	PRM.reqlen = 0;
	ret = read_buf(rdma_sti, buf, size, instance, channel, &PRM);
	PRM.clkr = join_curr_clock();

	rval = copy_to_user((rdma_ioc_parm_t __user *)buf, &PRM,
			    sizeof (rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_read(%d, %d): copy_to_user failed size: %lx "
			  "rval: %lx\n", instance, channel,
			  sizeof (rdma_ioc_parm_t), rval);
		return -EINVAL;
	}
	DEBUG_MSG("rdma_read: FINISH\n");
	return ret;
}
static ssize_t rdma_write(struct file *filp, const char __user *buf,
			  size_t size, loff_t *pos)
{
	int minor;
	int instance;
	int channel;
	int ret = 0;
	rdma_state_inst_t *rdma_sti;
	rdma_ioc_parm_t PRM;
	size_t rval;

	DEBUG_MSG("rdma_write: START\n");
	minor = get_file_minor(filp);
	if (minor < 0)
		return 0;
	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	DEBUG_MSG("rdma_write: instance: %d channel: %d\n", instance, channel);
	rdma_sti = &rdma_state->rdma_sti[instance];
	DEBUG_MSG("rdma_write: &rdma_state->rdma_sti[%d]: %p\n", instance,
		  rdma_sti);
	rval = copy_from_user(&PRM, (rdma_ioc_parm_t __user *)buf,
			      sizeof(rdma_ioc_parm_t));
	DEBUG_MSG("rdma_write: copy_from_user PRM: %p sizeof(PRM):%x "
		  "sizeof(rdma_ioc_parm_t):%x\n", &PRM, sizeof(PRM),
		  sizeof(rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_write(%d, %d): copy_from_user failed size: %lx "
			  "rval: %lx\n", instance, channel,
			  sizeof (rdma_ioc_parm_t), rval);
		return -EINVAL;
	}
	ret = write_buf(rdma_sti, buf, size, instance, channel, &PRM);
	PRM.clkr = join_curr_clock();
	rval = copy_to_user((rdma_ioc_parm_t __user *)buf, &PRM,
			    sizeof (rdma_ioc_parm_t));
	if (rval) {
		ERROR_MSG("rdma_write(%d, %d): copy_to_user failed size: %lx "
			  "rval: %lx\n", instance, channel,
			  sizeof (rdma_ioc_parm_t), rval);
		return -EINVAL;
	}

	return ret;
}
static int rdma_mmap(struct file *file, struct vm_area_struct *vma)
{
	int minor;
	int instance;
	int channel;
	int rval;
	rdma_state_inst_t *rdma_sti;
	dma_chan_t *chd;

	DEBUG_MSG("rdma_mmap: START\n");
	minor = get_file_minor(file);
	if (minor < 0)
		return minor;
	instance = DEV_inst(minor);
	channel = DEV_chan(minor);
	rdma_sti = &rdma_state->rdma_sti[instance];
	chd = &rdma_sti->dma_chans[channel];
	if (chd->allocs != RCS_ALLOCED) {
		ERROR_MSG("rdma_mmap : chd->allocs != RCS_ALLOCED\n");
		return -EAGAIN;
	}
	if (chd->tm) {
		rval = rdma_remap_page_tbl((void *)chd->vdma_tm,
					   chd->real_size, vma);
	} else {
		rval = rdma_remap_page((void *)chd->prim_buf_addr,
				       chd->real_size, vma);
	}
	if (rval) {
		ERROR_MSG("rdma: rdma_mmap ddi_remap_page FAIL\n");
		return -EAGAIN;
	}
	chd->allocs = RCS_MAPPED;
	DEBUG_MSG("rdma_mmap: minor: %d\n", minor);
	DEBUG_MSG("rdma_mmap: FINISH\n");
	return 0;
}
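/*
 * Map a physically contiguous DMA buffer into user space. Offsets are
 * handled in whole pages; on e2k the pages are additionally mapped with
 * caching disabled (_PAGE_CD_DIS | _PAGE_PWT) when the VMA is marked VM_IO.
 */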
int rdma_remap_page(void *va, size_t sz, struct vm_area_struct *vma)
{
	unsigned long pha;
	unsigned long vm_end;
	unsigned long vm_start;
	unsigned long vm_pgoff;
	size_t size;

	DEBUG_MSG("rdma_remap_page: START\n");
	if (!sz)
		return -EINVAL;
	pha = virt_to_phys(va);
	size = (long)PAGE_ALIGN((pha & ~PAGE_MASK) + sz);
	if ((vma->vm_pgoff << PAGE_SHIFT) > size)
		return -ENXIO;
	pha += (vma->vm_pgoff << PAGE_SHIFT);
	vm_end = vma->vm_end;
	vm_start = vma->vm_start;
	vm_pgoff = vma->vm_pgoff;

	if ((vm_end - vm_start) < size)
		size = vm_end - vm_start;

	vma->vm_flags |= (VM_READ | VM_WRITE | VM_RESERVED);

#ifdef __e2k__
	if (vma->vm_flags & VM_IO)
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) |
					     _PAGE_CD_DIS | _PAGE_PWT);
#endif
	if (remap_pfn_range(vma, vm_start, (pha >> PAGE_SHIFT), size,
			    vma->vm_page_prot)) {
		ERROR_MSG("rdma_remap_page: FAIL remap_pfn_range\n");
		return -EAGAIN;
	}
	DEBUG_MSG("rdma_remap_page: FINISH\n");
	return 0;
}
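/*
 * Map a scatter/gather buffer described by a table of rdma_tbl_64_struct_t
 * {addr, sz} entries: each physically contiguous chunk is remapped in turn
 * and vm_start advances by the chunk size until the VMA is filled. On E90S
 * the table entries are stored little-endian, hence the cpu_to_le64()
 * conversions.
 */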
int rdma_remap_page_tbl(void *va, size_t sz, struct vm_area_struct *vma)
{
	unsigned long pha;
	unsigned long sz_pha;
	unsigned long vm_end;
	unsigned long vm_start;
	unsigned long vm_pgoff;
	size_t size;
	rdma_tbl_64_struct_t *ptbl;

	DEBUG_MSG("rdma_remap_page_tbl: START\n");
	if (!sz)
		return -EINVAL;
	if (vma->vm_pgoff) {
		ERROR_MSG("rdma_remap_page_tbl: vma->vm_pgoff: 0x%lx\n",
			  vma->vm_pgoff);
		return -EINVAL;
	}
	size = (long)PAGE_ALIGN(sz);
	vm_end = vma->vm_end;
	vm_start = vma->vm_start;
	vm_pgoff = vma->vm_pgoff;

	if ((vm_end - vm_start) < size) {
		size = vm_end - vm_start;
		DEBUG_MSG("rdma_remap_page_tbl: vm_end(%lx) - vm_start(%lx) < "
			  "size(%lx)\n", vm_end, vm_start, size);
	}

	vma->vm_flags |= (VM_READ | VM_WRITE | VM_RESERVED);

#ifdef __e2k__
	if (vma->vm_flags & VM_IO)
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) |
					     _PAGE_CD_DIS | _PAGE_PWT);
#endif
	for (ptbl = (rdma_tbl_64_struct_t *)va; ptbl; ptbl++) {
		rdma_addr_struct_t pxx;

		pxx.addr = (unsigned long)ptbl;
		DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x ptbl\n",
			  pxx.fields.haddr, pxx.fields.laddr);
		pxx.addr = ptbl->addr;
		DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x ptbl->addr\n",
			  pxx.fields.haddr, pxx.fields.laddr);
#ifdef CONFIG_E90S
		pha = (unsigned long)(cpu_to_le64(ptbl->addr));
		DEBUG_MSG("rdma_remap_page_tbl: pha cpu_to_le64(pha): %lx \n",
			  pha);
#else /* E3S */
		pha = (unsigned long)ptbl->addr;
#endif
		pxx.addr = (unsigned long)phys_to_virt(pha);
		DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x __va(ptbl->addr)\n",
			  pxx.fields.haddr, pxx.fields.laddr);
		pxx.addr = pha;
		DEBUG_MSG("rdma_remap_page_tbl: 0x%08x%08x __fa(ptbl->addr)\n",
			  pxx.fields.haddr, pxx.fields.laddr);
		sz_pha = ptbl->sz;
#ifdef CONFIG_E90S
		sz_pha = cpu_to_le64(sz_pha);
		DEBUG_MSG("rdma_remap_page_tbl: "
			  "sz_pha cpu_to_le64(sz_pha): %lx\n", sz_pha);
#endif
		if (remap_pfn_range(vma, vm_start, (pha >> PAGE_SHIFT),
				    sz_pha, vma->vm_page_prot)) {
			ERROR_MSG("rdma_remap_page_tbl:FAIL remap_pfn_range\n");
			return -EAGAIN;
		}
		vm_start += sz_pha;
		DEBUG_MSG("rdma_remap_page_tbl: vm_start: %lx vm_end: %lx "
			  "sz_pha: %lx \n", vm_start, vm_end, sz_pha);
		if (vm_start >= vm_end) {
			DEBUG_MSG("rdma_remap_page_tbl: "
				  "vm_start(%lx) >= vm_end(%lx)\n", vm_start,
				  vm_end);
			break;
		}
	}
	DEBUG_MSG("rdma_remap_page_tbl: FINISH\n");
	return 0;
}
int get_file_minor(struct file *file)
{
	int major;
	struct dentry *f_dentry_rdma;
	struct inode *d_inode;

	f_dentry_rdma = file->f_dentry;
	if (!f_dentry_rdma) {
		ERROR_MSG("get_file_minor: file->f_dentry is NULL\n");
		return -EBADF;
	}
	d_inode = f_dentry_rdma->d_inode;
	if (!d_inode) {
		ERROR_MSG("get_file_minor: f_dentry->d_inode is NULL\n");
		return -EBADF;
	}
	major = MAJOR(d_inode->i_rdev);
	DEBUG_MSG("get_file_minor:d_inode->i_rdev: 0x%08u major: %d minor:%u\n",
		  d_inode->i_rdev, major, MINOR(d_inode->i_rdev));
	return MINOR(d_inode->i_rdev);
}
void init_rdma_sti(int instance)
{
	rw_state_t *pd, *pm;
	int i;
	dev_rdma_sem_t *dev_sem;
	rdma_state_inst_t *rdma_sti = &rdma_state->rdma_sti[instance];
	rdma_addr_struct_t p_xxb;

	DEBUG_MSG("init_rdma_sti: START\n");
	p_xxb.addr = (unsigned long)rdma_sti;
	DEBUG_MSG("init_rdma_sti: node: %d rdma_sti: 0x%08x%08x\n",
		  instance, p_xxb.fields.haddr, p_xxb.fields.laddr);
	rdma_sti->instance = instance;
	mutex_init(&rdma_sti->mu);
	pm = &rdma_sti->talive;
	mutex_init(&pm->mu);
	raw_spin_lock_init(&pm->mu_spin);
	pm->stat = 0;
	pm->timer = TIMER_MIN;
	dev_sem = &pm->dev_rdma_sem;
	raw_spin_lock_init(&dev_sem->lock);
	cv_init(&dev_sem->cond_var);
	dev_sem->irq_count_rdma = 0;
	pm = &rdma_sti->ralive;
	mutex_init(&pm->mu);
	raw_spin_lock_init(&pm->mu_spin);
	pm->stat = 0;
	pm->timer = TIMER_MIN;
	dev_sem = &pm->dev_rdma_sem;
	raw_spin_lock_init(&dev_sem->lock);
	cv_init(&dev_sem->cond_var);
	dev_sem->irq_count_rdma = 0;
	for (i = 0; i < 2; i++) {
		pm = &rdma_sti->rw_states_m[i];
		mutex_init(&pm->mu);
		raw_spin_lock_init(&pm->mu_spin);
		pm->stat = 0;
		pm->timer = TIMER_MIN;
		dev_sem = &pm->dev_rdma_sem;
		raw_spin_lock_init(&dev_sem->lock);
		cv_init(&dev_sem->cond_var);
		dev_sem->irq_count_rdma = 0;
		pd = &rdma_sti->rw_states_d[i];
		mutex_init(&pd->mu);
		raw_spin_lock_init(&pd->mu_spin);
		dev_sem = &pd->dev_rdma_sem;
		raw_spin_lock_init(&dev_sem->lock);
		cv_init(&dev_sem->cond_var);
		dev_sem->irq_count_rdma = 0;
		pd->trwd_was = 0;
		pd->clock_receive_trwd = 0;
		pd->clock_begin_read = 0;
		pd->clock_end_read_old = 0;
		pd->clock_begin_read_old = 0;
		pd->trwd_send_count = 0;
		pd->ready_send_count = 0;
		pd->trwd_rec_count = 0;
		pd->ready_rec_count = 0;
		pd->n_ready = 0;
		pd->stat = 0;
		pd->trwd_was = 0;
		pd->timer_read = TIMER_MIN;
		pd->timer_write = TIMER_MIN;
		pd->timer_for_read = TIMER_FOR_READ_MIN;
		pd->timer_for_write = TIMER_FOR_WRITE_MIN;
	}
	DEBUG_MSG("init_rdma_sti: FINISH\n");
}
void read_regs_rdma(int i)
{
	printk("%d 0x%08x - 0x0 SHIFT_IOL_CSR\n", i, RDR_rdma(SHIFT_IOL_CSR, i));
	printk("%d 0x%08x - 0x0 SHIFT_IO_CSR\n", i, RDR_rdma(SHIFT_IO_CSR, i));
	printk("%d 0x%08x - 0x0 SHIFT_VID\n", i, RDR_rdma(SHIFT_VID, i));
	printk("%d 0x%08x - 0x4 SHIFT_CH_IDT\n", i, RDR_rdma(SHIFT_CH_IDT, i));
	printk("%d 0x%08x - 0x8 SHIFT_CS\n", i, RDR_rdma(SHIFT_CS, i));
	printk("%d 0x%08x 0x00 - SHIFT_DD_ID\n", i, RDR_rdma(SHIFT_DD_ID, i));
	printk("%d 0x%08x 0x04 - SHIFT_DMD_ID\n", i, RDR_rdma(SHIFT_DMD_ID, i));
	printk("%d 0x%08x 0x08 - SHIFT_N_IDT\n", i, RDR_rdma(SHIFT_N_IDT, i));
	printk("%d 0x%08x 0x0c - SHIFT_ES\n", i, RDR_rdma(SHIFT_ES, i));
	printk("%d 0x%08x 0x10 - SHIFT_IRQ_MC\n", i, RDR_rdma(SHIFT_IRQ_MC, i));
	printk("%d 0x%08x 0x14 - SHIFT_DMA_TCS\n", i, RDR_rdma(SHIFT_DMA_TCS, i));
	printk("%d 0x%08x 0x18 - SHIFT_DMA_TSA\n", i, RDR_rdma(SHIFT_DMA_TSA, i));
	printk("%d 0x%08x 0x1c - SHIFT_DMA_TBC\n", i, RDR_rdma(SHIFT_DMA_TBC, i));
	printk("%d 0x%08x 0x20 - SHIFT_DMA_RCS\n", i, RDR_rdma(SHIFT_DMA_RCS, i));
	printk("%d 0x%08x 0x24 - SHIFT_DMA_RSA\n", i, RDR_rdma(SHIFT_DMA_RSA, i));
	printk("%d 0x%08x 0x28 - SHIFT_DMA_RBC\n", i, RDR_rdma(SHIFT_DMA_RBC, i));
	printk("%d 0x%08x 0x2c - SHIFT_MSG_CS\n", i, RDR_rdma(SHIFT_MSG_CS, i));
	printk("%d 0x%08x 0x30 - SHIFT_TDMSG\n", i, RDR_rdma(SHIFT_TDMSG, i));
	printk("%d 0x%08x 0x34 - SHIFT_RDMSG\n", i, RDR_rdma(SHIFT_RDMSG, i));
	printk("%d 0x%08x 0x38 - SHIFT_CAM\n", i, RDR_rdma(SHIFT_CAM, i));
}
/* Write a test message to TDMSG of link i, dumping registers before and after */
void test_send_msg_rdma(unsigned int i, unsigned int msg)
{
	read_regs_rdma(i);
	WRR_rdma(SHIFT_TDMSG, i, msg);
	read_regs_rdma(i);
}
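
/*
 * Usage note (illustrative): the two register dumps bracket the write, so
 * a bring-up check can be as simple as
 *
 *	test_send_msg_rdma(link, 0x1);
 *
 * followed by comparing SHIFT_MSG_CS/SHIFT_ES between the two dumps. The
 * message value here is an arbitrary test pattern, not a defined protocol
 * message.
 */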
void free_chan(dma_chan_t *chd)
{
	signed int rest;

	DEBUG_MSG("free_chan: START\n");
	if (chd->allocs > RCS_ALLOCED_B) {
		if (chd->size_tm) {
			/* Table mode: free every buffer listed in the
			 * descriptor table, then the table itself. */
			rdma_tbl_64_struct_t *peltbl;
			for (peltbl = (rdma_tbl_64_struct_t *)chd->vdma_tm,
			     rest = chd->real_size; rest > 0; peltbl++) {
#ifdef CONFIG_E90S
				/* Entries are stored little-endian; swap
				 * back to CPU order before use. */
				peltbl->addr = cpu_to_le64(peltbl->addr);
				peltbl->sz = cpu_to_le64(peltbl->sz);
#endif
				rdma_mem_free(peltbl->sz,
					(dma_addr_t)peltbl->addr,
					(unsigned long)__va(peltbl->addr));
				rest -= peltbl->sz;
			}
			rdma_mem_free(chd->size_tm, chd->fdma_tm,
				(unsigned long)chd->vdma_tm);
		} else if (chd->real_size) {
			/* Single-buffer mode */
			rdma_mem_free(chd->real_size, chd->dma,
				(unsigned long)chd->prim_buf_addr);
		}
		chd->tm = 0;
		chd->allocs = 0;
		chd->vdma_tm = 0;
		chd->size_tm = 0;
		chd->dma_busa = 0;
		chd->prim_buf_addr = 0;
		chd->real_size = 0;
	}
	DEBUG_MSG("free_chan: FINISH\n");
}
void rdma_mem_free(size_t size, dma_addr_t dev_memory,
		unsigned long dma_memory)
{
	int order;
	caddr_t mem;
	struct page *map, *mapend;

	DEBUG_MSG("rdma_mem_free: START\n");
	mem = (caddr_t)dma_memory;
	order = get_order(size);
	/* Undo the SetPageReserved() done at allocation time */
	mapend = virt_to_page(mem + (PAGE_SIZE << order) - 1);
	for (map = virt_to_page(mem); map <= mapend; map++)
		ClearPageReserved(map);
	free_pages(dma_memory, order);
	DEBUG_MSG("rdma_mem_free: FINISH va: 0x%lx fa: 0x%llx size: 0x%zx\n",
		dma_memory, (unsigned long long)dev_memory, size);
}
unsigned long __get_free_pages_rdma(int node, gfp_t gfp_mask,
		unsigned int order)
{
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, order);
	if (!page)
		return (unsigned long)NULL;
	return (unsigned long)page_address(page);
}
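
/*
 * Note (illustrative): this is the NUMA-aware counterpart of
 * __get_free_pages(); pages come from the node that owns the RDMA link,
 * keeping DMA buffers local to it. A typical call would be
 *
 *	unsigned long va =
 *		__get_free_pages_rdma(node, GFP_KERNEL, get_order(size));
 */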
int rdma_mem_alloc(int node, size_t size, dma_addr_t *mem, size_t *real_size,
		unsigned long *dma_memory)
{
	int order;
	struct page *map, *mapend;

	DEBUG_MSG("rdma_mem_alloc: START\n");
	order = get_order(size);
	*dma_memory = __get_free_pages_rdma(node, GFP_KERNEL, order);
	if (!(*dma_memory)) {
		ERROR_MSG("rdma_mem_alloc: Cannot bind DMA address order: %d "
			"size: 0x%zx\n", order, size);
		return -1;
	}
	/* Mark the pages reserved so they stay put while mapped for DMA */
	mapend = virt_to_page((*dma_memory) + (PAGE_SIZE << order) - 1);
	for (map = virt_to_page((*dma_memory)); map <= mapend; map++)
		SetPageReserved(map);
	*mem = __pa(*dma_memory);
	*real_size = PAGE_SIZE << order;
	DEBUG_MSG("rdma_mem_alloc: FINISH va: 0x%lx fa: 0x%llx size: 0x%zx "
		"real_size: 0x%zx\n", *dma_memory,
		(unsigned long long)*mem, size, *real_size);
	return 0;
}
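
/*
 * Illustrative pairing (not part of the driver): rdma_mem_free() expects
 * exactly the triple produced here, since it recomputes the page order
 * from the size argument:
 *
 *	dma_addr_t fa;
 *	size_t real_size;
 *	unsigned long va;
 *
 *	if (rdma_mem_alloc(node, len, &fa, &real_size, &va))
 *		return -ENOMEM;
 *	...
 *	rdma_mem_free(real_size, fa, va);
 */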
int init_chan(dma_chan_t *chd, int reqlen, int tm)
{
	char *err_msg = NULL;
	rdma_tbl_64_struct_t *peltbl;
	signed int rest, tmp_size;
	rdma_addr_struct_t pxx;
	int SIZE_TLB;

	DEBUG_MSG("init_chan: START\n");
	if (chd->allocs) {
		ERROR_MSG("init_chan: chd->allocs already %d\n", chd->allocs);
		return -1;
	}
#define SIZE_TLB_EL 128
	SIZE_TLB = ((PAGE_ALIGN(reqlen) / PAGE_SIZE + 1) * SIZE_TLB_EL);

	chd->allocs = RCS_ALLOCED_B;
	DEBUG_MSG("init_chan: try alloc 0x%x\n", reqlen);
	if (tm) {
		/* Table mode: build a table of 64-bit descriptors, each
		 * pointing at a separately allocated buffer. */
		DEBUG_MSG("init_chan: table mode PAGE_SIZE: %lx\n", PAGE_SIZE);
		DEBUG_MSG("init_chan: try alloc for tm size SIZE_TLB: 0x%x\n",
			SIZE_TLB);
		if (rdma_mem_alloc(chd->node_for_memory, SIZE_TLB,
				(dma_addr_t *)&chd->fdma_tm, &chd->size_tm,
				(unsigned long *)&chd->vdma_tm)) {
			err_msg = "rdma_mem_alloc for tm";
			goto failed;
		}
		pxx.addr = (unsigned long)chd->vdma_tm;
		DEBUG_MSG("init_chan: 0x%08x%08x vdma_tm\n", pxx.fields.haddr,
			pxx.fields.laddr);
		pxx.addr = chd->fdma_tm;
		DEBUG_MSG("init_chan: 0x%08x%08x fdma_tm\n", pxx.fields.haddr,
			pxx.fields.laddr);
		rest = reqlen;
		/* rest = allign_dma((unsigned int)reqlen);
		   rest = PAGE_ALIGN(reqlen); */
		DEBUG_MSG("init_chan: reqlen: 0x%08x rest: 0x%08x\n",
			reqlen, rest);
		chd->real_size = 0;
		for (peltbl = (rdma_tbl_64_struct_t *)chd->vdma_tm;
		     rest > 0; peltbl++) {
			size_t size_el;
			unsigned long addr;

			if (rdma_mem_alloc(chd->node_for_memory,
					SIZE_EL_TBL64_RDMA,
					(dma_addr_t *)&peltbl->addr, &size_el,
					(unsigned long *)&addr)) {
				err_msg = "rdma_mem_alloc for tm element";
				if (chd->real_size) {
					/* Keep the partially built table so
					 * free_chan() can release it. */
					peltbl->sz = 0;
					chd->dma = chd->fdma_tm;
					chd->tm = 1;
					chd->allocs = RCS_ALLOCED;
					goto failed1;
				} else
					goto failed;
			}
			pxx.addr = (unsigned long)peltbl;
			DEBUG_MSG("init_chan: 0x%08x%08x peltbl\n",
				pxx.fields.haddr, pxx.fields.laddr);
#ifdef CONFIG_E90S
			/* Table entries are stored little-endian on E90S */
			peltbl->addr = le64_to_cpu(peltbl->addr);
#endif
			pxx.addr = peltbl->addr;
			DEBUG_MSG("init_chan: 0x%08x%08x peltbl->addr\n",
				pxx.fields.haddr, pxx.fields.laddr);
			tmp_size = ((rest >= size_el) ? size_el :
					(unsigned int)rest);
			peltbl->sz = (unsigned long)size_el;
			/* peltbl->sz = (unsigned long)tmp_size; */
#ifdef CONFIG_E90S
			peltbl->sz = le64_to_cpu(peltbl->sz);
#endif
			rest -= size_el;
			chd->real_size += size_el;
			/* chd->real_size += tmp_size; */
		}
		peltbl->sz = 0;		/* zero-sized entry terminates the table */
		chd->dma = chd->fdma_tm;
		chd->tm = 1;
	} else {
		int rfsm_size;

		DEBUG_MSG("init_chan: single mode PAGE_SIZE: %lx\n", PAGE_SIZE);
		if (reqlen > 0x800000) {
			ERROR_MSG("init_chan: buffer too large; single mode "
				"supports at most 0x800000 bytes, "
				"use table mode.\n");
			goto failed;
		}
		if (rfsm) {
#ifdef CONFIG_E2K
			if (IS_MACHINE_E2S)
				rfsm_size = reqlen;
			else
				rfsm_size = PAGE_ALIGN(reqlen);
#else
			rfsm_size = PAGE_ALIGN(reqlen);
#endif
		} else {
			rfsm_size = reqlen;
		}
		if (rdma_mem_alloc(chd->node_for_memory,
				(unsigned long)rfsm_size,
				(dma_addr_t *)&chd->dma_busa, &chd->real_size,
				(unsigned long *)&chd->prim_buf_addr)) {
			err_msg = "rdma_mem_alloc";
			goto failed;
		}
		chd->dma = chd->dma_busa;
		pxx.addr = chd->dma;
		DEBUG_MSG("init_chan: 0x%08x%08x chd->dma\n", pxx.fields.haddr,
			pxx.fields.laddr);
		chd->tm = 0;
	}
	chd->full = (uint_t)chd->dma;
	chd->allocs = RCS_ALLOCED;
	DEBUG_MSG("init_chan: FINISH chd->real_size: %lx\n", chd->real_size);
	return chd->real_size;

failed:
	chd->allocs = RCS_EMPTY;
failed1:
	ERROR_MSG("init_chan: %s FAILED ****\n", err_msg);
	return (-1);
}
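
/*
 * Illustrative lifecycle (not part of the driver): a channel buffer is
 * sized once and later released with free_chan(). The caller is assumed
 * to have chosen the memory node beforehand, e.g.:
 *
 *	chd->node_for_memory = NUM_NODE_RDMA(link);
 *	if (init_chan(chd, reqlen, reqlen > 0x800000 ? 1 : 0) < 0)
 *		return -ENOMEM;
 *	...
 *	free_chan(chd);
 */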
/******************* create devices *************************/
int create_dev_rdma(int major)
{
	char nod[128];
	int i = 0, i_rdma;
	int minor;

	/* Create rdma class and device nodes in /sys/class/rdma */
	rdma_class = class_create(THIS_MODULE, "rdma");
	if (IS_ERR(rdma_class)) {
		pr_err("Error creating class: /sys/class/rdma.\n");
		return -1;
	}
	for_each_online_rdma(i) {
	/* for_each_rdma(i) { */
		for (i_rdma = 0; i_rdma < RDMA_NODE_DEV; i_rdma++) {
			minor = i * RDMA_NODE_DEV + i_rdma;
			sprintf(nod, "rdma_%d_:%d", i, i_rdma);
			pr_info("make node /sys/class/rdma/%s\n", nod);
			/* device_create() returns ERR_PTR(), not NULL,
			 * on failure */
			if (IS_ERR(device_create(rdma_class, NULL,
					MKDEV(major, minor), NULL, nod))) {
				pr_err("create dev: %s a node: %d failed\n",
					nod, i);
				return -1;
			}
		}
	}
	return 0;
}
int remove_dev_rdma(int major)
{
	char nod[128];
	int i = 0, i_rdma;
	int minor;

	/* Remove rdma nodes from /sys/class/rdma */
	for_each_rdma(i) {
		for (i_rdma = 0; i_rdma < RDMA_NODE_DEV; i_rdma++) {
			minor = i * RDMA_NODE_DEV + i_rdma;
			(void)sprintf(nod, "rdma_%d_:%d", i, i_rdma);
			device_destroy(rdma_class, MKDEV(major, minor));
		}
	}
	class_destroy(rdma_class);
	return 0;
}
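
/*
 * Sketch (not part of the driver): create_dev_rdma()/remove_dev_rdma()
 * are assumed to bracket the driver's lifetime once the char-dev major is
 * known; the bodies below are illustrative only, and the "rdma" chrdev
 * name is an assumption:
 *
 *	if (create_dev_rdma(major)) {
 *		unregister_chrdev(major, "rdma");
 *		return -ENODEV;
 *	}
 *
 * and on unload:
 *
 *	remove_dev_rdma(major);
 *	unregister_chrdev(major, "rdma");
 */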
module_init(rdma_init);
module_exit(rdma_cleanup);