/*
 * BUGS:
 * - CUBIC: rfsm mode cannot be used in table mode.
 */

#include "mokx_iocc.h"
#include "mokx_mok.h"
#include "mokx_iocc_error.h"
#include "mokx_mok_error.h"
#include "mokx_iocc_regs.h"
#include "mokx_mok_regs.h"

#ifndef VM_RESERVED
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif
#define SETTING_OVER_INTERRUPT 1
#define SET_ENABLE_RECEIVE_BIT 1

#if SETTING_OVER_INTERRUPT
unsigned int wait_answer_msg = 0x0;
#endif

#ifdef UNX_TRWD
unsigned int REPEAT_TRWD = 0;
#endif

#define DSF_NO 0
#define ALLOC_MEM_DRIVER 1
#define SMALL_CHANGE 0x0
#define TX_RX_WAIT_DMA 1000000

int busy_rdma_boot_mem = 0;
#ifdef CONFIG_RDMA_BOOT_MEM_ALLOC
extern unsigned int R_M_NODE;
extern unsigned int R_M_SH;
extern volatile void *rdma_link_mem[MAX_NUMNODES];
extern volatile void *rdma_share_mem;
#endif
MODULE_AUTHOR("Copyright by MCST 2013-2014");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MOKX driver");
/*
 * Driver parameters
 */
#ifndef LMS
#ifdef CONFIG_RDMA_BOOT_MEM_ALLOC
#define MAX_SIZE_BUFF 0x800000
#define LIMIT_SIZE_BUFF 0x40000000
#else
#define MAX_SIZE_BUFF 0x800000
#define LIMIT_SIZE_BUFF 0x2000000
#endif
#define MAX_SIZE_BUFF_TM 0xC800000
#else
#ifdef CONFIG_RDMA_BOOT_MEM_ALLOC
#define MAX_SIZE_BUFF 0x10000
#else
#define MAX_SIZE_BUFF 0x8000
#define LIMIT_SIZE_BUFF 0x200000
#endif
#define MAX_SIZE_BUFF_TM 0x80000
#endif
/*
 * Struct for the rdma class in sysfs
 */
static struct class *mokx_class;
/*
 * Set ATL
 */
#if 0
unsigned int tr_atl;
static int atl_v = TR_ATL_B;
module_param(atl_v, int, 0);
MODULE_PARM_DESC(atl_v, "Changes the value of ATL (alive timer limit) "
                        "reg CAM.");
#endif
/*
 * Mode (0 - single mode, 1 - table mode)
 */
static int tm_mode = 0x1;
module_param(tm_mode, int, 0);

/*
 * Buffer alignment for table mode
 */
static int align_buf_tm = 1;
module_param(align_buf_tm, int, 0);

/*
 * Max buffer size for single mode
 */
static int max_size_buf = MAX_SIZE_BUFF;
module_param(max_size_buf, int, 0);

/*
 * Max buffer size for table mode
 */
static int max_size_buf_tm = MAX_SIZE_BUFF_TM;
module_param(max_size_buf_tm, int, 0);

/*
 * The number of buffers
 */
static int num_buf = RDMA_BUF_NUM;
module_param(num_buf, int, 0);

/*
 * Allocate memory on its own node
 */
static int node_mem_alloc = 0x0;
module_param(node_mem_alloc, int, 0);

/*
 * Multichannel support (in development)
 */
static int count_rdma_vc = RDMA_NODE_DEV;

/*
 * Print events
 */
static int ev_pr = 0;
module_param(ev_pr, int, 0);

/*
 * RFSM control:
 * rfsm = DISABLE_RFSM - RFSM disabled (default).
 * rfsm = DMA_RCS_RFSM - RFSM enabled.
 */
#define CLEAR_RFSM DISABLE_RFSM
unsigned int rfsm = CLEAR_RFSM;
struct rdma_reg_state rdma_reg_state[RDMA_MAX_NUMIOLINKS];
struct rdma_state *rdma_state;

struct pci_dev *rdma_dev;
link_id_t rdma_link_id;
unsigned long time_ID_REQ;
unsigned long time_ID_ANS;
unsigned long flags_s;
unsigned char *e0regad;
unsigned char *e1regad;
unsigned int count_read_sm_max = 800;
unsigned int intr_rdc_count[RDMA_MAX_NUMIOLINKS];
unsigned int msg_cs_dmrcl = MSG_CS_DMRCL;
unsigned int state_cam = 0;
unsigned int state_GP0;

unsigned int SHIFT_IO_VID;
unsigned int SHIFT_VID;         /* RDMA VID */
unsigned int SHIFT_IOL_CSR;
unsigned int SHIFT_IO_CSR;
unsigned int SHIFT_CH0_IDT;     /* RDMA ID/Type E90/E3M1 */
unsigned int SHIFT_CH1_IDT;     /* RDMA ID/Type E90/E3M1 */
unsigned int SHIFT_CH_IDT;      /* RDMA ID/Type E3S/E90S */
unsigned int SHIFT_CS;          /* RDMA Control/Status 000028a0 */
unsigned int SHIFT_DD_ID;       /* Data Destination ID */
unsigned int SHIFT_DMD_ID;      /* Data Message Destination ID */
unsigned int SHIFT_N_IDT;       /* Neighbour ID/Type */
unsigned int SHIFT_ES;          /* Event Status */
unsigned int SHIFT_IRQ_MC;      /* Interrupt Mask Control */
unsigned int SHIFT_DMA_TCS;     /* DMA Tx Control/Status */
unsigned int SHIFT_DMA_TSA;     /* DMA Tx Start Address */
unsigned int SHIFT_DMA_HTSA;    /* DMA Tx Start Address (high part) */
unsigned int SHIFT_DMA_TBC;     /* DMA Tx Byte Counter */
unsigned int SHIFT_DMA_RCS;     /* DMA Rx Control/Status */
unsigned int SHIFT_DMA_RSA;     /* DMA Rx Start Address */
unsigned int SHIFT_DMA_HRSA;    /* DMA Rx Start Address (high part) */
unsigned int SHIFT_DMA_RBC;     /* DMA Rx Byte Counter */
unsigned int SHIFT_MSG_CS;      /* Messages Control/Status */
unsigned int SHIFT_TDMSG;       /* Tx Data_Messages Buffer */
unsigned int SHIFT_RDMSG;       /* Rx Data_Messages Buffer */
unsigned int SHIFT_CAM;         /* CAM - channel alive management */
int MCG_CS_SEND_ALL_MSG = (MSG_CS_SD_Msg   |
                           MSG_CS_SGP0_Msg |
                           MSG_CS_SGP1_Msg |
                           MSG_CS_SGP2_Msg |
                           MSG_CS_SGP3_Msg |
                           MSG_CS_SL_Msg   |
                           MSG_CS_SUL_Msg  |
                           MSG_CS_SIR_Msg);

int MSG_CS_MSF_ALL = MSG_CS_DMPS_Err  |
                     MSG_CS_MPCRC_Err |
                     MSG_CS_MPTO_Err  |
                     MSG_CS_DMPID_Err;

unsigned int irq_mc_1 = IRQ_RGP1M,
             irq_mc_rdc = IRQ_RDC,
             irq_mc_03 = IRQ_RGP0M |
                         IRQ_RGP3M,
             irq_mc = IRQ_RDM   |
                      IRQ_RGP3M |
                      IRQ_RGP2M |
                      IRQ_RGP1M |
                      IRQ_RGP0M |
                      IRQ_RIAM  |
                      IRQ_RIRM  |
                      IRQ_RULM  |
                      IRQ_RLM   |
                      IRQ_MSF   |
#if DSF_NO
                      //IRQ_DSF |
#else
                      IRQ_DSF   |
#endif
                      IRQ_TDC   |
                      IRQ_RDC   |
                      IRQ_CMIE;
unsigned int count_loop_send_msg_max = 10;
unsigned int count_wait_rdm_max = 64;
dev_rdma_sem_t *msg_snd_dev[2];

#define RESET_DMA_MEMMORY 1
#ifdef RESET_DMA_MEMMORY
unsigned long reset_dma_memory_r, reset_dma_memory_w;
unsigned int reset_size_r;
unsigned int reset_size_w;
int reset_order_r, reset_order_w;
#endif
#ifdef CONFIG_COMPAT
static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg);
static long rdma_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
#endif
static long rdma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
static ssize_t rdma_read(struct file *, char *, size_t, loff_t *);
static ssize_t rdma_write(struct file *, const char *, size_t, loff_t *);
static int rdma_open(struct inode *inode, struct file *file);
static int rdma_close(struct inode *inode, struct file *file);
static int rdma_mmap(struct file *file, struct vm_area_struct *vma);
void test_send_msg_rdma(unsigned int i, unsigned int msg);
int get_file_minor(struct file *file);
void init_reg(void);
void rdma_mem_free(size_t size, dma_addr_t dev_memory,
                   unsigned long dma_memory);
void rdma_link_init(int link);
void read_regs_rdma(int);
int rdma_mem_alloc(int node, size_t size, dma_addr_t *mem,
                   size_t *real_size, unsigned long *dma_memory,
                   int node_mem_alloc);
int write_buf(int link, rdma_ioc_parm_t *parm, unsigned int f_flags);
int read_buf(int link, rdma_ioc_parm_t *parm, unsigned int f_flags);
int rdma_remap_page(void *va, size_t sz, struct vm_area_struct *vma);
int rdma_remap_page_tbl(void *va, size_t sz, struct vm_area_struct *vma,
                        int align);
long wait_time_rdma(struct rdma_reg_state *rdma_reg_state,
                    signed long timeout);
int rdma_check_buf(unsigned long addr_buf, unsigned int cnst,
                   unsigned int need_free_page, char *prefix);
unsigned long join_curr_clock(void);
unsigned int RDR_rdma(unsigned int reg, unsigned int node);
void WRR_rdma(unsigned int reg, unsigned int node, unsigned int val);
int create_dev_mokx(int major);
int remove_dev_mokx(int major);
int init_buff(int link, int rw);
int rdma_mem_alloc_pool(rdma_pool_buf_t *);
void rdma_mem_free_pool(rdma_pool_buf_t *);
static void rdma_cleanup(void);
int send_msg_check(unsigned int msg, unsigned int link, unsigned int cmd,
                   dev_rdma_sem_t *dev, int print_enable);
unsigned long __get_free_pages_rdma(int node, gfp_t gfp_mask,
                                    unsigned int order, int node_mem_alloc);
int mok_x_unset_mode4(int link);

#if RESET_THREAD_DMA
int rst_thr_action(void *arg);
#endif
DEFINE_RAW_SPINLOCK(mu_fix_event);

static struct file_operations rdma_fops = {
        .owner = THIS_MODULE,
        .read = rdma_read,
        .write = rdma_write,
        .unlocked_ioctl = rdma_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = rdma_compat_ioctl,
#endif
        .mmap = rdma_mmap,
        .open = rdma_open,
        .release = rdma_close,
};
void init_regs(void)
{
        SHIFT_IO_VID = IO_VID;
        SHIFT_IOL_CSR = IOL_CSR;
        SHIFT_IO_CSR = IO_CSR;
        SHIFT_VID = RDMA_VID;
        SHIFT_CH_IDT = RDMA_CH_IDT;
        SHIFT_CS = RDMA_CS;
        SHIFT_DD_ID = RDMA_DD_ID;
        SHIFT_DMD_ID = RDMA_DMD_ID;
        SHIFT_N_IDT = RDMA_N_IDT;
        SHIFT_ES = RDMA_ES;
        SHIFT_IRQ_MC = RDMA_IRQ_MC;
        SHIFT_DMA_TCS = RDMA_DMA_TCS;
        SHIFT_DMA_TSA = RDMA_DMA_TSA;
        SHIFT_DMA_TBC = RDMA_DMA_TBC;
        SHIFT_DMA_RCS = RDMA_DMA_RCS;
        SHIFT_DMA_RSA = RDMA_DMA_RSA;
        SHIFT_DMA_RBC = RDMA_DMA_RBC;
        SHIFT_MSG_CS = RDMA_MSG_CS;
        SHIFT_TDMSG = RDMA_TDMSG;
        SHIFT_RDMSG = RDMA_RDMSG;
        SHIFT_DMA_HTSA = RDMA_DMA_HTSA;
        SHIFT_DMA_HRSA = RDMA_DMA_HRSA;
        SHIFT_CAM = RDMA_CAM;
}
/*
 * Read/write RDMA CPU registers, DMA alignment
 * ============================================================================
 */
static inline void sic_write_node_nbsr_reg_rdma(int node_id,
                                                unsigned int reg_offset,
                                                unsigned int reg_value)
{
        sic_write_node_iolink_nbsr_reg(NUM_NODE_RDMA(node_id),
                                       NUM_LINK_IN_NODE_RDMA(node_id),
                                       reg_offset, reg_value);
}

static inline unsigned int sic_read_node_nbsr_reg_rdma(int node_id,
                                                       int reg_offset)
{
        unsigned int reg_value;

        reg_value = sic_read_node_iolink_nbsr_reg(NUM_NODE_RDMA(node_id),
                                                  NUM_LINK_IN_NODE_RDMA(node_id),
                                                  reg_offset);
        return reg_value;
}

void WRR_rdma(unsigned int reg, unsigned int node, unsigned int val)
{
        sic_write_node_nbsr_reg_rdma(node, reg, val);
        fix_event(node, WRR_EVENT, reg, val);
}

unsigned int RDR_rdma(unsigned int reg, unsigned int node)
{
        unsigned int val;

        val = sic_read_node_nbsr_reg_rdma(node, reg);
        fix_event(node, RDR_EVENT, reg, val);
        return val;
}
unsigned int allign_dma(unsigned int n)
{
        if (n & (ALLIGN_RDMA - 1)) {
                n += ALLIGN_RDMA;
                n = n & (~(ALLIGN_RDMA - 1));
        }
        return n;
}

#define ALLIGN_RDMA_BUF (16 * PAGE_SIZE)
unsigned int allign_dma_buf(unsigned int n)
{
        if (n & (ALLIGN_RDMA_BUF - 1)) {
                n += ALLIGN_RDMA_BUF;
                n = n & (~(ALLIGN_RDMA_BUF - 1));
        }
        return n;
}
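
/*
 * Illustrative sketch (not part of the driver): both helpers round a
 * byte count up to a power-of-two boundary. Assuming, for example,
 * ALLIGN_RDMA == 0x100, allign_dma(0x101) first adds 0x100 (giving
 * 0x201) and then masks with ~0xff, yielding 0x200. For a power-of-two
 * alignment A the equivalent one-liner would be:
 *
 *      n = (n + A - 1) & ~(A - 1);
 *
 * The two-step form above behaves the same because the add and mask
 * are applied only when n is misaligned.
 */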
/*
 * List search
 * ============================================================================
 */
static rdma_buf_t *search_in_list(struct list_head *list1, int num1)
{
        struct list_head *tmp;
        rdma_buf_t *ret = NULL;

        list_for_each(tmp, list1) {
                ret = list_entry(tmp, rdma_buf_t, list);
                if (ret->num == num1)
                        return ret;
        }
        return NULL;
}
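
/*
 * Usage sketch (hypothetical, for illustration only; no caller in this
 * file is implied): given the pools initialized in pool_buf_init()
 * below, a buffer could be looked up by its number, e.g.:
 *
 *      rdma_buf_t *b = search_in_list(&pool_buf->free_list, 3);
 *      if (b)
 *              ... buffer #3 is currently on the free list ...
 */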
/*
 * Clock
 * ============================================================================
 */
unsigned long join_curr_clock(void)
{
        unsigned long ret;

        ret = get_cycles();
        return ret;
}
/*
 * Schedule
 * ============================================================================
 */
static inline void __raw_add_wait_queue_from_ddi(raw_wait_queue_head_t *head,
                                                 raw_wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

static inline void __raw_remove_wait_queue_from_ddi(raw_wait_queue_head_t *head,
                                                    raw_wait_queue_t *old)
{
        list_del(&old->task_list);
}

void raw_add_wait_queue_from_ddi(raw_wait_queue_head_t *q,
                                 raw_wait_queue_t *wait)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&q->lock, flags);
        __raw_add_wait_queue_from_ddi(q, wait);
        raw_spin_unlock_irqrestore(&q->lock, flags);
}

void raw_remove_wait_queue_from_ddi(raw_wait_queue_head_t *q,
                                    raw_wait_queue_t *wait)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&q->lock, flags);
        __raw_remove_wait_queue_from_ddi(q, wait);
        raw_spin_unlock_irqrestore(&q->lock, flags);
}
hrtime_t rdma_gethrtime(void)
{
        struct timeval tv;
        hrtime_t val;

        do_gettimeofday(&tv);
        val = tv.tv_sec * 1000000000LL + tv.tv_usec * 1000LL;
        return val;
}
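
/*
 * Worked example (illustrative): for tv = { .tv_sec = 2, .tv_usec = 5 }
 * the result is 2 * 1000000000 + 5 * 1000 = 2000005000, i.e. the
 * wall-clock time expressed in nanoseconds as an hrtime_t.
 */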
static void __raw_wake_up_common_from_ddi(raw_wait_queue_head_t *q)
{
        struct list_head *tmp, *next;
        raw_wait_queue_t *curr;

        list_for_each_safe(tmp, next, &q->task_list) {
                curr = list_entry(tmp, raw_wait_queue_t, task_list);
                //wake_up_state(curr->task, TASK_UNINTERRUPTIBLE |
                //              TASK_INTERRUPTIBLE);
                wake_up_process(curr->task);
        }
}

void __raw_wake_up_from_ddi(raw_wait_queue_head_t *q)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&q->lock, flags);
        __raw_wake_up_common_from_ddi(q);
        raw_spin_unlock_irqrestore(&q->lock, flags);
}

int cv_broadcast_from_ddi(raw_wait_queue_head_t *cvp)
{
        __raw_wake_up_from_ddi(cvp);
        return 0;
}
int rdma_cv_broadcast_rdma(void *dev_rdma_sem, unsigned int link)
{
        rdma_addr_struct_t p_xxb;
        dev_rdma_sem_t *dev = dev_rdma_sem;

        dev->irq_count_rdma++;
        dev->time_broadcast = join_curr_clock();
        p_xxb.addr = (unsigned long)dev;
        fix_event(link, RDMA_BROADCAST, p_xxb.fields.laddr,
                  dev->irq_count_rdma);
        cv_broadcast_from_ddi(&dev->cond_var);
        return 0;
}
/*
 * Convert microseconds to HZ (jiffies)
 */
clock_t drv_usectohz_from_ddi(register clock_t mksec)
{
        clock_t clock;
        struct timespec rqtp;

        rqtp.tv_nsec = (mksec % 1000000L) * 1000L;
        rqtp.tv_sec = mksec / 1000000L;
        clock = timespec_to_jiffies(&rqtp);
        return clock;
}
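
/*
 * Worked example (illustrative, assuming HZ == 100, i.e. 10 ms per
 * jiffy): drv_usectohz_from_ddi(250000) builds a timespec of
 * { .tv_sec = 0, .tv_nsec = 250000000 } and timespec_to_jiffies()
 * returns roughly 25 ticks. The helper is intended for callers that
 * need a microsecond timeout expressed in jiffies before arming
 * schedule_timeout().
 */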
int cv_spin_timedwait_from_ddi(raw_wait_queue_head_t *cvp,
                               raw_spinlock_t *lock, long tim)
{
        struct task_struct *tsk = current;
        unsigned long expire;
        int raw_spin_locking_done = 0;
        int rval = 0;

        DECLARE_RAW_WAIT_QUEUE(wait);
        expire = tim - jiffies;
        tsk->state = TASK_INTERRUPTIBLE;
        raw_add_wait_queue_from_ddi(cvp, &wait);
        raw_spin_locking_done = raw_spin_is_locked(lock);
        if (raw_spin_locking_done)
                spin_mutex_exit(lock);
        fix_event(0, WAIT_TRY_SCHTO_EVENT, (unsigned int)expire, 0);
        expire = schedule_timeout(expire);
        raw_remove_wait_queue_from_ddi(cvp, &wait);
        tsk->state = TASK_RUNNING;
        if (expire) {
                if (signal_pending(current)) {
                        rval = -2;
                }
        } else {
                rval = -1;
        }
        return rval;
}
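
/*
 * Return-code sketch (as reconstructed from the body above): 0 means
 * the wait was satisfied before the timeout, -1 means schedule_timeout()
 * ran down to zero (timeout), -2 means the task woke early with a
 * signal pending. wait_for_irq_rdma_sem() below forwards this value to
 * its callers.
 */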
int wait_for_irq_rdma_sem(void *dev_rdma_sem, signed long usec_timeout,
                          unsigned int link)
{
        rdma_addr_struct_t p_xxb;
        dev_rdma_sem_t *dev = dev_rdma_sem;
        unsigned int time_current;
        unsigned int delta_time;
        signed long timeout_tick;
        int ret = 0;

        if (!raw_spin_is_locked(&dev->lock)) {
                printk("%s: spin is NOT locked: dev: %p\n", __FUNCTION__, dev);
                return -3;
        }
        if (dev->irq_count_rdma) {
                printk("%s(%p): dev->irq_count_rdma: %u "
                       "num_obmen: %u\n", __FUNCTION__, &dev->lock,
                       dev->irq_count_rdma, (unsigned int)dev->num_obmen);
                delta_time = 0;
                if (dev->time_broadcast) {
                        time_current = join_curr_clock();
                        if (time_current > dev->time_broadcast) {
                                delta_time = (unsigned int)(time_current -
                                                dev->time_broadcast);
                        } else {
                                delta_time = (unsigned int)(time_current +
                                                (~0U - dev->time_broadcast));
                        }
                        delta_time |= (1 << 31);
                        fix_event(link, WAIT_RET_SCHT0_EVENT, delta_time,
                                  dev->num_obmen);
                        fix_event(link, WAIT_RET_SCHT0_EVENT,
                                  dev->irq_count_rdma, dev->num_obmen);
                        dev->time_broadcast = 0;
                }
                return 1;
        }
        p_xxb.addr = usec_timeout;
        fix_event(link, WAIT_TRY_SCHTO_EVENT, p_xxb.fields.laddr,
                  dev->num_obmen);
        timeout_tick = (unsigned long)jiffies;
        timeout_tick += usec_timeout;
        ret = cv_spin_timedwait_from_ddi(&dev->cond_var, &dev->lock,
                                         timeout_tick);
        delta_time = 0;
        if (dev->time_broadcast) {
                time_current = join_curr_clock();
                if (time_current > dev->time_broadcast) {
                        delta_time = (unsigned int)(time_current -
                                        dev->time_broadcast);
                } else {
                        delta_time = (unsigned int)(time_current +
                                        (~0U - dev->time_broadcast));
                }
                fix_event(link, WAIT_RET_SCHT1_EVENT, ret, dev->num_obmen);
                dev->time_broadcast = 0;
        } else {
                fix_event(dev->irq_count_rdma, WAIT_RET_SCHT2_EVENT, ret,
                          dev->num_obmen);
        }
        return ret;
}
/*
 * Fixed event
 * ============================================================================
 */
rdma_event_t rdma_event;
int rdma_event_init = 0;
#include "mokx_get_event.c"

void fix_event_proc(unsigned int channel, unsigned int event,
                    unsigned int val1, unsigned int val2)
{
        struct event_cur *event_cur;
        unsigned long flags;

        if (!rdma_event_init)
                return;
        raw_spin_lock_irqsave(&mu_fix_event, flags);
        event_cur = &rdma_event.event[rdma_event.event_cur];
        event_cur->clkr = join_curr_clock();
        event_cur->event = event;
        event_cur->channel = channel;
        event_cur->val1 = val1;
        event_cur->val2 = val2;
        rdma_event.event_cur++;
        if (SIZE_EVENT == rdma_event.event_cur) {
                rdma_event.event_cur = 0;
        }
        raw_spin_unlock_irqrestore(&mu_fix_event, flags);
        return;
}
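
/*
 * Note (illustrative): rdma_event.event is used here as a fixed-size
 * ring of SIZE_EVENT entries; event_cur wraps to 0 when it reaches
 * SIZE_EVENT, so the newest record silently overwrites the oldest.
 * A reader that walks the ring would start at event_cur (the oldest
 * slot) and iterate modulo SIZE_EVENT, e.g.:
 *
 *      for (i = 0; i < SIZE_EVENT; i++) {
 *              struct event_cur *e =
 *                      &rdma_event.event[(rdma_event.event_cur + i) %
 *                                        SIZE_EVENT];
 *              ...
 *      }
 */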
#include "mokx_ext_mode.c"
#include "mokx_intrrupt.c"
#include "mokx_read_buf.c"
#include "mokx_write_buf.c"
#include "mokx_send_msg.c"
/*
 * Set ID and mask
 * ============================================================================
 */

/*
 * Set ID for device
 */
void set_id_link(int link)
{
        unsigned cs;

        //WRR_rdma(SHIFT_CH_IDT, link, (base_ip_addr[3] + link) |
        //         ((base_ip_addr[4] + link) << 8));
        cs = RDR_rdma(SHIFT_CS, link);
        if (IS_MACHINE_E2S)
                WRR_rdma(SHIFT_CS, link, cs | CS_DSM | E2S_CS_PTOCL);
        else
                WRR_rdma(SHIFT_CS, link, cs | CS_DSM);
        INFO_MSG("SHIFT_CS: 0x%08x\n", RDR_rdma(SHIFT_CS, link));
        //INFO_MSG("SHIFT_CH_IDT: 0x%08x\n", RDR_rdma(SHIFT_CH_IDT, link));
        //INFO_MSG("SHIFT_N_IDT: 0x%08x\n", RDR_rdma(SHIFT_N_IDT, link));
}
/*
 * Set/unset the interrupt mask
 */
int set_mask(int link, unsigned int irq_mask)
{
        int ret = SUCCES_MOK_X;

        WRR_rdma(SHIFT_IRQ_MC, link, irq_mask);
        if (RDR_rdma(SHIFT_IRQ_MC, link) != irq_mask)
                ret = FAILED_MOK_X;
        return ret;
}
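
/*
 * Usage sketch (illustrative): the driver flips the whole mask at once
 * rather than individual bits; e.g. mok_x_set_native_mode() below does
 *
 *      if (set_mask(link, irq_mc)) { ... mask readback failed ... }
 *
 * and masks everything off again with set_mask(link, 0x0) on the
 * failure path. The write-then-read-back check catches links whose
 * IRQ_MC register is inaccessible.
 */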
/*
 * Send messages
 * ============================================================================
 */
int send_msg_check(unsigned int msg, unsigned int link, unsigned int cmd,
                   dev_rdma_sem_t *dev, int print_enable)
{
        rdma_state_link_t *rdma_link;
        int ret_send_msg, i, count_repeat = 10;
        unsigned long flags_s;

        rdma_link = &rdma_state->rdma_link[link];
        raw_spin_lock_irqsave(&rdma_link->mutex_send_msg, flags_s);
        for (i = 0; i < count_repeat; i++) {
                ret_send_msg = send_msg(rdma_link, msg, link, cmd, 0);
                if (ret_send_msg > 0)
                        break;
                if (ret_send_msg < 0) {
                        if (print_enable)
                                ERROR_MSG("%s: FAIL send msg: 0x%08x "
                                          "cmd: 0x%08x from link: %d ret: %d\n",
                                          __FUNCTION__, msg, cmd, link,
                                          ret_send_msg);
                } else if (ret_send_msg == 0) {
                        if (print_enable)
                                DEBUG_MSG("%s: FAIL send msg: 0x%08x "
                                          "cmd: 0x%08x from link: %d "
                                          "ret: %d. SM is absent. "
                                          "MSG_CS: 0x%08x\n",
                                          __FUNCTION__, msg, cmd, link,
                                          ret_send_msg,
                                          RDR_rdma(SHIFT_MSG_CS, link));
                }
        }
        raw_spin_unlock_irqrestore(&rdma_link->mutex_send_msg, flags_s);
        if (ret_send_msg > 0) {
                fix_event(link, SNDMSGOK_EVENT, ret_send_msg, count_repeat);
                fix_event(link, SNDMSGOK_EVENT, 0xff, raw_smp_processor_id());
        } else {
                fix_event(link, SNDMSGBAD_EVENT, ret_send_msg, count_repeat);
                fix_event(link, SNDMSGBAD_EVENT, 0xff, raw_smp_processor_id());
        }
        return ret_send_msg;
}
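
/*
 * Convention sketch (as used throughout this file): send_msg_check()
 * returns > 0 on success, 0 when the peer's state machine did not
 * answer ("SM is absent"), and < 0 on a hard send failure. The
 * send_SGPn_Msg() wrappers below treat both 0 and < 0 as errors and
 * differ only in the message code they pass.
 */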
/*
 * Send SIR (start CAM)
 */
int send_SIR_Msg(int link)
{
        int ret = SUCCES_MOK_X;

        ret = send_msg_check(0, link, MSG_CS_SIR_Msg, 0, 0);
        if (ret < 0) {
                ERROR_MSG("%s: FAIL send MSG_CS_SIR_Msg from link: 0x%08x "
                          "ret: %d\n", __FUNCTION__, link, ret);
        } else if (ret == 0) {
                ERROR_MSG("%s: FAIL send MSG_CS_SIR_Msg from link: 0x%08x. "
                          "SM is absent\n", __FUNCTION__, link);
        }
        return ret;
}

/*
 * Send GP0 (reset)
 */
int send_SGP0_Msg(int link)
{
        int ret = SUCCES_MOK_X;

        ret = send_msg_check(0, link, MSG_CS_SGP0_Msg, 0, 0);
        if (ret < 0) {
                ERROR_MSG("%s: FAIL send MSG_CS_SGP0_Msg from link: 0x%08x "
                          "ret: %d\n", __FUNCTION__, link, ret);
        } else if (ret == 0) {
                ERROR_MSG("%s: FAIL send MSG_CS_SGP0_Msg from link: 0x%08x. "
                          "SM is absent\n", __FUNCTION__, link);
        }
        return ret;
}

/*
 * Send GP1 (change mode)
 */
int send_SGP1_Msg(int link)
{
        int ret = SUCCES_MOK_X;

        ret = send_msg_check(0, link, MSG_CS_SGP1_Msg, 0, 0);
        if (ret < 0) {
                ERROR_MSG("%s: FAIL send MSG_CS_SGP1_Msg from link: 0x%08x "
                          "ret: %d\n", __FUNCTION__, link, ret);
        } else if (ret == 0) {
                ERROR_MSG("%s: FAIL send MSG_CS_SGP1_Msg from link: 0x%08x. "
                          "SM is absent\n", __FUNCTION__, link);
        }
        return ret;
}

/*
 * Send GP2 (reset)
 */
int send_SGP2_Msg(int link)
{
        int ret = SUCCES_MOK_X;

        ret = send_msg_check(0, link, MSG_CS_SGP2_Msg, 0, 0);
        if (ret < 0) {
                ERROR_MSG("%s: FAIL send MSG_CS_SGP2_Msg from link: 0x%08x "
                          "ret: %d\n", __FUNCTION__, link, ret);
        } else if (ret == 0) {
                ERROR_MSG("%s: FAIL send MSG_CS_SGP2_Msg from link: 0x%08x. "
                          "SM is absent\n", __FUNCTION__, link);
        }
        return ret;
}
/*
 * Reset link
 * ============================================================================
 */
int link_soft_reset(int link)
{
        unsigned int cs;
        int i;

        cs = RDR_rdma(SHIFT_CS, link);
        printk("%s: link #%d. Register CS: %x.\n", __FUNCTION__, link, cs);
        printk("%s: link #%d. Reset link.\n", __FUNCTION__, link);
        WRR_rdma(SHIFT_CS, link, cs | CS_SRst);
        for (i = 0; i < 10; i++) {
                mdelay(1);
                cs = RDR_rdma(SHIFT_CS, link);
                printk("%s: link #%d. Register CS: %x.\n", __FUNCTION__,
                       link, cs);
        }
        return cs | CS_SRst;
}
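
/*
 * Note (illustrative): link_soft_reset() sets CS_SRst in the CS
 * register and then re-reads CS ten times at 1 ms intervals purely for
 * logging; the return value is the last CS reading with CS_SRst forced
 * on, so callers see a nonzero value regardless of whether the reset
 * bit has self-cleared by then.
 */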
#ifdef RESET_DMA_MEMMORY
int null_change(int link)
{
        rdma_addr_struct_t p_xxb_pa_r, p_xxb_pa_w;
        unsigned int es;

        p_xxb_pa_r.addr = (unsigned long)__pa(reset_dma_memory_r);
        p_xxb_pa_w.addr = (unsigned long)__pa(reset_dma_memory_w);
        WRR_rdma(SHIFT_IRQ_MC, link, 0x0);
        //read_regs_rdma(link);

#if 0
        unsigned int i;
        for (i = 0; i < 10; i++) {
                WRR_rdma(SHIFT_DMA_RCS, link, DMA_RCS_Rx_Rst);
                udelay(1000);
        }
        WRR_rdma(SHIFT_DMA_TCS, link, DMA_TCS_Tx_Rst);
        WRR_rdma(SHIFT_DMA_RCS, link,
                 RDR_rdma(SHIFT_DMA_RCS, link) & ~DMA_RCS_RTM);
#endif
        mok_x_unset_mode4(link);
        //printk("---------------------Receive null wait...\n");
        //read_regs_rdma(link);

        WRR_rdma(SHIFT_DMA_TCS, link, RCode_64);
        WRR_rdma(SHIFT_DMA_RCS, link, WCode_64);
        WRR_rdma(SHIFT_DMA_HRSA, link, p_xxb_pa_r.fields.haddr);
        WRR_rdma(SHIFT_DMA_RSA, link, p_xxb_pa_r.fields.laddr);
        WRR_rdma(SHIFT_DMA_RBC, link, reset_size_r);
        WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 | DMA_RCS_RFSM | DMA_RCS_RE);

        //printk("Receive null wait...\n");
        //read_regs_rdma(link);
        udelay(10000);
        //printk("Receive wait end.\n");
        //read_regs_rdma(link);
        WRR_rdma(SHIFT_DMA_RCS, link,
                 RDR_rdma(SHIFT_DMA_RCS, link) & (~DMA_RCS_RE));
#if 1
        unsigned int i;
        for (i = 0; i < 10; i++) {
                WRR_rdma(SHIFT_DMA_RCS, link, DMA_RCS_Rx_Rst);
                udelay(1000);
        }
        WRR_rdma(SHIFT_DMA_TCS, link, RCode_64);
        WRR_rdma(SHIFT_DMA_RCS, link, WCode_64);
#endif
        es = RDR_rdma(SHIFT_ES, link);
        if (es & ES_RDC_Ev)
                WRR_rdma(SHIFT_ES, link, es & ES_RDC_Ev);
        WRR_rdma(SHIFT_IRQ_MC, link, irq_mc);
        //read_regs_rdma(link);
        return 0;
}
#endif
#if RESET_THREAD_DMA

#define RST_THR_ACT_DBG 1
#define RST_THR_ACT_DEBUG_MSG(x...)\
                if (RST_THR_ACT_DBG) DEBUG_MSG(x)
int rst_thr_action(void *arg)
{
        rdma_state_link_t *rdma_link = (rdma_state_link_t *)arg;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO/4 };
        unsigned long flags;
        int link = rdma_link->link;
        int count = 0;
        int ret_smsg, file_reciver_open = 0;
        unsigned int sending_msg;
        rw_state_p pd = NULL;
        dev_rdma_sem_t *dev_sem;
        rdma_pool_buf_t *r_pool_buf;
        unsigned int es;

        RST_THR_ACT_DEBUG_MSG("%s: START link:%d rdma_link: %p\n",
                              __FUNCTION__, link, rdma_link);
        //sys_sched_setscheduler(current->pid, SCHED_FIFO, &param);
        sched_setscheduler(current, SCHED_FIFO, &param);
        pd = &rdma_link->rw_states_d[READER];
        dev_sem = &pd->dev_rdma_sem;
        r_pool_buf = &rdma_link->read_pool;
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                raw_spin_lock_irqsave(&dev_sem->lock, flags);
                if (pd->state_open_close) {
                        file_reciver_open = 1;
                } else
                        file_reciver_open = 0;
                raw_spin_unlock_irqrestore(&dev_sem->lock, flags);
                raw_spin_lock_irqsave(&rdma_link->rst_thr_lock, flags);
                if (rdma_link->start_rst_thr == 0) {
                        raw_spin_unlock_irqrestore(&rdma_link->rst_thr_lock,
                                                   flags);
                        RST_THR_ACT_DEBUG_MSG("%s: link:%d rdma_link: %p no reset\n",
                                              __FUNCTION__, link, rdma_link);
                        schedule();
                        continue;
                }
#if RST_THR_ACT_DBG
                read_regs_rdma(link);
#endif
                rdma_link->start_rst_thr = 0;
                raw_spin_unlock_irqrestore(&rdma_link->rst_thr_lock, flags);
                WRR_rdma(SHIFT_IRQ_MC, link, irq_mc_03);
#define DELAY_DMA 10
#define COUNT_DMA 100
                for (count = 1; count < COUNT_DMA; count++) {
                        RST_THR_ACT_DEBUG_MSG("Repeat reg prog.\n");
                        read_regs_rdma(link);
                        es = RDR_rdma(SHIFT_ES, link);
                        if (es & ES_DSF_Ev) {
                                WRR_rdma(SHIFT_DMA_TCS, link,
                                         RDR_rdma(SHIFT_DMA_TCS, link) &
                                         (~DMA_TCS_TE));
                                WRR_rdma(SHIFT_ES, link, es & ES_DSF_Ev);
                                WRR_rdma(SHIFT_DMA_TCS, link, RCode_64 |
                                         DMA_TCS_DRCL | DMA_TCS_TE);
                        }
                        mdelay(COUNT_DMA);
                }
                es = RDR_rdma(SHIFT_ES, link);
                if (es & ES_DSF_Ev) {
                        WRR_rdma(SHIFT_DMA_TCS, link,
                                 RDR_rdma(SHIFT_DMA_TCS, link) & (~DMA_TCS_TE));
                        WRR_rdma(SHIFT_ES, link, es & ES_DSF_Ev);
                }
                WRR_rdma(SHIFT_DMA_TCS, link,
                         RDR_rdma(SHIFT_DMA_TCS, link) & (~DMA_TCS_TE));
                WRR_rdma(SHIFT_DMA_RCS, link,
                         RDR_rdma(SHIFT_DMA_RCS, link) & (~DMA_RCS_RE));
                es = RDR_rdma(SHIFT_ES, link);
                WRR_rdma(SHIFT_ES, link, es & ~ES_SM_Ev & ~ES_DSF_Ev);
#define DELAY_RESET 10
#define COUNT_RESET_RCS 10
                for (count = 1; count < COUNT_RESET_RCS; count++) {
                        WRR_rdma(SHIFT_DMA_RCS, link, DMA_RCS_Rx_Rst);
                        mdelay(DELAY_RESET);
                }
                WRR_rdma(SHIFT_DMA_RCS, link,
                         RDR_rdma(SHIFT_DMA_RCS, link) | WCode_64);
#define COUNT_RESET_TCS 10
                for (count = 1; count < COUNT_RESET_TCS; count++) {
                        WRR_rdma(SHIFT_DMA_TCS, link, DMA_TCS_Tx_Rst);
                        mdelay(DELAY_RESET);
                }
                WRR_rdma(SHIFT_DMA_TCS, link,
                         RDR_rdma(SHIFT_DMA_TCS, link) | RCode_64 |
                         DMA_TCS_DRCL);
                //rdma_link->start_rst_thr = 0;
#if RST_THR_ACT_DBG
                read_regs_rdma(link);
#endif
                /*
                 * If the receiver file is open and the transmitter was reset
                 */
                if (file_reciver_open) {
                        unsigned long flags_r;

                        raw_spin_lock_irqsave(&pd->lock_rd, flags_r);
                        /*
                         * Release the buffers
                         */
                        while (!list_empty(&r_pool_buf->ready_list)) {
                                list_move_tail(r_pool_buf->ready_list.next,
                                               &r_pool_buf->free_list);
                                r_pool_buf->num_free_buf++;
                        }
                        //while (!list_empty(&r_pool_buf->busy_list)) {
                        //        list_move_tail(r_pool_buf->busy_list.next,
                        //                       &r_pool_buf->free_list);
                        //}
                        //r_pool_buf->num_free_buf = num_buf;
                        raw_spin_unlock_irqrestore(&pd->lock_rd, flags_r);
                        /*
                         * Create MSG_READY_DMA
                         */
                        sending_msg = MSG_READY_DMA | r_pool_buf->num_free_buf;
                        /*
                         * Send TRWD
                         */
                        if ((ret_smsg = send_msg_check(sending_msg, link,
                                                       0, dev_sem, 0)) <= 0) {
                                fix_event(link, READ_SNDMSGBAD_EVENT,
                                          sending_msg, dev_sem->num_obmen);
                        } else {
                                fix_event(link, READ_SNDNGMSG_EVENT,
                                          sending_msg, dev_sem->num_obmen);
                        }
                }
                WRR_rdma(SHIFT_IRQ_MC, link, irq_mc);
                RST_THR_ACT_DEBUG_MSG("%s: link:%d rdma_link: %p reset mask: %x\n",
                                      __FUNCTION__, link, rdma_link,
                                      RDR_rdma(SHIFT_IRQ_MC, link));
        }
        __set_current_state(TASK_RUNNING);
        RST_THR_ACT_DEBUG_MSG("%s: STOP link:%d rdma_link: %p\n", __FUNCTION__,
                              link, rdma_link);
        return 0;
}
#endif
/*
 * Create the link-reset thread, init the reset-thread lock
 */
#if RESET_THREAD_DMA
int thread_reset_start(int link)
{
        rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];

        raw_spin_lock_init(&rdma_link->rst_thr_lock);
        rdma_link->start_rst_thr = 0;
        rdma_link->rst_thr = kthread_create(rst_thr_action, rdma_link,
                                            "%d-mokx-rx-rst-thr", link);
        /* kthread_create() reports failure via ERR_PTR(), not NULL */
        if (IS_ERR(rdma_link->rst_thr)) {
                ERROR_MSG("%s: could not create %d-mokx-rst-thr\n",
                          __FUNCTION__, link);
                rdma_link->rst_thr = NULL;
                return FAILED_MOK_X;
        }
        return SUCCES_MOK_X;
}
#endif
/*
 * Reset on a channel error and at driver initialization
 */
void link_error_reset_start(int link)
{
#if RESET_THREAD_DMA
        rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];
        unsigned long flags;

        raw_spin_lock_irqsave(&rdma_link->rst_thr_lock, flags);
        rdma_link->start_rst_thr = 1;
        raw_spin_unlock_irqrestore(&rdma_link->rst_thr_lock, flags);
        wake_up_process(rdma_link->rst_thr);
#else
#if 0
        WRR_rdma(SHIFT_DMA_TCS, link, DMA_TCS_Tx_Rst);
        WRR_rdma(SHIFT_DMA_TCS, link, RDR_rdma(SHIFT_DMA_TCS, link) |
                 RCode_64 | DMA_TCS_DRCL);
#define COUNT_RESET_RCS 10
        int count = 0;
        for (count = 1; count < COUNT_RESET_RCS; count++)
                WRR_rdma(SHIFT_DMA_RCS, link, DMA_RCS_Rx_Rst);
        WRR_rdma(SHIFT_DMA_RCS, link, RDR_rdma(SHIFT_DMA_RCS, link) | WCode_64);
#endif
#endif
}
#if RESET_THREAD_DMA
/*
 * Stop the link-reset thread
 */
void thread_reset_stop(int link)
{
        rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];

        if (rdma_link->rst_thr) {
                kthread_stop(rdma_link->rst_thr);
                rdma_link->rst_thr = NULL;
        }
}
#endif
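
/*
 * Handshake sketch (illustrative): link_error_reset_start() only sets
 * start_rst_thr under rst_thr_lock and wakes the thread;
 * rst_thr_action() re-checks the flag under the same lock, clears it,
 * and then runs the actual DMA reset sequence. Stopping goes through
 * kthread_stop(), which rst_thr_action() observes via
 * kthread_should_stop() at the top of its loop.
 */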
/*
 * Memory
 * ============================================================================
 */
#define INIT_POOL_BUF_DBG 0
#define INIT_POOL_BUF_DEBUG_MSG(x...)\
                if (INIT_POOL_BUF_DBG) DEBUG_MSG(x)
static int pool_buf_init(int link, int rw)
{
        rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];
        rdma_tbl_64_struct_t *peltbl, *peltbl_tmp;
        rdma_addr_struct_t pxx;
        rdma_pool_buf_t *pool_buf;
        rdma_buf_t *r_buf;
        int buf_size_page;
        int buf_size;
        int i;

        INIT_POOL_BUF_DEBUG_MSG("%s: buffer(%s) START\n", __FUNCTION__,
                                rw ? "write" : "read");
        INIT_POOL_BUF_DEBUG_MSG("=========================================\n");
        INIT_POOL_BUF_DEBUG_MSG("rdma_link->num_buf: %x\n", rdma_link->num_buf);
        INIT_POOL_BUF_DEBUG_MSG("rdma_link->tm_mode: %x\n", rdma_link->tm_mode);
        INIT_POOL_BUF_DEBUG_MSG("rdma_link->max_size_buf_tm: %x\n",
                                rdma_link->max_size_buf_tm);
        INIT_POOL_BUF_DEBUG_MSG("rdma_link->align_buf_tm: %x\n",
                                rdma_link->align_buf_tm);
        INIT_POOL_BUF_DEBUG_MSG("rdma_link->node_mem_alloc: %x\n",
                                rdma_link->node_mem_alloc);
        INIT_POOL_BUF_DEBUG_MSG("rdma_link->type_alloc: %x\n",
                                rdma_link->type_alloc);
        INIT_POOL_BUF_DEBUG_MSG("=========================================\n");

        rw ? (pool_buf = &rdma_link->write_pool) :
             (pool_buf = &rdma_link->read_pool);
        pool_buf->alloc = RDMA_BUF_EMPTY;
        /*
         * Alloc memory for the pool (user access address and DMA address)
         */
        if (rdma_link->type_alloc) {
#ifdef CONFIG_RDMA_BOOT_MEM_ALLOC
                if (R_M_NODE && rdma_link_mem[NUM_NODE_RDMA(link)]) {
                        buf_size = allign_dma(rdma_link->max_size_buf);
                        if ((buf_size * rdma_link->num_buf) > R_M_NODE)
                                goto failed;
                        INIT_POOL_BUF_DEBUG_MSG("%s: alloc bootmem rdma_link_mem[%d]: %p\n",
                                                __FUNCTION__, NUM_NODE_RDMA(link),
                                                rdma_link_mem[NUM_NODE_RDMA(link)]);
                        rdma_link->buf_size = buf_size;
                        rdma_link->tm_mode = 0;
                        pool_buf->buf_size = buf_size;
                        pool_buf->size = buf_size * rdma_link->num_buf;
                        pool_buf->tm_mode = rdma_link->tm_mode;
                        pool_buf->vdma = (caddr_t)(rdma_link_mem[NUM_NODE_RDMA(link)] +
                                                   pool_buf->size * busy_rdma_boot_mem);
                        pool_buf->fdma = (dma_addr_t)virt_to_phys(pool_buf->vdma);
                        pool_buf->dma_size = pool_buf->size;
                } else
                        goto failed;
#else
                goto failed;
#endif
        } else {
                rdma_link->tm_mode ?
                        (buf_size = ALIGN(rdma_link->max_size_buf_tm,
                                          rdma_link->align_buf_tm * PAGE_SIZE)) :
                        (buf_size = allign_dma(rdma_link->max_size_buf));
                if (rdma_link->tm_mode)
                        buf_size = ALIGN(buf_size,
                                         32 * rdma_link->align_buf_tm * PAGE_SIZE);
                buf_size_page = buf_size / (rdma_link->align_buf_tm * PAGE_SIZE);
                if (rdma_link->tm_mode) {
                        INIT_POOL_BUF_DEBUG_MSG("%s: max_size_buf_tm: 0x%08x "
                                                "buf_size: 0x%08x buf_size_page: %d\n",
                                                __FUNCTION__,
                                                rdma_link->max_size_buf_tm,
                                                buf_size, buf_size_page);
                } else
                        INIT_POOL_BUF_DEBUG_MSG("%s: max_size_buf: 0x%08x "
                                                "buf_size: 0x%08x buf_size_page: %d\n",
                                                __FUNCTION__,
                                                rdma_link->max_size_buf,
                                                buf_size, buf_size_page);
                rdma_link->buf_size = buf_size;
                pool_buf->buf_size = buf_size;
                pool_buf->size = buf_size * rdma_link->num_buf;
                pool_buf->node_mem_alloc = rdma_link->node_mem_alloc;
                pool_buf->node_for_memory = NUM_NODE_RDMA(link);
                pool_buf->tm_mode = rdma_link->tm_mode;
                pool_buf->align_buf_tm = rdma_link->align_buf_tm;
                INIT_POOL_BUF_DEBUG_MSG("%s: buffer(%s) buf_size: 0x%016lx tm_mode: %d "
                                        "node_for_memory: 0x%08x\n", __FUNCTION__,
                                        rw ? "write" : "read", pool_buf->size,
                                        pool_buf->tm_mode,
                                        pool_buf->node_for_memory);
                if (rdma_mem_alloc_pool(pool_buf)) {
                        ERROR_MSG("%s: ERROR: Cannot alloc device buffer "
                                  "for link: %d buf: %s\n", __FUNCTION__,
                                  link, rw ? "write" : "read");
                        goto failed;
                }
        }
        pool_buf->alloc = RDMA_BUF_ALLOCED;
        /*
         * Init lists
         */
        INIT_LIST_HEAD(&pool_buf->ready_list);
        INIT_LIST_HEAD(&pool_buf->free_list);
        INIT_LIST_HEAD(&pool_buf->busy_list);

        if (pool_buf->tm_mode)
                peltbl = (rdma_tbl_64_struct_t *)pool_buf->vdma;
        for (i = 0; i < rdma_link->num_buf; i++) {
                r_buf = &pool_buf->buf[i];
                INIT_POOL_BUF_DEBUG_MSG("%s: ADDR BUFF[%d]: %p\n", __FUNCTION__,
                                        i, r_buf);
                INIT_POOL_BUF_DEBUG_MSG("%s: alloc buf[%d]\n", __FUNCTION__, i);
                pool_buf->buf[i].num = i;
                INIT_POOL_BUF_DEBUG_MSG("%s: pool_buf->buf[%d].num: 0x%08x\n",
                                        __FUNCTION__, i, pool_buf->buf[i].num);
                pool_buf->buf[i].st = RDMA_BUF_ST_FREE;
                INIT_POOL_BUF_DEBUG_MSG("%s: pool_buf->buf[%d].st: 0x%08x\n",
                                        __FUNCTION__, i, pool_buf->buf[i].st);
                if (pool_buf->tm_mode) {
                        peltbl_tmp = peltbl + i * buf_size_page;
                        pool_buf->buf[i].buf_addr =
                                (caddr_t)((unsigned long)peltbl_tmp);
                        /*
                         * For small changes
                         */
                        pool_buf->buf[i].buf_addr_small =
                                (dma_addr_t)peltbl_tmp->addr;
                        pxx.addr = (unsigned long)pool_buf->buf[i].buf_addr_small;
                        INIT_POOL_BUF_DEBUG_MSG("%s: SMALL 0x%08x%08x pool_buf->buf[%d].buf_addr_small\n",
                                                __FUNCTION__, pxx.fields.haddr,
                                                pxx.fields.laddr, i);
                        pool_buf->buf[i].dma_addr = (dma_addr_t)
                                virt_to_phys(pool_buf->buf[i].buf_addr);
                        pxx.addr = (unsigned long)peltbl_tmp;
                        INIT_POOL_BUF_DEBUG_MSG("%s: 0x%08x%08x peltbl: %p buf[%d]\n",
                                                __FUNCTION__, pxx.fields.haddr,
                                                pxx.fields.laddr, peltbl_tmp, i);
                        pxx.addr = peltbl_tmp->addr;
                        INIT_POOL_BUF_DEBUG_MSG("%s: 0x%08x%08x peltbl->addr buf[%d]\n",
                                                __FUNCTION__, pxx.fields.haddr,
                                                pxx.fields.laddr, i);
                        INIT_POOL_BUF_DEBUG_MSG("%s: 0x%llx peltbl->sz buf[%d]\n",
                                                __FUNCTION__, peltbl_tmp->sz, i);
                } else {
                        pool_buf->buf[i].buf_addr =
                                (caddr_t)((unsigned long)pool_buf->vdma +
                                          buf_size * i);
                        pool_buf->buf[i].dma_addr =
                                (dma_addr_t)virt_to_phys(pool_buf->buf[i].buf_addr);
                        pool_buf->buf[i].buf_addr_small =
                                pool_buf->buf[i].dma_addr;
                        /*
                         * For small changes
                         */
                        pxx.addr = (unsigned long)pool_buf->buf[i].buf_addr_small;
                        INIT_POOL_BUF_DEBUG_MSG("%s: SMALL 0x%08x%08x pool_buf->buf[%d].buf_addr_small\n",
                                                __FUNCTION__, pxx.fields.haddr,
                                                pxx.fields.laddr, i);
                }
                pool_buf->buf[i].size = pool_buf->buf_size;
                INIT_POOL_BUF_DEBUG_MSG("%s: pool_buf->buf[%d].size: 0x%016lx\n",
                                        __FUNCTION__, i, pool_buf->buf[i].size);
                pxx.addr = (unsigned long)pool_buf->buf[i].buf_addr;
                INIT_POOL_BUF_DEBUG_MSG("%s: 0x%08x%08x "
                                        "pool_buf->buf[%d].buf_addr\n",
                                        __FUNCTION__, pxx.fields.haddr,
                                        pxx.fields.laddr, i);
                pxx.addr = pool_buf->buf[i].dma_addr;
                INIT_POOL_BUF_DEBUG_MSG("%s: 0x%08x%08x "
                                        "pool_buf->buf[%d].dma_addr\n",
                                        __FUNCTION__, pxx.fields.haddr,
                                        pxx.fields.laddr, i);
                list_add_tail(&pool_buf->buf[i].list, &pool_buf->free_list);
        }
        pool_buf->num_free_buf = rdma_link->num_buf;
        INIT_POOL_BUF_DEBUG_MSG("%s: buffer(%s) STOP\n", __FUNCTION__,
                                rw ? "write" : "read");
        return 0;
failed:
        return -1;
}
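
/*
 * Layout sketch (illustrative): in single mode the pool is one
 * contiguous DMA area of num_buf * buf_size bytes, and buf[i] starts
 * at vdma + i * buf_size. In table mode vdma instead holds an array of
 * rdma_tbl_64_struct_t descriptors, buf_size_page entries per buffer,
 * so buf[i].buf_addr points at its descriptor slice and
 * buf[i].buf_addr_small at the first page it describes.
 */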
#define FREE_POOL_BUF_DBG 0
#define FREE_POOL_BUF_DEBUG_MSG(x...)\
                if (FREE_POOL_BUF_DBG) DEBUG_MSG(x)
static int free_pool_buf(int link, int rw)
{
        rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];
        rdma_addr_struct_t pxx;
        rdma_pool_buf_t *pool_buf;
        int i;

        FREE_POOL_BUF_DEBUG_MSG("%s: buffer(%s) START\n", __FUNCTION__,
                                rw ? "write" : "read");
        rw ? (pool_buf = &rdma_link->write_pool) :
             (pool_buf = &rdma_link->read_pool);
        /*
         * Free the pool memory (user access address and DMA address)
         */
        if (!rdma_link->type_alloc)
                rdma_mem_free_pool(pool_buf);
        for (i = 0; i < rdma_link->num_buf; i++) {
                if (pool_buf->buf[i].size) {
                        FREE_POOL_BUF_DEBUG_MSG("%s: free buf[%d]\n",
                                                __FUNCTION__, i);
                        pool_buf->buf[i].size = 0;
                        FREE_POOL_BUF_DEBUG_MSG("%s: pool_buf->buf[%d].size: 0x%016lx\n",
                                                __FUNCTION__, i,
                                                pool_buf->buf[i].size);
                        pool_buf->buf[i].buf_addr = NULL;
                        pxx.addr = (unsigned long)pool_buf->buf[i].buf_addr;
                        FREE_POOL_BUF_DEBUG_MSG("%s: 0x%08x%08x "
                                                "pool_buf->buf[%d].buf_addr\n",
                                                __FUNCTION__, pxx.fields.haddr,
                                                pxx.fields.laddr, i);
                        pool_buf->buf[i].dma_addr = 0;
                        pxx.addr = pool_buf->buf[i].dma_addr;
                        FREE_POOL_BUF_DEBUG_MSG("%s: 0x%08x%08x "
                                                "pool_buf->buf[%d].dma_addr\n",
                                                __FUNCTION__, pxx.fields.haddr,
                                                pxx.fields.laddr, i);
                }
        }
        return 0;
}
/*
 * Init buffers
 */
int bufs_init(int link)
{
        busy_rdma_boot_mem = 0;
        if (pool_buf_init(link, READER))
                goto failed;
        busy_rdma_boot_mem = 1;
        if (pool_buf_init(link, WRITER))
                goto failed;
        return 0;
failed:
        return 1;
}

/*
 * Free buffers
 */
void bufs_free(int link)
{
        free_pool_buf(link, READER);
        free_pool_buf(link, WRITER);
}
#define MOK_X_SET_MODE_DBG 0
#define MOK_X_SET_MODE_DEBUG_MSG(x...)\
                if (MOK_X_SET_MODE_DBG) printk(x)

/*
 * Set default mode
 * ============================================================================
 */
int set_mode_default(int link)
{
        int ret;

        MOK_X_SET_MODE_DEBUG_MSG("Unset bit enable status reg: ");
        if ((ret = unset_mok_x_SR_enable(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit master status reg: ");
        if ((ret = unset_mok_x_SR_master(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit slave status reg: ");
        if ((ret = unset_mok_x_SR_slave(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit enable_transmit status reg: ");
        if ((ret = unset_mok_x_SR_enable_trasmit(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit enable_receive status reg: ");
        if ((ret = unset_mok_x_SR_enable_receive(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit ready_to_receive status reg: ");
        if ((ret = unset_mok_x_SR_ready_to_receive(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit granted_packet status reg: ");
        if ((ret = unset_mok_x_SR_granted_packet(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit granted_last_packet status reg: ");
        if ((ret = unset_mok_x_SR_granted_last_packet(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit mode1 status reg: ");
        if ((ret = unset_mok_x_SR_mode1(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit mode2 status reg: ");
        if ((ret = unset_mok_x_SR_mode2(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit mode3 status reg: ");
        if ((ret = unset_mok_x_SR_mode3(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset bit mode4 status reg: ");
        if ((ret = unset_mok_x_SR_mode4(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
fail:
        return ret;
}
int set_mode_default_remote(int link)
{
        int ret;

        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit enable status reg: ");
        if ((ret = unset_mok_x_remote_SR_enable(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit master status reg: ");
        if ((ret = unset_mok_x_remote_SR_master(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit slave status reg: ");
        if ((ret = unset_mok_x_remote_SR_slave(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit enable_transmit status reg: ");
        if ((ret = unset_mok_x_remote_SR_enable_trasmit(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit enable_receive status reg: ");
        if ((ret = unset_mok_x_remote_SR_enable_receive(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit ready_to_receive status reg: ");
        if ((ret = unset_mok_x_remote_SR_ready_to_receive(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit granted_packet status reg: ");
        if ((ret = unset_mok_x_remote_SR_granted_packet(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit granted_last_packet status reg: ");
        if ((ret = unset_mok_x_remote_SR_granted_last_packet(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit mode1 status reg: ");
        if ((ret = unset_mok_x_remote_SR_mode1(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit mode2 status reg: ");
        if ((ret = unset_mok_x_remote_SR_mode2(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit mode3 status reg: ");
        if ((ret = unset_mok_x_remote_SR_mode3(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Unset remote bit mode4 status reg: ");
        if ((ret = unset_mok_x_remote_SR_mode4(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
fail:
        return ret;
}
int check_mode_default(int link)
{
        int ret;

        MOK_X_SET_MODE_DEBUG_MSG("Get bit enable status reg: ");
        if ((ret = get_mok_x_SR_enable(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit master status reg: ");
        if ((ret = get_mok_x_SR_master(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit slave status reg: ");
        if ((ret = get_mok_x_SR_slave(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit enable_transmit status reg: ");
        if ((ret = get_mok_x_SR_enable_trasmit(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit enable_receive status reg: ");
        if ((ret = get_mok_x_SR_enable_receive(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit ready_to_receive status reg: ");
        if ((ret = get_mok_x_SR_ready_to_receive(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit granted_packet status reg: ");
        if ((ret = get_mok_x_SR_granted_packet(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit granted_last_packet status reg: ");
        if ((ret = get_mok_x_SR_granted_last_packet(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit mode1 status reg: ");
        if ((ret = get_mok_x_SR_mode1(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit mode2 status reg: ");
        if ((ret = get_mok_x_SR_mode2(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit mode3 status reg: ");
        if ((ret = get_mok_x_SR_mode3(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit mode4 status reg: ");
        if ((ret = get_mok_x_SR_mode4(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
fail:
        return ret;
}
/*
 * Set native mode
 * ============================================================================
 */
int set_mode_native(int link)
{
        int ret;

        MOK_X_SET_MODE_DEBUG_MSG("Set bit enable status reg: ");
        if ((ret = set_mok_x_SR_enable(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Set bit enable_transmit status reg: ");
        if ((ret = set_mok_x_SR_enable_trasmit(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Set bit enable_receive status reg: ");
        if ((ret = set_mok_x_SR_enable_receive(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Set bit ready_to_receive status reg: ");
        if ((ret = set_mok_x_SR_ready_to_receive(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Set bit granted_packet status reg: ");
        if ((ret = set_mok_x_SR_granted_packet(link))) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
fail:
        return ret;
}
int check_mode_native(int link)
{
        int ret;

        MOK_X_SET_MODE_DEBUG_MSG("Get bit enable status reg: ");
        if ((ret = get_mok_x_SR_enable(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit enable_transmit status reg: ");
        if ((ret = get_mok_x_SR_enable_trasmit(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit enable_receive status reg: ");
        if ((ret = get_mok_x_SR_enable_receive(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit ready_to_receive status reg: ");
        if ((ret = get_mok_x_SR_ready_to_receive(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        MOK_X_SET_MODE_DEBUG_MSG("Get bit granted_packet status reg: ");
        if ((ret = get_mok_x_SR_granted_packet(link)) < 1) {
                MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
                goto fail;
        }
        MOK_X_SET_MODE_DEBUG_MSG(" %d\n", ret);
        return 0;
fail:
        return 1;
}
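
/*
 * Convention note (illustrative): unlike check_mode_default(), which
 * returns the last get_mok_x_SR_*() result, check_mode_native()
 * normalizes its answer to 0 (all required bits set) or 1 (a bit
 * missing or a read error, i.e. a get_* call returned < 1).
 */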
int mok_x_set_native_mode(int link, int *error)
{
#if RESET_THREAD_DMA
        rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];
        unsigned long flags;
#endif
        int ret = SUCCES_MOK_X;

#ifdef SETTING_OVER_INTERRUPT
        if (set_mask(link, irq_mc)) {
                *error = ERRDMA_SET_MASK;
                ret = FAILED_MOK_X;
                goto filed_set_mode_native;
        }
#endif
        if ((ret = set_mode_native(link))) {
                *error = ret;
                ret = FAILED_MOK_X;
                goto filed_set_mode_native;
        }
        INFO_MSG("MOKX set to native mode. Error(%d).\n", ret);
        if ((ret = check_mode_native(link))) {
                if (!ret)
                        *error = ERROR_MOK_X_NOT_SET_BIT;
                else
                        *error = ret;
                ret = FAILED_MOK_X;
                goto filed_set_mode_native;
        }
        INFO_MSG("MOKX check native mode. Error(%d).\n", ret);
#if RESET_THREAD_DMA
        if (thread_reset_start(link)) {
                *error = ERRDMA_THREAD_RESET_START;
                ret = FAILED_MOK_X;
                goto filed_set_mode_native;
        }
        if (send_SGP2_Msg(link) < 1) {
                *error = ERRDMA_GP0_SEND;
                ret = FAILED_MOK_X;
                goto filed_set_mode_native;
        }
        if (set_mask(link, irq_mc_03)) {
                *error = ERRDMA_SET_MASK;
                ret = FAILED_MOK_X;
                goto filed_set_mode_native;
        }
        raw_spin_lock_irqsave(&rdma_link->rst_thr_lock, flags);
        rdma_link->start_rst_thr = 1;
        raw_spin_unlock_irqrestore(&rdma_link->rst_thr_lock, flags);
        wake_up_process(rdma_link->rst_thr);
        INFO_MSG("MOKX start reset thread. Error(%d).\n", ret);
#else
#ifndef SETTING_OVER_INTERRUPT
        if (set_mask(link, irq_mc)) {
                *error = ERRDMA_SET_MASK;
                ret = FAILED_MOK_X;
                goto filed_set_mode_native;
        }
#endif
#endif
#if 0
        if (send_SIR_Msg(link) < 1) {
                *error = ERRDMA_ID_SEND;
                ret = FAILED_MOK_X;
                goto filed_set_mode_native;
        }
        INFO_MSG("MOKX send SIR. Error(%d).\n", ret);
#endif
        return ret;
filed_set_mode_native:
        set_mask(link, 0x0);
#if RESET_THREAD_DMA
        thread_reset_stop(link);
#endif
        return ret;
}
int mok_x_unset_native_mode(int link, int *error)
{
        int ret = SUCCES_MOK_X;

        if (set_mask(link, MASK_INTERRUPT_NULL)) {
                *error = ERRDMA_SET_MASK;
                ret = FAILED_MOK_X;
        }
#if RESET_THREAD_DMA
        thread_reset_stop(link);
#endif
        return ret;
}
#define MOK_X_EX_MODE_INIT_DBG 0
#define MOK_X_EX_MODE_INIT_DEBUG_MSG(x...)\
                if (MOK_X_EX_MODE_INIT_DBG) DEBUG_MSG(x)

#define IS_NOT_SET_REMOTE_SYSTEM_SLAVE  11
#define IS_NOT_SET_REMOTE_MODE4         12
#define IS_NOT_SET_SYSTEM_MASTER        13
#define IS_NOT_SET_MODE3                14
#define IS_NOT_SET_SIZE                 15
#define IS_NOT_SET_ENABLE_RECEIVE       16
#define IS_NOT_SET_ENABLE_TRANSMIT      17
#define IS_NOT_SET_READY_TO_RECEIVE     18
#define IS_NOT_SET_GRANTED_PACKET       19
int mok_x_prog_recieve_dma(int link, int lock)
{
        rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];
        //unsigned long flags, flags_r;
        unsigned long flags_r;
        rdma_addr_struct_t p_xxb, p_xxb_pa;
        dev_rdma_sem_t *dev_sem;
        rw_state_p pd = NULL;
        rdma_pool_buf_t *r_pool_buf;
        rdma_buf_t *r_buf;
        size_t size;
        int ret = 0;

        pd = &rdma_link->rw_states_d[READER];
        dev_sem = &pd->dev_rdma_sem;
        p_xxb.addr = (unsigned long)pd;
        r_pool_buf = &rdma_link->read_pool;

        if (lock)
                raw_spin_lock_irqsave(&pd->lock_rd, flags_r);
        /*
         * Search for a free read buffer
         */
        if (list_empty(&r_pool_buf->free_list)) {
                r_buf = NULL;
                ret = -1;
        } else {
                r_buf = list_entry(r_pool_buf->free_list.next,
                                   rdma_buf_t, list);
                list_move_tail(&r_buf->list,
                               &r_pool_buf->ready_list);
                r_pool_buf->num_free_buf--;
                /*
                 * Program the DMA receiver
                 */
                size = rdma_link->mok_x_buf_size;
                /*
                 * Check for a bad size. TODO ???
                 */
                if (size > r_buf->size) {
                        event_intr(link, READ_BADSIZE_EVENT,
                                   size, dev_sem->num_obmen);
                        event_intr(link, READ_BADSIZE_EVENT, r_buf->size,
                                   dev_sem->num_obmen);
                        size = r_buf->size;
                }
                r_buf->real_size = size;
                WRR_rdma(SHIFT_DMA_RCS, link, WCode_64);
                if (size > SMALL_CHANGE) {
                        p_xxb_pa.addr = (unsigned long)r_buf->dma_addr;
                } else {
                        p_xxb_pa.addr = (unsigned long)r_buf->buf_addr_small;
                }
                WRR_rdma(SHIFT_DMA_HRSA, link, p_xxb_pa.fields.haddr);
                WRR_rdma(SHIFT_DMA_RSA, link, p_xxb_pa.fields.laddr);
                if (size > SMALL_CHANGE) {
                        pd->size_trans = (r_pool_buf->tm_mode ?
                                ALIGN(size, (rdma_link->align_buf_tm * PAGE_SIZE)) :
                                (rfsm ? r_buf->size : allign_dma(size)));
                        WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
                        //read_regs_rdma(link);
                        WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 |
                                 DMA_RCS_RE |
                                 (r_pool_buf->tm_mode ? DMA_RCS_RTM : 0) |
                                 (r_pool_buf->tm_mode ? 0 : DMA_RCS_RFSM));
                        if (rdma_link->mok_x_mode_number_link == MODE3_LINK)
                                set_mok_x_SR_ready_to_receive(link);
                        //read_regs_rdma(link);
                } else {
                        pd->size_trans = allign_dma(size);
                        WRR_rdma(SHIFT_DMA_RCS, link, WCode_64);
                        WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
                        //read_regs_rdma(link);
                        WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 |
                                 DMA_RCS_RE | DMA_RCS_RFSM);
                        if (rdma_link->mok_x_mode_number_link == MODE3_LINK)
                                set_mok_x_SR_ready_to_receive(link);
                        //read_regs_rdma(link);
                }
        }
        r_pool_buf->work_buf = r_buf;
        if (lock)
                raw_spin_unlock_irqrestore(&pd->lock_rd, flags_r);
        return ret;
}
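
/*
 * Programming-order sketch (as implemented above): the receiver is
 * armed by writing WCode_64 to DMA_RCS first, then the 64-bit DMA
 * address split across DMA_HRSA (high half) and DMA_RSA (low half),
 * then the byte count to DMA_RBC, and finally DMA_RCS again with
 * DMA_RCS_RE set, so the enable bit is the last thing the hardware
 * sees. In MODE3_LINK the local ready_to_receive bit is raised only
 * after the channel is enabled.
 */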
1851 //int mok_x_set_mode4(int link, int test_generator)
1852 int mok_x_set_mode4(int link)
1854 rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];
1855 int ret = 0;
1857 #if RESET_THREAD_DMA
1858 unsigned long flags;
1860 raw_spin_lock_irqsave(&rdma_link->rst_thr_lock, flags);
1861 rdma_link->start_rst_thr = 1;
1862 raw_spin_unlock_irqrestore(&rdma_link->rst_thr_lock, flags);
1863 wake_up_process(rdma_link->rst_thr);
1864 mdelay(1000);
1865 //thread_reset_stop(link);
1866 #endif
1867 #ifdef SET_ENABLE_RECEIVE_BIT
1868 #ifdef SETTING_OVER_INTERRUPT
1869 WRR_rdma(SHIFT_IRQ_MC, link , ES_RDM_Ev);
1870 #endif
1871 #endif
1872 MOK_X_SET_MODE_DEBUG_MSG("Set bit granted_packet status reg remote controller: ");
1873 if (ret = set_mok_x_remote_SR_granted_packet(link)) {
1874 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1875 goto fail;
1877 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1878 MOK_X_SET_MODE_DEBUG_MSG("Set bit enable status reg remote controller: ");
1879 if (ret = set_mok_x_remote_SR_enable(link)) {
1880 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1881 goto fail;
1883 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1884 MOK_X_SET_MODE_DEBUG_MSG("Set bit enable_receive in remote controller status reg: ");
1885 if ((ret = set_mok_x_remote_SR_enable_receive(link))) {
1886 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1887 goto fail;
1889 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1890 MOK_X_SET_MODE_DEBUG_MSG("Set bit enable_transmit in remote controller status reg: ");
1891 if ((ret = set_mok_x_remote_SR_enable_trasmit(link))) {
1892 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1893 goto fail;
1895 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1896 if (rdma_link->mok_x_mode_number_link == MODE3_LINK) {
1897 MOK_X_SET_MODE_DEBUG_MSG("Set bit mode3 in local controller status reg: ");
1898 if ((ret = set_mok_x_SR_mode3(link))) {
1899 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1900 goto fail;
1902 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1903 MOK_X_SET_MODE_DEBUG_MSG("Set buffer size (0x%x) on local controller: ",
1904 rdma_link->mok_x_buf_size);
1905 if ((ret = set_mok_x_SIZE(link, rdma_link->mok_x_buf_size))) {
1906 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1907 goto fail;
1909 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1911 if (rdma_link->generator_mode) {
1912 MOK_X_SET_MODE_DEBUG_MSG("Set bit slave in remote controller status reg: ");
1913 if ((ret = set_mok_x_remote_SR_slave(link))) {
1914 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1915 goto fail;
1917 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1918 MOK_X_SET_MODE_DEBUG_MSG("Set bit mode4 in remote controller status reg: ");
1919 if ((ret = set_mok_x_remote_SR_mode4(link))) {
1920 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1921 goto fail;
1923 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1925 set_mask(link, irq_mc_rdc);
1926 fail:
1927 return ret;
1930 int mok_x_unset_mode4(int link)
1932 rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];
1933 int ret = 0;
1935 #ifdef SET_ENABLE_RECEIVE_BIT
1936 #ifdef SETTING_OVER_INTERRUPT
1937 WRR_rdma(SHIFT_IRQ_MC, link , ES_RDM_Ev);
1938 #endif
1939 #endif
1940 if (rdma_link->generator_mode) {
1941 MOK_X_SET_MODE_DEBUG_MSG("Unset bit mode4 in remote controller status reg: ");
1942 if ((ret = unset_mok_x_remote_SR_mode4(link))) {
1943 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1944 goto fail;
1946 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1947 MOK_X_SET_MODE_DEBUG_MSG("Unset bit slave in remote controller status reg: ");
1948 if ((ret = unset_mok_x_remote_SR_slave(link))) {
1949 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1950 goto fail;
1952 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1953 #if 0
1954 unsigned type_msg;
1955 type_msg = (2 * 1 + 1) << RDMA_MOK_X_MSG_SHIFT;
1956 //ret = WRR_mok_x(link, type_msg, MOK_X_COMMAND,
1957 ret = WRR_mok_x(link, RDMA_MOK_X_REMOTE_REG_WRITE, MOK_X_COMMAND,
1958 MOK_X_COMMAND_RESET);
1959 INFO_MSG("RESET TRANSMIT: %d\n", ret);
1960 #endif
1962 if (rdma_link->mok_x_mode_number_link == MODE3_LINK) {
1963 MOK_X_SET_MODE_DEBUG_MSG("Unset bit mode3 in local controller status reg: ");
1964 if ((ret = unset_mok_x_SR_mode3(link))) {
1965 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1966 goto fail;
1968 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1969 MOK_X_SET_MODE_DEBUG_MSG("Set bit ready_to_receive in status reg: ");
1970 if ((ret = set_mok_x_SR_ready_to_receive(link))) {
1971 MOK_X_SET_MODE_DEBUG_MSG("error (%d)\n", ret);
1972 goto fail;
1974 MOK_X_SET_MODE_DEBUG_MSG("ok (%d)\n", ret);
1976 fail:
1977 #ifdef SET_ENABLE_RECEIVE_BIT
1978 #ifdef SETTING_OVER_INTERRUPT
1979 WRR_rdma(SHIFT_IRQ_MC, link , 0x0);
1980 #endif
1981 #endif
1982 //set_mask(link, irq_mc);
1983 rdma_link->generator_mode = 0;
1984 #if RESET_THREAD_DMA
1985 unsigned long flags;
1986 mdelay(1000);
1987 raw_spin_lock_irqsave(&rdma_link->rst_thr_lock, flags);
1988 rdma_link->start_rst_thr = 1;
1989 raw_spin_unlock_irqrestore(&rdma_link->rst_thr_lock, flags);
1990 wake_up_process(rdma_link->rst_thr);
1991 #endif
1992 return ret;
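/*
 * Module initialization, in order: guard against a double load, probe
 * for online RDMA links (no hot plugging), validate the num_buf and
 * max_size_buf/max_size_buf_tm module parameters, register the char
 * device and allocate rdma_state, create the device nodes, set up
 * per-link state and DMA buffer pools, hook the interrupt handler, and
 * finally switch every online link into native mode.
 */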
1995 #define RDMA_INIT_DBG 0
1996 #define RDMA_INIT_DEBUG_MSG(x...) \
1997 do { if (RDMA_INIT_DBG) DEBUG_MSG(x); } while (0)
1999 static int __init rdma_init(void)
2001 size_t size_rdma_state;
2002 int link;
2003 int node;
2004 int major;
2005 int ret = SUCCES_MOK_X;
2007 if (!rdma_present) {
2008 rdma_present = 1;
2009 } else {
2010 ERROR_MSG("%s: RDMA registers busy.\n", __FUNCTION__);
2011 ret = -EBUSY;
2012 goto rdma_init_failed;
2014 init_regs();
2015 if (HAS_MACHINE_L_SIC) {
2016 if (!num_possible_rdmas()) {
2017 ERROR_MSG("%s: hard rdma is absent\n", __FUNCTION__);
2018 rdma_present = 0;
2019 ret = -ENODEV;
2020 goto rdma_init_failed_0;
2023 * Not hot plugging
2025 if (!num_online_rdmas()) {
2026 ERROR_MSG("%s: RDMA does not support hot plugging. "
2027 "Connect the cable and reboot the machine.\n",
2028 __FUNCTION__);
2029 rdma_present = 0;
2030 ret = -ENODEV;
2031 goto rdma_init_failed_0;
2034 INFO_MSG("Working on CUBIC, NODE_NUMIOLINKS: %d "
2035 "MAX_NUMIOLINKS: %d\n", RDMA_NODE_IOLINKS,
2036 RDMA_MAX_NUMIOLINKS);
2037 if (num_buf > RDMA_BUF_NUM) {
2038 ERROR_MSG("%s: num_buf(%d) > max_buf(%d).\n", __FUNCTION__,
2039 num_buf, RDMA_BUF_NUM);
2040 rdma_present = 0;
2041 ret = -EINVAL;
2042 goto rdma_init_failed_0;
2044 if (!tm_mode) {
2045 if (max_size_buf > MAX_SIZE_BUFF) {
2046 ERROR_MSG("%s: max_size_buf(0x%x) > MAX_SIZE_BUFF(0x%x).\n",
2047 __FUNCTION__, max_size_buf, MAX_SIZE_BUFF);
2048 rdma_present = 0;
2049 ret = -ENOMEM;
2050 goto rdma_init_failed_0;
2052 } else {
2053 if (max_size_buf_tm > MAX_SIZE_BUFF_TM) {
2054 ERROR_MSG("%s: max_size_buf_tm(0x%x) > MAX_SIZE_BUFF_TM(0x%x).\n",
2055 __FUNCTION__, max_size_buf_tm, MAX_SIZE_BUFF_TM);
2056 rdma_present = 0;
2057 ret = -ENOMEM;
2058 goto rdma_init_failed_0;
2061 #if 0
2062 if (!tm_mode) {
2063 if ((max_size_buf * num_buf ) > LIMIT_SIZE_BUFF){
2064 ERROR_MSG("%s: The large size of the buffer. "
2065 "The buffer must be: max_size_buf * "
2066 "num_buf <= 0x%08x. \n",
2067 __FUNCTION__, LIMIT_SIZE_BUFF);
2068 rdma_present = 0;
2069 ret = -EINVAL;
2070 goto rdma_init_failed;
2073 #endif
2074 INFO_MSG("Table mode: %s\n", tm_mode ? "set" : "unset");
2075 INFO_MSG("Number of buffers: %d\n", num_buf);
2076 if (tm_mode)
2077 INFO_MSG("Max buffer size in table mode: 0x%x\n",
2078 max_size_buf_tm);
2079 else
2080 INFO_MSG("Max buffer size: 0x%x\n", max_size_buf);
2081 INFO_MSG("Table row alignment: 0x%x\n", align_buf_tm);
2082 INFO_MSG("Allocate memory on own node: %s\n", node_mem_alloc ? "yes" : "no");
2083 INFO_MSG("Device creation type: %s\n", "sysfs");
2084 rdma_event_init = 1;
2085 INFO_MSG("Print events mode: %s\n", ev_pr ? "set" : "unset");
2086 node = numa_node_id();
2087 fix_event(node, RDMA_INIT, START_EVENT, 0);
2088 major = register_chrdev(0, board_name, &rdma_fops);
2089 if ( major < 0 ) {
2090 ERROR_MSG("%s: There isn't free major\n", __FUNCTION__);
2091 rdma_present = 0;
2092 ret = -EINVAL;
2093 goto rdma_init_failed_0;
2095 RDMA_INIT_DEBUG_MSG("%s: major: %d\n", __FUNCTION__, major);
2096 RDMA_INIT_DEBUG_MSG("%s: I am on %d numa_node_id\n", __FUNCTION__,
2097 node);
2098 RDMA_INIT_DEBUG_MSG("%s: %lx: sizeof (nodemask_t)\n", __FUNCTION__,
2099 sizeof (nodemask_t));
2100 size_rdma_state = sizeof (struct rdma_state);
2101 rdma_state = (struct rdma_state *)kmalloc(size_rdma_state, GFP_KERNEL);
2102 if (rdma_state == (struct rdma_state *)NULL) {
2103 ERROR_MSG("%s: rdma_state == NULL\n", __FUNCTION__);
2104 unregister_chrdev(major, board_name);
2105 rdma_present = 0;
2106 ret = -ENOMEM;
2107 goto rdma_init_failed_0;
2109 memset(rdma_state, 0, size_rdma_state);
2110 RDMA_INIT_DEBUG_MSG("%s: sizeof (struct rdma_state): 0x%016lx\n",
2111 __FUNCTION__, size_rdma_state);
2112 rdma_state->size_rdma_state = size_rdma_state;
2113 rdma_state->major = major;
2114 #ifdef MODULE
2115 if (create_dev_mokx(major))
2116 ERROR_MSG("%s: Error creating devices. "
2117 "Create a device manually.", __FUNCTION__);
2118 #endif
2120 /* Set ATL (reserved) */
2122 #if 0
2123 tr_atl = ATL_B | (atl_v & ATL);
2124 INFO_MSG("Reg CAM ATL: %x\n", tr_atl);
2125 #endif
2127 /* While memory is allocated at boot time */
2129 #if 0
2130 #ifdef CONFIG_RDMA_BOOT_MEM_ALLOC
2131 if (R_M_NODE) {
2132 INFO_MSG("%s: check alloc bootmem R_M: %x\n",
2133 __FUNCTION__, R_M_NODE);
2134 if ((long)R_M_NODE < (long)(PAGE_ALIGN(max_size_buf) * num_buf)) {
2135 ERROR_MSG("%s: Error alloc bootmem for rdma. "
2136 "R_M(%x) < max_size_buf * num_buf(%x)\n",
2137 __FUNCTION__, R_M_NODE,
2138 PAGE_ALIGN(max_size_buf) * num_buf);
2139 ret = -ENOMEM;
2140 goto rdma_init_failed;
2143 #endif
2144 #endif
2145 #ifdef RESET_DMA_MEMMORY
2146 reset_size_r = allign_dma(0x2000000);
2147 reset_size_w = allign_dma(0x1000);
2148 reset_order_r = get_order(reset_size_r);
2149 reset_order_w = get_order(reset_size_w);
2150 reset_dma_memory_r = __get_free_pages_rdma(0, GFP_KERNEL , reset_order_r, 0);
2151 reset_dma_memory_w = __get_free_pages_rdma(0, GFP_KERNEL , reset_order_w, 0);
2152 #endif
2154 /* Init links and memory */
2156 if (HAS_MACHINE_L_SIC) {
2157 for_each_online_rdma(link) {
2158 set_id_link(link);
2159 rdma_link_init(link);
2160 #ifdef ALLOC_MEM_DRIVER
2161 if (bufs_init(link)) {
2162 ret = -ENOMEM;
goto rdma_init_failed;
}
2163 #endif
2167 /* Register the interrupt handler */
2169 rdma_interrupt_p = rdma_interrupt;
2171 * Native mode
2173 for_each_online_rdma(link) {
2174 rdma_state_link_t *rdma_link;
2175 int err = 0, res = 0;
2176 rdma_link = &rdma_state->rdma_link[link];
2178 res = mok_x_set_native_mode(link, &err);
2179 printk("%s: link init: %d res: %d err: %d\n",
2180 __FUNCTION__, link, res, err);
2181 //null_change(link);
2183 return 0;
2184 rdma_init_failed:
2185 rdma_cleanup();
2186 rdma_init_failed_0:
2187 RDMA_INIT_DEBUG_MSG("%s: FINISH\n", __FUNCTION__);
2188 return ret;
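/*
 * rdma_cleanup() undoes rdma_init() for every online link: mask the
 * interrupts, free the buffer pools, stop the reset thread when
 * RESET_THREAD_DMA is enabled, unhook the interrupt handler, remove
 * the device nodes and the char device, and release rdma_state.
 */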
2191 #define RDMA_CLEANUP_DBG 0
2192 #define RDMA_CLEANUP_DEBUG_MSG(x...) \
2193 do { if (RDMA_CLEANUP_DBG) DEBUG_MSG(x); } while (0)
2194 static void rdma_cleanup(void)
2196 rdma_state_link_t *rdma_link;
2197 int link, major;
2199 major = (int)rdma_state->major;
2200 RDMA_CLEANUP_DEBUG_MSG("%s: START rdma_state->major %d\n", __FUNCTION__,
2201 major);
2202 if (HAS_MACHINE_L_SIC)
2203 for_each_online_rdma(link) {
2204 set_mask(link, MASK_INTERRUPT_NULL);
2205 bufs_free(link);
2206 #if RESET_THREAD_DMA
2207 thread_reset_stop(link);
2208 #endif
2209 rdma_link = &rdma_state->rdma_link[link];
2210 rdma_link->mok_x_mode_link = STATE_LINK_DEFAULT;
2212 rdma_interrupt_p = (void *) NULL;
2213 #ifdef MODULE
2214 remove_dev_mokx(major);
2215 #endif
2216 unregister_chrdev(rdma_state->major, board_name);
2217 rdma_event_init = 0;
2218 kfree(rdma_state);
2219 if (rdma_present)
2220 rdma_present = 0;
2221 #ifdef RESET_DMA_MEMMORY
2222 if (reset_dma_memory_r)
2223 free_pages(reset_dma_memory_r, reset_order_r);
2224 if (reset_dma_memory_w)
2225 free_pages(reset_dma_memory_w, reset_order_w);
2226 #endif
2227 RDMA_CLEANUP_DEBUG_MSG("%s: FINISH\n", __FUNCTION__);
2228 return;
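/*
 * rdma_close() resolves the link and the READ/WRITE channel from the
 * minor number, drops the corresponding "opened" bit, waits for any
 * DMA still in flight (bounded by TX_RX_WAIT_DMA polls of RCS/TCS),
 * and returns every buffer of the matching pool to its free list. A
 * reader additionally stops the mode4 generator if one was running and
 * reports its free buffer count to the peer via MSG_READY_DMA.
 */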
2231 #define RDMA_CLOSE_DBG 0
2232 #define RDMA_CLOSE_DEBUG_MSG(x...) \
2233 do { if (RDMA_CLOSE_DBG) DEBUG_MSG(x); } while (0)
2234 static int rdma_close(struct inode *inode, struct file *file)
2236 rdma_state_link_t *rdma_link;
2237 dev_rdma_sem_t *dev_sem;
2238 rw_state_t *rdma_private_data;
2239 rw_state_p pd;
2240 unsigned long flags, flags_w, flags_r;
2241 int minor, file_eys = 0, i;
2242 int link, file_open_mode;
2244 /* TODO: clean all of this up via rdma_private_data */
2245 RDMA_CLOSE_DEBUG_MSG("%s: START\n", __FUNCTION__);
2246 minor = MINOR(inode->i_rdev);
2247 if (minor < 0) {
2248 ERROR_MSG("%s: minor(%d) < 0\n", __FUNCTION__, minor);
2249 return (-EINVAL);
2251 link = DEV_inst(minor);
2252 if (HAS_MACHINE_L_SIC) {
2253 for_each_online_rdma(i)
2254 if (i == link)
2255 file_eys++;
2256 } else {
2257 if (0 == link)
2258 file_eys++;
2260 if (!file_eys) {
2261 ERROR_MSG("%s: link %d not support RDMA\n", __FUNCTION__,
2262 link);
2263 return (-EINVAL);
2265 rdma_link = &rdma_state->rdma_link[link];
2266 file_open_mode = minor % 2;
2267 rdma_private_data = &rdma_link->rw_states_d[file_open_mode];
2268 RDMA_CLOSE_DEBUG_MSG("%s: mode close %s (minor: 0x%08x)\n",
2269 __FUNCTION__, file_open_mode ? "WRITE" : "READ", minor);
2270 mutex_enter(&rdma_link->mu);
2271 rdma_link->opened &= ~(1 << rdma_private_data->open_mode);
2272 rdma_private_data->open_mode = 0;
2273 file->private_data = NULL;
2274 RDMA_CLOSE_DEBUG_MSG("%s: opened.minor.link.channel: 0x%x.%d.%d.%d\n",
2275 __FUNCTION__, rdma_link->opened, minor, link,
2276 rdma_private_data->open_mode);
2277 mutex_exit(&rdma_link->mu);
2279 pd = &rdma_link->rw_states_d[file_open_mode];
2280 dev_sem = &pd->dev_rdma_sem;
2283 * File open as READER
2285 if (!file_open_mode) {
2286 rdma_pool_buf_t *r_pool_buf;
2287 unsigned int ret_wait_rdc;
2288 unsigned int sending_msg;
2289 unsigned int ret_smsg;
2290 int count_wait_rdc = TX_RX_WAIT_DMA;
2292 r_pool_buf = &rdma_link->read_pool;
2294 * Unset mode4
2298 if (rdma_link->mok_x_mode_link == STATE_LINK_ONLY_RECIVE) {
2299 unsigned int tmp_reg;
2301 raw_spin_lock_irqsave(&dev_sem->lock, flags);
2302 rdma_link->generator_stop = 1;
2303 raw_spin_unlock_irqrestore(&dev_sem->lock, flags);
2304 mdelay(1000);
2305 WRR_rdma(SHIFT_IRQ_MC, link , 0x0);
2306 mdelay(1000);
2307 //mok_x_unset_mode4(link);
2308 //get_event_rdma(1);
2309 //WRR_rdma(SHIFT_IRQ_MC, link , 0x0);
2310 //INFO_MSG("%s: Stop generator. Stack reg.\n",
2311 // __FUNCTION__);
2312 //read_regs_rdma(link);
2313 tmp_reg = RDR_rdma(SHIFT_ES, link);
2314 WRR_rdma(SHIFT_ES, link, tmp_reg & ES_RDC_Ev);
2315 //WRR_rdma(SHIFT_IRQ_MC, link ,irq_mc);
2317 tmp_reg = RDR_rdma(SHIFT_DMA_RCS, link);
2318 WRR_rdma(SHIFT_DMA_RCS, link, tmp_reg & (~DMA_RCS_RE));
2319 //WRR_rdma(SHIFT_DMA_RBC, link, 0x0);
2320 RDMA_CLOSE_DEBUG_MSG("%s: link %d reset receive. RCS: 0x%08x "
2321 "RBC: 0x%08x\n", __FUNCTION__, link,
2322 RDR_rdma(SHIFT_DMA_RCS, link),
2323 RDR_rdma(SHIFT_DMA_RBC, link));
2324 null_change(link);
2325 rdma_link->mok_x_mode_link = STATE_LINK_NATIVE;
2326 WRR_rdma(SHIFT_IRQ_MC, link ,irq_mc);
2327 } else {
2328 //INFO_MSG("%s: Stop VK-VK. Stack reg.\n",
2329 // __FUNCTION__);
2330 //read_regs_rdma(link);
2333 #ifdef UNX_TRWD
2334 raw_spin_lock_irqsave(&dev_sem->lock, flags);
2335 rdma_link->unexpected_trwd_size = 0x0;
2336 rdma_link->unexpected_trwd = 0x0;
2337 raw_spin_unlock_irqrestore(&dev_sem->lock, flags);
2338 #endif
2340 /* Receiver: wait for DMA to complete */
2342 while (count_wait_rdc--) {
2343 ret_wait_rdc = RDR_rdma(SHIFT_DMA_RCS, link);
2344 if (!(ret_wait_rdc & DMA_RCS_RE)) {
2345 goto end_wait_rdc;
2348 ERROR_MSG("%s: link %d ret_wait_rdc: 0x%08x "
2349 "count_wait_rdc: %d\n", __FUNCTION__, link,
2350 ret_wait_rdc, count_wait_rdc);
2352 end_wait_rdc:
2353 raw_spin_lock_irqsave(&pd->lock_rd, flags_r);
2355 /* Release the buffers */
2357 while (!list_empty(&r_pool_buf->ready_list)) {
2358 list_move_tail(r_pool_buf->ready_list.next,
2359 &r_pool_buf->free_list);
2361 while (!list_empty(&r_pool_buf->busy_list)) {
2362 list_move_tail(r_pool_buf->busy_list.next,
2363 &r_pool_buf->free_list);
2365 r_pool_buf->num_free_buf = 0;
2366 raw_spin_unlock_irqrestore(&pd->lock_rd, flags_r);
2368 * Send READY_DMA
2370 if (rdma_link->mok_x_mode_link != STATE_LINK_ONLY_RECIVE) {
2371 sending_msg = MSG_READY_DMA | r_pool_buf->num_free_buf;
2372 if ((ret_smsg = send_msg_check(sending_msg, link, 0,
2373 dev_sem, 0)) <= 0) {
2374 fix_event(link, READ_SNDMSGBAD_EVENT,
2375 sending_msg, dev_sem->num_obmen);
2376 } else {
2377 fix_event(link, READ_SNDNGMSG_EVENT,
2378 sending_msg, dev_sem->num_obmen);
2381 #ifdef UNX_TRWD
2382 //printk("%s: REPEAT_TRWD: %x\n", __FUNCTION__, REPEAT_TRWD);
2383 #endif
2384 } else {
2386 * File open as WRITER
2388 rdma_pool_buf_t *w_pool_buf;
2389 unsigned int ret_wait_tdc;
2390 int count_wait_tdc = TX_RX_WAIT_DMA;
2392 w_pool_buf = &rdma_link->write_pool;
2394 /* Sender: wait for DMA to complete */
2396 while (count_wait_tdc--)
2398 ret_wait_tdc = RDR_rdma(SHIFT_DMA_TCS, link);
2399 if (!(ret_wait_tdc & DMA_TCS_TE)) {
2400 goto end_wait_tdc;
2403 ERROR_MSG("%s: link %d ret_wait_tdc: 0x%08x count_wait_tdc: %d\n",
2404 __FUNCTION__, link, ret_wait_tdc, count_wait_tdc);
2405 end_wait_tdc:
2406 raw_spin_lock_irqsave(&pd->lock_wr, flags_w);
2408 /* Release the buffers */
2410 while (!list_empty(&w_pool_buf->ready_list)) {
2411 list_move_tail(w_pool_buf->ready_list.next,
2412 &w_pool_buf->free_list);
2414 while (!list_empty(&w_pool_buf->busy_list)) {
2415 list_move_tail(w_pool_buf->busy_list.next,
2416 &w_pool_buf->free_list);
2418 raw_spin_unlock_irqrestore(&pd->lock_wr, flags_w);
2419 //printk("rdma_link->trwd_lock_err: %x\n",
2420 // rdma_link->trwd_lock_err);
2422 #if 0
2423 #ifdef SET_ENABLE_RECEIVE_BIT
2425 /* Set enable receive after reset */
2427 #ifndef SETTING_OVER_INTERRUPT
2428 WRR_rdma(SHIFT_IRQ_MC, link , 0x0);
2429 #endif
2430 set_mode_native(link);
2431 //udelay(1000);
2432 #ifndef SETTING_OVER_INTERRUPT
2433 WRR_rdma(SHIFT_IRQ_MC, link ,irq_mc);
2434 #endif
2435 #endif
2436 #endif
2437 raw_spin_lock_irqsave(&dev_sem->lock, flags);
2438 pd->state_open_close = 0;
2439 raw_spin_unlock_irqrestore(&dev_sem->lock, flags);
2440 RDMA_CLOSE_DEBUG_MSG("%s: FINISH\n", __FUNCTION__);
2441 return 0;
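/*
 * rdma_open() is the mirror image: one READER and one WRITER open per
 * link, so a second open of the same channel fails with -EBUSY. It
 * re-enables receive, resets the buffer pools, waits out any DMA left
 * over from the previous user, and, for a reader, advertises the free
 * buffer count to the peer with MSG_READY_DMA so transmission can start.
 */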
2444 #define RDMA_OPEN_DBG 0
2445 #define RDMA_OPEN_DEBUG_MSG(x...) \
2446 do { if (RDMA_OPEN_DBG) DEBUG_MSG(x); } while (0)
2447 static int rdma_open(struct inode *inode, struct file *file)
2449 rdma_state_link_t *rdma_link;
2450 rw_state_t *rdma_private_data;
2451 dev_rdma_sem_t *dev_sem;
2452 rw_state_p pd;
2453 unsigned long flags, flags_w, flags_r;
2454 int minor, file_eys = 0, i, file_open_mode;
2455 int link;
2456 int firstopen = 0;
2457 /* TODO: clean all of this up via rdma_private_data */
2458 RDMA_OPEN_DEBUG_MSG("%s: START\n", __FUNCTION__);
2459 if (file == (struct file *)NULL) {
2460 ERROR_MSG("%s: file is NULL\n", __FUNCTION__);
2461 return (-EINVAL);
2463 minor = MINOR(inode->i_rdev);
2464 if (minor < 0) {
2465 ERROR_MSG("%s: minor(%d) < 0\n", __FUNCTION__, minor);
2466 return (-EINVAL);
2468 link = DEV_inst(minor);
2469 if (HAS_MACHINE_L_SIC) {
2470 for_each_online_rdma(i)
2471 if (i == link)
2472 file_eys++;
2473 } else {
2474 if (0 == link)
2475 file_eys++;
2477 if (!file_eys) {
2478 ERROR_MSG("%s: link %d not support RDMA\n", __FUNCTION__,
2479 link);
2480 return (-EINVAL);
2482 file->private_data = NULL;
2483 rdma_link = &rdma_state->rdma_link[link];
2485 * File open mode.
2487 file_open_mode = minor % 2;
2488 rdma_private_data = &rdma_link->rw_states_d[file_open_mode];
2489 rdma_private_data->open_mode = file_open_mode;
2490 RDMA_OPEN_DEBUG_MSG("%s: mode open %s (minor: %x)\n",
2491 __FUNCTION__, file_open_mode ? "WRITE" : "READ", minor);
2492 rdma_private_data->link = link;
2493 file->private_data = rdma_private_data;
2494 mutex_enter(&rdma_link->mu);
2495 firstopen = (((1 << rdma_private_data->open_mode) & rdma_link->opened) == 0);
2496 if (firstopen == 0) {
2497 ERROR_MSG("%s: device EBUSY: minor: %d link: %d channel: %d\n",
2498 __FUNCTION__, minor, link, rdma_private_data->open_mode);
2499 mutex_exit(&rdma_link->mu);
2500 return (-EBUSY);
2502 rdma_link->opened |= (1 << rdma_private_data->open_mode);
2503 RDMA_OPEN_DEBUG_MSG("%s: opened.minor.link.channel: 0x%x.%d.%d.%d\n",
2504 __FUNCTION__, rdma_link->opened, minor, link,
2505 rdma_private_data->open_mode);
2506 mutex_exit(&rdma_link->mu);
2507 pd = &rdma_link->rw_states_d[file_open_mode];
2508 dev_sem = &pd->dev_rdma_sem;
2509 raw_spin_lock_irqsave(&dev_sem->lock, flags);
2510 pd->state_open_close = 1;
2512 #ifdef SET_ENABLE_RECEIVE_BIT
2514 /* Set enable receive after reset */
2516 #ifndef SETTING_OVER_INTERRUPT
2517 WRR_rdma(SHIFT_IRQ_MC, link , 0x0);
2518 #endif
2519 set_mode_native(link);
2520 //udelay(1000);
2521 #ifndef SETTING_OVER_INTERRUPT
2522 WRR_rdma(SHIFT_IRQ_MC, link ,irq_mc);
2523 #endif
2524 #endif
2526 * File opened as READER
2528 if (!file_open_mode) {
2529 rdma_pool_buf_t *r_pool_buf;
2530 unsigned int sending_msg;
2531 unsigned int ret_smsg, ret_wait_rdc;
2532 int count_wait_rdc = TX_RX_WAIT_DMA;
2534 pd->first_open++;
2535 rdma_link->generator_stop = 0;
2536 raw_spin_unlock_irqrestore(&dev_sem->lock, flags);
2538 r_pool_buf = &rdma_link->read_pool;
2539 raw_spin_lock_irqsave(&pd->lock_rd, flags_r);
2540 r_pool_buf->num_free_buf = rdma_link->num_buf;
2542 /* Release the buffers */
2544 while (!list_empty(&r_pool_buf->ready_list)) {
2545 list_move_tail(r_pool_buf->ready_list.next,
2546 &r_pool_buf->free_list);
2548 while (!list_empty(&r_pool_buf->busy_list)) {
2549 list_move_tail(r_pool_buf->busy_list.next,
2550 &r_pool_buf->free_list);
2552 raw_spin_unlock_irqrestore(&pd->lock_rd, flags_r);
2553 #ifdef RESET_DMA_MEMMORY
2555 * Reset dma
2557 //null_change(link);
2558 #endif
2560 * Waiting for the end of the last dma
2562 while (count_wait_rdc --) {
2563 ret_wait_rdc = RDR_rdma(SHIFT_DMA_RCS, link);
2564 if (!(ret_wait_rdc & DMA_RCS_RE)) {
2565 goto end_wait_rdc;
2569 * TODO. Error.
2571 ERROR_MSG("%s: link %d ret_wait_rdc: 0x%08x "
2572 "count_wait_rdc: %d\n", __FUNCTION__, link,
2573 ret_wait_rdc, count_wait_rdc);
2574 end_wait_rdc:;
2576 * Create MSG_READY_DMA
2578 sending_msg = MSG_READY_DMA |
2579 r_pool_buf->num_free_buf;
2581 * Send MSG_READY_DMA
2583 if ((ret_smsg = send_msg_check(sending_msg,
2584 link, 0, dev_sem, 0)) <= 0) {
2585 fix_event(link,
2586 READ_SNDMSGBAD_EVENT,
2587 sending_msg,
2588 dev_sem->num_obmen);
2589 } else {
2590 fix_event(link,
2591 READ_SNDNGMSG_EVENT,
2592 sending_msg,
2593 dev_sem->num_obmen);
2595 #ifdef UNX_TRWD
2596 REPEAT_TRWD = 0;
2597 //printk("%s: REPEAT_TRWD: %x\n", __FUNCTION__, REPEAT_TRWD);
2598 #endif
2599 } else {
2601 * File opened as WRITER
2603 rdma_pool_buf_t *w_pool_buf;
2604 unsigned int ret_wait_tdc;
2605 int count_wait_tdc = TX_RX_WAIT_DMA;
2607 rdma_link->trwd_lock_err = 0;
2608 raw_spin_unlock_irqrestore(&dev_sem->lock, flags);
2610 w_pool_buf = &rdma_link->write_pool;
2612 /* Release the buffers */
2614 raw_spin_lock_irqsave(&pd->lock_wr, flags_w);
2615 while (!list_empty(&w_pool_buf->ready_list)) {
2616 list_move_tail(w_pool_buf->ready_list.next,
2617 &w_pool_buf->free_list);
2619 while (!list_empty(&w_pool_buf->busy_list)) {
2620 list_move_tail(w_pool_buf->busy_list.next,
2621 &w_pool_buf->free_list);
2623 raw_spin_unlock_irqrestore(&pd->lock_wr, flags_w);
2625 * Waiting for the end of the last dma
2627 while (count_wait_tdc--)
2629 ret_wait_tdc = RDR_rdma(SHIFT_DMA_TCS, link);
2630 if (!(ret_wait_tdc & DMA_TCS_TE)) {
2631 goto end_wait_tdc;
2635 * TODO. Error.
2637 ERROR_MSG("%s: link %d ret_wait_tdc: 0x%08x count_wait_tdc: %d\n",
2638 __FUNCTION__, link, ret_wait_tdc, count_wait_tdc);
2639 end_wait_tdc:;
2641 RDMA_OPEN_DEBUG_MSG("%s: FINISH\n", __FUNCTION__);
2642 return 0;
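/*
 * read() and write() below are deliberate stubs: all data movement
 * goes through the ioctl interface together with mmap() of the
 * driver-allocated buffers.
 */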
2645 #define RDMA_READ_DBG 0
2646 #define RDMA_READ_DEBUG_MSG(x...) \
2647 do { if (RDMA_READ_DBG) DEBUG_MSG(x); } while (0)
2648 static ssize_t rdma_read(struct file *filp, char __user *buf, size_t size,
2649 loff_t *pos)
2651 RDMA_READ_DEBUG_MSG("%s: read call is not supported!", __FUNCTION__);
2652 return 0;
2655 #define RDMA_WRITE_DBG 0
2656 #define RDMA_WRITE_DEBUG_MSG(x...) \
2657 do { if (RDMA_WRITE_DBG) DEBUG_MSG(x); } while (0)
2658 static ssize_t rdma_write(struct file *filp, const char __user *buf,
2659 size_t size, loff_t *pos)
2661 RDMA_WRITE_DEBUG_MSG("%s: write call is not supported!", __FUNCTION__);
2662 return 0;
2665 #define RDMA_IOCTL_DBG 0
2666 #define RDMA_IOCTL_DEBUG_MSG(x...) \
2667 do { if (RDMA_IOCTL_DBG) DEBUG_MSG(x); } while (0)
2668 #define IOC_SUCCESFULL 0
2669 #define IOC_FAIL -1
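/*
 * Single ioctl entry point. The argument is an rdma_ioc_parm_t copied
 * in from user space: the reqlen* fields carry the request, while
 * acclen, err_no and reg_data carry the reply. The commands fall into
 * three groups: buffer pool configuration (RDMA_IOC_*), the data path
 * (the GET/WR/RD/PUT buffer cycle), and MOK_X_IOC_* accessors that
 * read or write controller status-register bits on either the local
 * (TYPE_OPER_NATIVE) or the remote (TYPE_OPER_REMOTE) end of the link.
 */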
2671 static long rdma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2673 rdma_state_link_t *rdma_link;
2674 rdma_ioc_parm_t parm;
2675 dev_rdma_sem_t *dev_sem;
2676 rw_state_t *rdma_private_data;
2677 rw_state_p pd;
2678 size_t rval;
2679 //unsigned long flags, flags_w, flags_r;
2680 unsigned long flags_w, flags_r;
2681 unsigned int open_mode;
2682 int ret = IOC_SUCCESFULL;
2683 int minor;
2684 int link;
2685 int res = 0;
2687 minor = get_file_minor(filp);
2688 if (minor < 0) {
2689 ERROR_MSG("%s: minor(%d) < 0 cmd: 0x%08x\n", __FUNCTION__,
2690 (int)minor, cmd);
2691 return minor;
2693 link = DEV_inst(minor);
2694 RDMA_IOCTL_DEBUG_MSG("%s: link: %d cmd: 0x%08x. START\n", __FUNCTION__,
2695 link, cmd);
2696 rdma_link = &rdma_state->rdma_link[link];
2697 rval = copy_from_user(&parm, (void __user *)arg,
2698 sizeof (rdma_ioc_parm_t));
2699 if (rval) {
2700 ERROR_MSG("%s: link: %d cmd: 0x%08x. Copy_from_user failed.\n",
2701 __FUNCTION__, link, cmd);
2702 return -EFAULT;
2704 RDMA_IOCTL_DEBUG_MSG("%s: in :\n"
2705 " parm.reqlen: 0x%08x\n"
2706 " parm.acclen: 0x%08x\n"
2707 " parm.err_no: 0x%08x\n"
2708 " parm.rwmode: 0x%08x\n"
2709 " parm.msg : 0x%08x\n"
2710 " parm.clkr : %llx\n"
2711 " parm.clkr1 : %llx\n"
2712 " parm.type_mode : 0x%08x\n"
2713 " parm.type_oper : 0x%08x\n"
2714 " parm.reg_addr0 : 0x%08x\n"
2715 " parm.reg_addr1 : 0x%08x\n"
2716 " parm.reg_addr2 : 0x%08x\n"
2717 " parm.reg_data : 0x%08x\n",
2718 __FUNCTION__, parm.reqlen,
2719 parm.acclen, parm.err_no, parm.rwmode, parm.msg,
2720 parm.clkr, parm.clkr1, parm.type_mode,
2721 parm.type_oper, parm.reg_addr0, parm.reg_addr1,
2722 parm.reg_addr2, parm.reg_data);
2724 rdma_private_data = filp->private_data;
2725 open_mode = rdma_private_data->open_mode;
2726 parm.err_no = res = 0;
2728 switch (cmd) {
2729 case RDMA_IOC_GET_neighbour_map:
2731 if (copy_to_user((void __user *)arg, &node_online_neighbour_map,
2732 sizeof (nodemask_t))) {
2733 ERROR_MSG("%s: link %d cmd: RDMA_IOC_GET_neighbour_map "
2734 "copy_to_user failed\n", __FUNCTION__, link);
2735 return -EINVAL;
2737 return 0;
2738 break;
2741 case RDMA_IOC_GET_ID:
2743 int i;
2744 rdma_link_id.count_links = MAX_NUMIOLINKS;
2745 if (HAS_MACHINE_L_SIC) {
2746 for_each_online_rdma(i) {
2747 rdma_link_id.link_id[i][0] = 1;
2748 rdma_link_id.link_id[i][1] = RDR_rdma(SHIFT_CH_IDT, i);
2749 rdma_link_id.link_id[i][2] = RDR_rdma(SHIFT_N_IDT, i);
2751 } else {
2752 i = 0;
2753 rdma_link_id.link_id[i][0] = 1;
2754 rdma_link_id.link_id[i][1] = RDR_rdma(SHIFT_CH_IDT, i);
2755 rdma_link_id.link_id[i][2] = RDR_rdma(SHIFT_N_IDT, i);
2757 if (copy_to_user((void __user *)arg, &rdma_link_id,
2758 sizeof(link_id_t))) {
2759 ERROR_MSG("%s:RDMA_IOC_GET_ID: copy_to_user failed\n",
2760 __FUNCTION__);
2761 return -EINVAL;
2763 return 0;
2764 break;
2766 #if 0
2767 case RDMA_SET_ATL:
2769 unsigned int atl;
2771 tr_atl = ATL_B | (parm.reqlen & ATL);
2772 WRR_rdma(SHIFT_CAM, link, tr_atl);
2773 atl = RDR_rdma(SHIFT_CAM, link);
2774 parm.acclen = atl;
2775 break;
2777 #endif
2778 case RDMA_IOC_GET_BUF_NUM:
2780 parm.acclen = rdma_link->num_buf;
2781 ret = IOC_SUCCESFULL;
2782 break;
2785 case RDMA_IOC_SET_BUF_NUM:
2787 if (parm.reqlen <= RDMA_BUF_NUM) {
2788 rdma_link->num_buf = parm.reqlen;
2789 parm.acclen = rdma_link->num_buf;
2790 ret = IOC_SUCCESFULL;
2791 } else {
2792 parm.acclen = RDMA_BUF_NUM;
2793 ret = IOC_FAIL;
2795 break;
2799 case RDMA_IOC_GET_TM_MODE:
2801 parm.acclen = rdma_link->tm_mode;
2802 ret = IOC_SUCCESFULL;
2803 break;
2806 case RDMA_IOC_SET_TM_MODE:
2808 rdma_link->tm_mode = parm.reqlen;
2809 parm.acclen = rdma_link->tm_mode;
2810 ret = IOC_SUCCESFULL;
2811 break;
2814 case RDMA_IOC_GET_ALIGN_BUF_TM:
2816 parm.acclen = rdma_link->align_buf_tm;
2817 ret = IOC_SUCCESFULL;
2818 break;
2821 case RDMA_IOC_GET_PAGE_SIZE:
2823 parm.acclen = PAGE_SIZE;
2824 ret = IOC_SUCCESFULL;
2825 break;
2828 case RDMA_IOC_SET_ALIGN_BUF_TM:
2830 rdma_link->align_buf_tm = parm.reqlen;
2831 parm.acclen = rdma_link->align_buf_tm;
2832 ret = IOC_SUCCESFULL;
2833 break;
2836 case RDMA_IOC_GET_NODE_MEM_ALLOC:
2838 parm.acclen = rdma_link->node_mem_alloc;
2839 ret = IOC_SUCCESFULL;
2840 break;
2843 case RDMA_IOC_SET_NODE_MEM_ALLOC:
2845 rdma_link->node_mem_alloc = parm.reqlen;
2846 parm.acclen = rdma_link->node_mem_alloc;
2847 ret = IOC_SUCCESFULL;
2848 break;
2851 case RDMA_IOC_GET_MAX_SIZE_BUFF:
2853 parm.acclen = rdma_link->max_size_buf;
2854 ret = IOC_SUCCESFULL;
2855 break;
2858 case RDMA_IOC_SET_MAX_SIZE_BUFF:
2860 if ((parm.reqlen * rdma_link->num_buf) > LIMIT_SIZE_BUFF) {
2861 parm.acclen = LIMIT_SIZE_BUFF;
ret = IOC_FAIL;
2862 } else {
2863 rdma_link->max_size_buf = parm.reqlen;
2864 parm.acclen = rdma_link->max_size_buf;
2865 ret = IOC_SUCCESFULL;
2867 break;
2869 case RDMA_IOC_GET_MAX_SIZE_BUFF_TM:
2871 parm.acclen = rdma_link->max_size_buf_tm;
2872 ret = IOC_SUCCESFULL;
2873 break;
2876 case RDMA_IOC_SET_MAX_SIZE_BUFF_TM:
2878 rdma_link->max_size_buf_tm = parm.reqlen;
2879 parm.acclen = rdma_link->max_size_buf_tm;
2880 ret = IOC_SUCCESFULL;
2881 break;
2884 case RDMA_IOC_GET_BUF_SIZE:
2886 parm.acclen = rdma_link->buf_size;
2887 ret = IOC_SUCCESFULL;
2888 break;
2891 case RDMA_IOC_ALLOC_TYPE:
2893 #ifdef CONFIG_RDMA_BOOT_MEM_ALLOC
2894 parm.acclen = R_M_NODE;
2895 #else
2896 parm.acclen = 0;
2897 #endif
2898 ret = IOC_SUCCESFULL;
2899 break;
2902 case RDMA_IOC_MEMRY_ALLOC:
2905 * parm.reqlen  = size; // max_size_buf
2906 * parm.reqlen1 = 4;    // number of buffers
2907 * parm.reqlen2 = 1;    // table mode
2908 * parm.reqlen3 = 1;    // PAGE_SIZEs per table row
2909 * parm.reqlen4 = 0;    // allocate memory on own node
2910 * parm.reqlen5 = 0;    // allocation type
2912 bufs_free(link);
2913 rdma_link->type_alloc = parm.reqlen5;
2914 if (rdma_link->type_alloc) {
2915 rdma_link->max_size_buf = parm.reqlen;
2916 rdma_link->num_buf = parm.reqlen1;
2917 } else {
2918 rdma_link->num_buf = parm.reqlen1;
2919 rdma_link->tm_mode = parm.reqlen2;
2920 if (rdma_link->tm_mode) {
2921 rdma_link->max_size_buf_tm = parm.reqlen;
2922 } else {
2923 rdma_link->max_size_buf = parm.reqlen;
2925 rdma_link->align_buf_tm = parm.reqlen3;
2926 rdma_link->node_mem_alloc = parm.reqlen4;
2928 if (!bufs_init(link)) {
2929 parm.acclen = rdma_link->buf_size;
2930 ret = IOC_SUCCESFULL;
2931 } else {
2932 bufs_free(link);
2933 parm.acclen = 0;
2934 ret = IOC_FAIL;
2936 break;
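/*
 * Example request (a sketch only, values per the comment block above):
 * reqlen = 0x400000, reqlen1 = 4, reqlen2 = 1, reqlen3 = 1,
 * reqlen4 = 0, reqlen5 = 0 re-creates the pools as four 4 MB buffers
 * in table mode, one page per table row, with the driver allocator on
 * any node; on success acclen reports the resulting buffer size.
 */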
2939 case RDMA_IOC_SET_MODE_RFSM:
2941 if (parm.reqlen == DISABLE_RFSM) {
2942 rfsm = CLEAR_RFSM;
2943 } else {
2944 rfsm = DMA_RCS_RFSM;
2946 parm.acclen = rfsm;
2947 break;
2950 case RDMA_IOC_GET_WR_BUF:
2952 rdma_pool_buf_t *w_pool_buf;
2953 rdma_buf_t *w_buf;
2955 if (open_mode == READER) {
2956 ERROR_MSG("%s: link: %d cmd: RDMA_IOC_GET_WR_BUF. "
2957 "File open as READER.\n", __FUNCTION__, link);
2958 ret = -EBADF;
2959 break;
2961 w_pool_buf = &rdma_link->write_pool;
2962 pd = &rdma_link->rw_states_d[WRITER];
2964 /* Search for a free buffer to write */
2966 raw_spin_lock_irqsave(&pd->lock_wr, flags_w);
2967 if (list_empty(&w_pool_buf->free_list)) {
2968 raw_spin_unlock_irqrestore(&pd->lock_wr, flags_w);
2969 ERROR_MSG("%s: link: %d "
2970 "cmd: RDMA_IOC_GET_WR_BUF(0x%08x). "
2971 "Search free for write buf failed.\n",
2972 __FUNCTION__, link, cmd);
2973 ret = -EBUSY;
2974 break;
2976 w_buf = list_entry(w_pool_buf->free_list.next, rdma_buf_t,
2977 list);
2978 list_move_tail(&w_buf->list, &w_pool_buf->ready_list);
2979 raw_spin_unlock_irqrestore(&pd->lock_wr, flags_w);
2980 parm.acclen = w_buf->num;
2981 ret = IOC_SUCCESFULL;
2982 break;
2985 case RDMA_IOC_WR_BUF:
2987 rdma_pool_buf_t *w_pool_buf;
2988 rdma_buf_t *w_buf;
2990 if (open_mode == READER) {
2991 ERROR_MSG("%s: link: %d cmd: RDMA_IOC_WR_BUF(0x%08x). "
2992 "File open as READER.\n", __FUNCTION__, link, cmd);
2993 ret = -EBADF;
2994 break;
2996 w_pool_buf = &rdma_link->write_pool;
2997 pd = &rdma_link->rw_states_d[WRITER];
2999 * Find user buffer
3001 raw_spin_lock_irqsave(&pd->lock_wr, flags_w);
3002 w_buf = search_in_list(&w_pool_buf->ready_list, parm.acclen);
3003 if (w_buf == NULL) {
3004 raw_spin_unlock_irqrestore(&pd->lock_wr, flags_w);
3005 ERROR_MSG("%s: link: %d cmd: RDMA_IOC_WR_BUF(0x%08x). "
3006 "Cant find buf.\n", __FUNCTION__, link, cmd);
3007 parm.err_no = RDMA_E_BAD_BUFFER;
3008 /*ret = -EAGAIN;*/
3009 ret = -EFAULT;
3010 break;
3013 /* Mark this buf as busy and place it at the end of the queue */
3015 list_move_tail(&w_buf->list, &w_pool_buf->busy_list);
3016 w_pool_buf->work_buf = w_buf;
3017 raw_spin_unlock_irqrestore(&pd->lock_wr, flags_w);
3019 * Call write function's
3021 ret = write_buf(link, &parm, filp->f_flags);
3022 #if 0
3023 // Move ioctl RDMA_IOC_PUT_WR_BUF
3024 // /*
3025 // * Remove buf from busy and move free list
3026 // */
3027 // raw_spin_lock_irqsave(&pd->lock_wr, flags_w);
3028 // list_move_tail(&w_buf->list, &w_pool_buf->free_list);
3029 // w_pool_buf->work_buf = NULL;
3030 // raw_spin_unlock_irqrestore(&pd->lock_wr, flags_w);
3031 #endif
3032 break;
3035 case RDMA_IOC_PUT_WR_BUF:
3037 rdma_pool_buf_t *w_pool_buf;
3038 rdma_buf_t *w_buf;
3040 if (open_mode == READER) {
3041 ERROR_MSG("%s: link: %d "
3042 "cmd: RDMA_IOC_PUT_WR_BUF(0x%08x). "
3043 "File open as READER.\n", __FUNCTION__,
3044 link, cmd);
3045 ret = -EBADF;
3046 break;
3048 if ( parm.acclen < 0 || parm.acclen > rdma_link->num_buf ) {
3049 ERROR_MSG("%s: link: %d "
3050 "cmd: RDMA_IOC_PUT_WR_BUF(0x%08x). "
3051 "Wrong num buf: 0x%08x.\n", __FUNCTION__,
3052 link, cmd, parm.acclen);
3053 ret = -ERANGE;
3054 break;
3056 w_pool_buf = &rdma_link->write_pool;
3057 pd = &rdma_link->rw_states_d[WRITER];
3059 * Remove buf from busy and move free list
3061 raw_spin_lock_irqsave(&pd->lock_wr, flags_w);
3062 w_buf = search_in_list(&w_pool_buf->busy_list, parm.acclen);
3063 if (w_buf == NULL) {
3064 raw_spin_unlock_irqrestore(&pd->lock_wr, flags_w);
3065 ERROR_MSG("%s: link: %d "
3066 "cmd: RDMA_IOC_PUT_WR_BUF(0x%08x). "
3067 "Cant find buf.\n", __FUNCTION__, link, cmd);
3068 ret = -EFAULT;
3069 break;
3071 list_move_tail(&w_buf->list, &w_pool_buf->free_list);
3072 w_pool_buf->work_buf = NULL;
3073 raw_spin_unlock_irqrestore(&pd->lock_wr, flags_w);
3074 ret = IOC_SUCCESFULL;
3075 break;
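/*
 * The three cases above form the transmit cycle. A minimal user-space
 * sketch (the fd is assumed to be the WRITE minor of this link, and
 * the buffer is assumed to be mapped with mmap() elsewhere):
 *
 *	rdma_ioc_parm_t p = {0};
 *	ioctl(fd, RDMA_IOC_GET_WR_BUF, &p);	// free -> ready, p.acclen = buf num
 *	... fill the mapped buffer p.acclen ...
 *	ioctl(fd, RDMA_IOC_WR_BUF, &p);		// ready -> busy, runs write_buf()
 *	ioctl(fd, RDMA_IOC_PUT_WR_BUF, &p);	// busy -> free when the DMA is done
 */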
3078 case RDMA_IOC_GET_RD_BUF:
3080 rdma_pool_buf_t *r_pool_buf;
3081 rdma_buf_t *r_buf;
3083 if (open_mode == WRITER) {
3084 ERROR_MSG("%s: link: %d cmd: RDMA_IOC_GET_RD_BUF. "
3085 "File open as WRITER.\n", __FUNCTION__,
3086 link);
3087 ret = -EBADF;
3088 break;
3090 r_pool_buf = &rdma_link->read_pool;
3091 pd = &rdma_link->rw_states_d[READER];
3093 /* Search for a free buffer to read */
3095 raw_spin_lock_irqsave(&pd->lock_rd, flags_r);
3096 if (list_empty(&r_pool_buf->free_list)) {
3097 raw_spin_unlock_irqrestore(&pd->lock_rd, flags_r);
3098 ERROR_MSG("%s: link: %d "
3099 "cmd: RDMA_IOC_GET_RD_BUF(0x%08x). "
3100 "Search free for read buf failed.\n",
3101 __FUNCTION__, link, cmd);
3102 ret = -EBUSY;
3103 break;
3105 r_buf = list_entry(r_pool_buf->free_list.next, rdma_buf_t,
3106 list);
3107 list_move_tail(&r_buf->list, &r_pool_buf->ready_list);
3108 raw_spin_unlock_irqrestore(&pd->lock_rd, flags_r);
3109 parm.acclen = r_buf->num;
3110 ret = IOC_SUCCESFULL;
3111 break;
3115 case RDMA_IOC_RD_BUF:
3117 rdma_pool_buf_t *r_pool_buf;
3118 rdma_buf_t *r_buf;
3120 if (open_mode == WRITER) {
3121 ERROR_MSG("%s: link: %d cmd: RDMA_IOC_RD_BUF(0x%08x). "
3122 "File open as WRITER.", __FUNCTION__,
3123 link, cmd);
3124 ret = -EBADF;
3125 break;
3127 r_pool_buf = &rdma_link->read_pool;
3128 pd = &rdma_link->rw_states_d[READER];
3129 dev_sem = &pd->dev_rdma_sem;
3131 * Call read function's
3133 ret = read_buf(link, &parm, filp->f_flags);
3134 if ( ret < 0) {
3135 ERROR_MSG("%s: link: %d cmd: RDMA_IOC_RD_BUF(0x%08x). "
3136 "Error read_buf.\n", __FUNCTION__, link, cmd);
3137 parm.acclen = -1;
3138 /*ret = -EAGAIN;*/
3139 break;
3142 * Time for reserve
3144 parm.clkr = join_curr_clock();
3146 * Find user buffer
3148 raw_spin_lock_irqsave(&pd->lock_rd, flags_r);
3149 /* list_entry() never yields NULL; an empty busy list is the real error case */
3150 r_buf = list_empty(&r_pool_buf->busy_list) ? NULL :
list_entry(r_pool_buf->busy_list.next, rdma_buf_t, list);
3151 raw_spin_unlock_irqrestore(&pd->lock_rd, flags_r);
3152 if (r_buf == NULL) {
3153 ERROR_MSG("%s: link: %d cmd: RDMA_IOC_RD_BUF(0x%08x). "
3154 "Cant find buf. \n", __FUNCTION__, link, cmd);
3155 event_ioctl(link, READ_BAD2_EVENT, 0,
3156 dev_sem->num_obmen);
3157 parm.acclen = -1;
3158 parm.err_no = RDMA_E_BAD_BUFFER;
3159 ret = -EFAULT;
3160 break;
3162 if ( r_buf->num < 0 || r_buf->num > rdma_link->num_buf ) {
3163 ERROR_MSG("%s: link: %d cmd: RDMA_IOC_RD_BUF(0x%08x). "
3164 "Wrong num buf: %d.\n", __FUNCTION__,
3165 link, cmd, r_buf->num);
3166 event_ioctl(link, READ_BAD3_EVENT, r_buf->num,
3167 dev_sem->num_obmen);
3168 parm.acclen = r_buf->num;
3169 parm.err_no = RDMA_E_BAD_BUFFER;
3170 ret = -ERANGE;
3171 break;
3173 parm.acclen = r_buf->num;
3175 * Cleanup: join rfsm_size & r_buf->real_size.
3177 if (rfsm)
3178 parm.reqlen = r_buf->rfsm_size;
3179 else
3180 parm.reqlen = r_buf->real_size;
3181 break;
3184 case RDMA_IOC_PUT_RD_BUF:
3186 rdma_pool_buf_t *r_pool_buf;
3187 rdma_buf_t *r_buf;
3188 unsigned int sending_msg;
3189 int ret_smsg;
3191 if (open_mode == WRITER) {
3192 ERROR_MSG("%s: link: %d "
3193 "cmd: RDMA_IOC_PUT_RD_BUF(0x%08x). "
3194 "File open as WRITER.", __FUNCTION__,
3195 link, cmd);
3196 ret = -EBADF;
3197 break;
3199 if (parm.acclen < 0 || parm.acclen > rdma_link->num_buf) {
3200 ERROR_MSG("%s: link: %d "
3201 "cmd: RDMA_IOC_PUT_RD_BUF(0x%08x). "
3202 "Wrong num buf: 0x%08x.\n", __FUNCTION__,
3203 link, cmd, parm.acclen);
3204 ret = -ERANGE;
3205 break;
3207 r_pool_buf = &rdma_link->read_pool;
3208 pd = &rdma_link->rw_states_d[READER];
3210 * Find user buffer
3212 raw_spin_lock_irqsave(&pd->lock_rd, flags_r);
3213 r_buf = search_in_list(&r_pool_buf->busy_list, parm.acclen);
3214 if (r_buf == NULL) {
3215 raw_spin_unlock_irqrestore(&pd->lock_rd, flags_r);
3216 ERROR_MSG("%s: link: %d "
3217 "cmd: RDMA_IOC_PUT_RD_BUF(0x%08x). "
3218 "Cant find buf.\n", __FUNCTION__, link, cmd);
3219 ret = -EFAULT;
3220 break;
3223 /* Mark this buf as free and place it at the end of the queue */
3225 list_move_tail(&r_buf->list, &r_pool_buf->free_list);
3226 if (!r_pool_buf->num_free_buf) {
3227 r_pool_buf->num_free_buf ++;
3228 if (rdma_link->mok_x_mode_link != STATE_LINK_ONLY_RECIVE) {
3230 * Create MSG_READY_DMA
3232 sending_msg = MSG_READY_DMA |
3233 r_pool_buf->num_free_buf;
3235 * Send READY_DMA
3237 if ((ret_smsg = send_msg_check(sending_msg,
3238 link, 0, 0, 0)) <= 0) {
3239 fix_event(link, READ_SNDMSGBAD_EVENT,
3240 ret_smsg,
3241 r_pool_buf->num_free_buf);
3242 } else {
3243 fix_event(link, READ_SNDNGMSG_EVENT,
3244 ret_smsg,
3245 r_pool_buf->num_free_buf);
3248 } else {
3249 r_pool_buf->num_free_buf ++;
3251 if ((rdma_link->mok_x_mode_link == STATE_LINK_ONLY_RECIVE) &&
3252 (r_pool_buf->work_buf == NULL)) {
3253 mok_x_prog_recieve_dma(link, 0);
3254 if (rdma_link->mok_x_mode_number_link == MODE3_LINK) {
3256 /* Enable receive */
3258 set_mok_x_SR_ready_to_receive(link);
3261 raw_spin_unlock_irqrestore(&pd->lock_rd, flags_r);
3262 ret = IOC_SUCCESFULL;
3263 break;
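/*
 * The receive cycle is symmetric: RDMA_IOC_GET_RD_BUF reserves a free
 * buffer, RDMA_IOC_RD_BUF waits in read_buf() and reports the received
 * length in parm.reqlen (or rfsm_size in RFSM mode), and
 * RDMA_IOC_PUT_RD_BUF returns the buffer. Freeing the first buffer also
 * re-arms the peer: the driver sends MSG_READY_DMA, or, in receive-only
 * mode, reprograms the DMA receiver directly.
 */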
3266 case RDMA_IOC_SET_TIMEOUT_RD:
3268 if (open_mode == WRITER) {
3269 ERROR_MSG("%s: link: %d "
3270 "cmd: RDMA_IOC_SET_TIMEOUT_RD(0x%08x). "
3271 "File open as READER.\n", __FUNCTION__,
3272 link, cmd);
3273 ret = -EBADF;
3274 break;
3276 pd = &rdma_link->rw_states_d[READER];
3277 dev_sem = &pd->dev_rdma_sem;
3278 dev_sem->timeout = parm.reqlen;
3279 parm.acclen = dev_sem->timeout;
3280 ret = IOC_SUCCESFULL;
3281 break;
3284 case RDMA_IOC_SET_TIMEOUT_WR:
3286 if (open_mode == READER) {
3287 ERROR_MSG("%s: link: %d "
3288 "cmd: RDMA_IOC_SET_TIMEOUT_WR(0x%08x). "
3289 "File open as READER.\n", __FUNCTION__,
3290 link, cmd);
3291 ret = -EBADF;
3292 break;
3294 pd = &rdma_link->rw_states_d[WRITER];
3295 dev_sem = &pd->dev_rdma_sem;
3296 dev_sem->timeout = parm.reqlen;
3297 parm.acclen = dev_sem->timeout;
3298 ret = IOC_SUCCESFULL;
3299 break;
3302 case RDMA_SET_STAT:
3304 memset(&rdma_link->stat_rdma, 0, sizeof (struct stat_rdma));
3305 parm.acclen = 0;
3306 ret = IOC_SUCCESFULL;
3307 break;
3309 #if 0
3310 case RDMA_IS_CAM_YES :
3312 dev_rdma_sem_t *dev_sem;
3313 rw_state_p pcam;
3314 unsigned int atl;
3315 int ret_time_dwait = 0;
3317 event_ioctl(link, RDMA_IS_CAM_YES_EVENT, 1, 0);
3318 pcam = &rdma_link->ralive;
3319 dev_sem = &pcam->dev_rdma_sem;
3320 ret_time_dwait = 0;
3321 atl = RDR_rdma(SHIFT_CAM, link);
3322 if (atl) {
3323 parm.acclen = atl;
3324 parm.err_no = 0;
3325 goto end_RDMA_IS_CAM_YES;
3327 raw_spin_lock_irqsave(&dev_sem->lock, flags);
3328 dev_sem->irq_count_rdma = 0;
3329 pcam->stat = 1;
3330 ret_time_dwait = wait_for_irq_rdma_sem(dev_sem, IO_TIMEOUT, link);
3331 pcam->stat = 0;
3332 raw_spin_unlock_irqrestore(&dev_sem->lock, flags);
3333 parm.acclen = RDR_rdma(SHIFT_CAM, link);
3334 if (ret_time_dwait == -2) {
3335 parm.err_no = -RDMA_E_SIGNAL;
3336 } else
3337 if (ret_time_dwait == -1) {
3338 parm.err_no = -RDMA_E_TIMER;
3339 } else
3340 if (ret_time_dwait > 0) {
3341 parm.err_no = ret_time_dwait;
3342 } else
3343 parm.err_no = 0;
3344 end_RDMA_IS_CAM_YES:
3345 event_ioctl(0, RDMA_IS_CAM_YES_EVENT, 0, 0);
3346 break;
3348 case RDMA_IS_CAM_NO :
3350 dev_rdma_sem_t *dev_sem;
3351 rw_state_p pcam;
3352 unsigned int atl;
3353 int ret_time_dwait = 0;
3355 event_ioctl(link, RDMA_IS_CAM_NO_EVENT, 1, 0);
3356 pcam = &rdma_link->talive;
3357 dev_sem = &pcam->dev_rdma_sem;
3358 atl = RDR_rdma(SHIFT_CAM, link);
3359 if (!atl) {
3360 parm.acclen = 0;
3361 parm.err_no = 0;
3362 goto end_RDMA_IS_CAM_NO;
3364 raw_spin_lock_irqsave(&dev_sem->lock, flags);
3365 dev_sem->irq_count_rdma = 0;
3366 pcam->stat = 1;
3367 ret_time_dwait = wait_for_irq_rdma_sem(dev_sem, IO_TIMEOUT, link);
3368 pcam->stat = 0;
3369 raw_spin_unlock_irqrestore(&dev_sem->lock, flags);
3370 parm.acclen = RDR_rdma(SHIFT_CAM, link);
3371 if (ret_time_dwait == -2) {
3372 parm.err_no = -RDMA_E_SIGNAL;
3373 } else
3374 if (ret_time_dwait == -1) {
3375 parm.err_no = -RDMA_E_TIMER;
3376 } else
3377 if (ret_time_dwait > 0) {
3378 parm.err_no = ret_time_dwait;
3379 } else
3380 parm.err_no = 0;
3381 end_RDMA_IS_CAM_NO:
3382 parm.clkr = join_curr_clock();
3383 parm.clkr1 = pcam->clkr;
3384 parm.reqlen = pcam->int_cnt;
3386 event_ioctl(0, RDMA_IS_CAM_NO_EVENT, 0, 0);
3387 break;
3388 #endif
3389 case MOK_X_IOC_CHANGE_MODE:
3392 if (send_SGP1_Msg(link) > 0 ) {
3393 ret = IOC_SUCCESFULL;
3394 RDMA_IOCTL_DEBUG_MSG("%s: link: %d "
3395 "cmd: MOK_X_IOC_CHANGE_MODE(0x%08x). "
3396 "Change mode. \n", __FUNCTION__, link, cmd);
3397 } else {
3398 ret = -1;
3399 RDMA_IOCTL_DEBUG_MSG("%s: link: %d "
3400 "cmd: MOK_X_IOC_CHANGE_MODE(0x%08x). "
3401 "Change not mode. \n", __FUNCTION__, link, cmd);
3403 break;
3406 case MOK_X_IOC_READ_REG:
3408 unsigned int type_msg;
3410 #ifndef SETTING_OVER_INTERRUPT
3411 set_mask(link, 0x0);
3412 #endif
3413 type_msg = (2 * parm.type_mode +
3414 parm.type_oper) << RDMA_MOK_X_MSG_SHIFT;
3415 RDMA_IOCTL_DEBUG_MSG("%s: link: %d "
3416 "cmd: MOK_X_IOC_READ_REG(0x%08x). "
3417 "type_msg: %x.\n", __FUNCTION__, link, cmd,
3418 type_msg);
3419 ret = RDR_mok_x(link, type_msg, parm.reg_addr0, &parm.reg_data);
3420 #ifndef SETTING_OVER_INTERRUPT
3421 set_mask(link, irq_mc);
3422 #endif
3423 break;
3426 case MOK_X_IOC_WRITE_REG:
3428 unsigned int type_msg;
3430 #ifndef SETTING_OVER_INTERRUPT
3431 set_mask(link, 0x0);
3432 #endif
3433 type_msg = (2 * parm.type_mode +
3434 parm.type_oper) << RDMA_MOK_X_MSG_SHIFT;
3435 RDMA_IOCTL_DEBUG_MSG("%s: link: %d "
3436 "cmd: MOK_X_IOC_WRITE_REG(0x%08x). "
3437 "type_msg: %x.\n", __FUNCTION__, link, cmd,
3438 type_msg);
3439 ret = WRR_mok_x(link, type_msg, parm.reg_addr0, parm.reg_data);
3440 #ifndef SETTING_OVER_INTERRUPT
3441 set_mask(link, irq_mc);
3442 #endif
3443 break;
3446 case MOK_X_IOC_READ_MDIO_REG:
3448 unsigned int type_msg;
3450 #ifndef SETTING_OVER_INTERRUPT
3451 set_mask(link, 0x0);
3452 #endif
3453 type_msg = (2 * parm.type_mode +
3454 parm.type_oper) << RDMA_MOK_X_MSG_SHIFT;
3455 RDMA_IOCTL_DEBUG_MSG("%s: link: %d "
3456 "cmd: MOK_X_IOC_READ_MDIO_REG(0x%08x). "
3457 "type_msg: %x.\n", __FUNCTION__, link, cmd,
3458 type_msg);
3459 ret = get_mok_x_mdio_reg(link, type_msg, parm.reg_addr0,
3460 parm.reg_addr1, parm.reg_addr2,
3461 &parm.reg_data);
3462 #ifndef SETTING_OVER_INTERRUPT
3463 set_mask(link, irq_mc);
3464 #endif
3465 break;
3468 case MOK_X_IOC_WRITE_MDIO_REG:
3470 unsigned int type_msg;
3472 #ifndef SETTING_OVER_INTERRUPT
3473 set_mask(link, 0x0);
3474 #endif
3475 type_msg = (2 * parm.type_mode +
3476 parm.type_oper) << RDMA_MOK_X_MSG_SHIFT;
3477 RDMA_IOCTL_DEBUG_MSG("%s: link: %d "
3478 "cmd: MOK_X_IOC_WRITE_MDIO_REG(0x%08x). "
3479 "type_msg: %x.\n", __FUNCTION__, link, cmd,
3480 type_msg);
3481 ret = set_mok_x_mdio_reg(link, type_msg, parm.reg_addr0,
3482 parm.reg_addr1, parm.reg_addr2,
3483 parm.reg_data);
3484 #ifndef SETTING_OVER_INTERRUPT
3485 set_mask(link, irq_mc);
3486 #endif
3487 break;
3491 * Get value link
3493 case MOK_X_IOC_GET_LINK:
3495 switch (parm.rwmode) {
3496 case TYPE_OPER_NATIVE:
3497 ret = get_mok_x_SR_link(link);
3498 break;
3499 case TYPE_OPER_REMOTE:
3500 ret = get_mok_x_remote_SR_link(link);
3501 break;
3502 default:
3503 ret = -ENODEV;
3505 parm.acclen = ret;
3506 if (ret >= 0 && ret <= 1)
3507 ret = 0;
3508 break;
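/*
 * Convention for the MOK_X_IOC_GET_* cases below: the getter returns
 * the bit value (0 or 1) or a negative errno, the value is passed back
 * in parm.acclen, and ret is collapsed to 0 only for a genuine bit
 * value, so errors still reach the caller.
 */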
3512 * Set enable
3514 case MOK_X_IOC_SET_ENABLE:
3516 switch (parm.rwmode) {
3517 case TYPE_OPER_NATIVE:
3518 ret = set_mok_x_SR_enable(link);
3519 break;
3520 case TYPE_OPER_REMOTE:
3521 ret = set_mok_x_remote_SR_enable(link);
3522 break;
3523 default:
3524 ret = -ENODEV;
3526 break;
3529 * Unset enable
3531 case MOK_X_IOC_UNSET_ENABLE:
3533 switch (parm.rwmode) {
3534 case TYPE_OPER_NATIVE:
3535 ret = unset_mok_x_SR_enable(link);
3536 break;
3537 case TYPE_OPER_REMOTE:
3538 ret = unset_mok_x_remote_SR_enable(link);
3539 break;
3540 default:
3541 ret = -ENODEV;
3543 break;
3546 * Get value enable
3548 case MOK_X_IOC_GET_ENABLE:
3550 switch (parm.rwmode) {
3551 case TYPE_OPER_NATIVE:
3552 ret = get_mok_x_SR_enable(link);
3553 break;
3554 case TYPE_OPER_REMOTE:
3555 ret = get_mok_x_remote_SR_enable(link);
3556 break;
3557 default:
3558 ret = -ENODEV;
3560 parm.acclen = ret;
3561 if (ret >= 0 && ret <= 1)
3562 ret = 0;
3563 break;
3567 * Set master
3569 case MOK_X_IOC_SET_MASTER:
3571 switch (parm.rwmode) {
3572 case TYPE_OPER_NATIVE:
3573 ret = set_mok_x_SR_master(link);
3574 break;
3575 case TYPE_OPER_REMOTE:
3576 ret = set_mok_x_remote_SR_master(link);
3577 break;
3578 default:
3579 ret = -ENODEV;
3581 break;
3584 * Unset master
3586 case MOK_X_IOC_UNSET_MASTER:
3588 switch (parm.rwmode) {
3589 case TYPE_OPER_NATIVE:
3590 ret = unset_mok_x_SR_master(link);
3591 break;
3592 case TYPE_OPER_REMOTE:
3593 ret = unset_mok_x_remote_SR_master(link);
3594 break;
3595 default:
3596 ret = -ENODEV;
3598 break;
3601 * Get value master
3603 case MOK_X_IOC_GET_MASTER:
3605 switch (parm.rwmode) {
3606 case TYPE_OPER_NATIVE:
3607 ret = get_mok_x_SR_master(link);
3608 break;
3609 case TYPE_OPER_REMOTE:
3610 ret = get_mok_x_remote_SR_master(link);
3611 break;
3612 default:
3613 ret = -ENODEV;
3615 parm.acclen = ret;
3616 if (ret >= 0 && ret <= 1)
3617 ret = 0;
3618 break;
3622 * Set slave
3624 case MOK_X_IOC_SET_SLAVE:
3626 switch (parm.rwmode) {
3627 case TYPE_OPER_NATIVE:
3628 ret = set_mok_x_SR_slave(link);
3629 break;
3630 case TYPE_OPER_REMOTE:
3631 ret = set_mok_x_remote_SR_slave(link);
3632 break;
3633 default:
3634 ret = -ENODEV;
3636 break;
3639 * Unset slave
3641 case MOK_X_IOC_UNSET_SLAVE:
3643 switch (parm.rwmode) {
3644 case TYPE_OPER_NATIVE:
3645 ret = unset_mok_x_SR_slave(link);
3646 break;
3647 case TYPE_OPER_REMOTE:
3648 ret = unset_mok_x_remote_SR_slave(link);
3649 break;
3650 default:
3651 ret = -ENODEV;
3653 break;
3656 /* Get slave value */
3658 case MOK_X_IOC_GET_SLAVE:
3660 switch (parm.rwmode) {
3661 case TYPE_OPER_NATIVE:
3662 ret = get_mok_x_SR_slave(link);
3663 break;
3664 case TYPE_OPER_REMOTE:
3665 ret = get_mok_x_remote_SR_slave(link);
3666 break;
3667 default:
3668 ret = -ENODEV;
3670 parm.acclen = ret;
3671 if (ret >= 0 && ret <= 1)
3672 ret = 0;
3673 break;
3677 * Set enable transmit
3679 case MOK_X_IOC_SET_ENABLE_TRANSMIT:
3681 switch (parm.rwmode) {
3682 case TYPE_OPER_NATIVE:
3683 ret = set_mok_x_SR_enable_trasmit(link);
3684 break;
3685 case TYPE_OPER_REMOTE:
3686 ret = set_mok_x_remote_SR_enable_trasmit(link);
3687 break;
3688 default:
3689 ret = -ENODEV;
3691 break;
3694 * Get value enable transmit
3696 case MOK_X_IOC_GET_ENABLE_TRANSMIT:
3698 switch (parm.rwmode) {
3699 case TYPE_OPER_NATIVE:
3700 ret = get_mok_x_SR_enable_trasmit(link);
3701 break;
3702 case TYPE_OPER_REMOTE:
3703 ret = get_mok_x_remote_SR_enable_trasmit(link);
3704 break;
3705 default:
3706 ret = -ENODEV;
3708 parm.acclen = ret;
3709 if (ret >= 0 && ret <= 1)
3710 ret = 0;
3711 break;
3715 * Set enable receive
3717 case MOK_X_IOC_SET_ENABLE_RECEIVE:
3719 switch (parm.rwmode) {
3720 case TYPE_OPER_NATIVE:
3721 ret = set_mok_x_SR_enable_receive(link);
3722 break;
3723 case TYPE_OPER_REMOTE:
3724 ret = set_mok_x_remote_SR_enable_receive(link);
3725 break;
3726 default:
3727 ret = -ENODEV;
3729 break;
3732 * Get value enable receive
3734 case MOK_X_IOC_GET_ENABLE_RECEIVE:
3736 switch (parm.rwmode) {
3737 case TYPE_OPER_NATIVE:
3738 ret = get_mok_x_SR_enable_receive(link);
3739 break;
3740 case TYPE_OPER_REMOTE:
3741 ret = get_mok_x_remote_SR_enable_receive(link);
3742 break;
3743 default:
3744 ret = -ENODEV;
3746 parm.acclen = ret;
3747 if (ret >= 0 && ret <= 1)
3748 ret = 0;
3749 break;
3753 * Set ready to receive
3755 case MOK_X_IOC_SET_READY_TO_RECEIVE:
3757 switch (parm.rwmode) {
3758 case TYPE_OPER_NATIVE:
3759 ret = set_mok_x_SR_ready_to_receive(link);
3760 break;
3761 case TYPE_OPER_REMOTE:
3762 ret = set_mok_x_remote_SR_ready_to_receive(link);
3763 break;
3764 default:
3765 ret = -ENODEV;
3767 break;
3770 * Get value ready to receive
3772 case MOK_X_IOC_GET_READY_TO_RECEIVE:
3774 switch (parm.rwmode) {
3775 case TYPE_OPER_NATIVE:
3776 ret = get_mok_x_SR_ready_to_receive(link);
3777 break;
3778 case TYPE_OPER_REMOTE:
3779 ret = get_mok_x_remote_SR_ready_to_receive(link);
3780 break;
3781 default:
3782 ret = -ENODEV;
3784 parm.acclen = ret;
3785 if (ret >= 0 && ret <= 1)
3786 ret = 0;
3787 break;
3791 * Set granted last packet
3793 case MOK_X_IOC_SET_GRANTED_LAST_PACKET:
3795 switch (parm.rwmode) {
3796 case TYPE_OPER_NATIVE:
3797 ret = set_mok_x_SR_granted_last_packet(link);
3798 break;
3799 case TYPE_OPER_REMOTE:
3800 ret = set_mok_x_remote_SR_granted_last_packet(link);
3801 break;
3802 default:
3803 ret = -ENODEV;
3805 break;
3809 * Unset granted last packet
3811 case MOK_X_IOC_UNSET_GRANTED_LAST_PACKET:
3813 switch (parm.rwmode) {
3814 case TYPE_OPER_NATIVE:
3815 ret = unset_mok_x_SR_granted_last_packet(link);
3816 break;
3817 case TYPE_OPER_REMOTE:
3818 ret = unset_mok_x_remote_SR_granted_last_packet(link);
3819 break;
3820 default:
3821 ret = -ENODEV;
3823 break;
3827 * Get value granted last packet
3829 case MOK_X_IOC_GET_GRANTED_LAST_PACKET:
3831 switch (parm.rwmode) {
3832 case TYPE_OPER_NATIVE:
3833 ret = get_mok_x_SR_granted_last_packet(link);
3834 break;
3835 case TYPE_OPER_REMOTE:
3836 ret = get_mok_x_remote_SR_granted_last_packet(link);
3837 break;
3838 default:
3839 ret = -ENODEV;
3841 parm.acclen = ret;
3842 if (ret >= 0 && ret <= 1)
3843 ret = 0;
3844 break;
3848 * Set granted packet
3850 case MOK_X_IOC_SET_GRANTED_PACKET:
3852 switch (parm.rwmode) {
3853 case TYPE_OPER_NATIVE:
3854 ret = set_mok_x_SR_granted_packet(link);
3855 break;
3856 case TYPE_OPER_REMOTE:
3857 ret = set_mok_x_remote_SR_granted_packet(link);
3858 break;
3859 default:
3860 ret = -ENODEV;
3862 break;
3866 * Unset granted packet
3868 case MOK_X_IOC_UNSET_GRANTED_PACKET:
3870 switch (parm.rwmode) {
3871 case TYPE_OPER_NATIVE:
3872 ret = unset_mok_x_SR_granted_packet(link);
3873 break;
3874 case TYPE_OPER_REMOTE:
3875 ret = unset_mok_x_remote_SR_granted_packet(link);
3876 break;
3877 default:
3878 ret = -ENODEV;
3880 break;
3884 * Get value granted packet
3886 case MOK_X_IOC_GET_GRANTED_PACKET:
3888 switch (parm.rwmode) {
3889 case TYPE_OPER_NATIVE:
3890 ret = get_mok_x_SR_granted_packet(link);
3891 break;
3892 case TYPE_OPER_REMOTE:
3893 ret = get_mok_x_remote_SR_granted_packet(link);
3894 break;
3895 default:
3896 ret = -ENODEV;
3898 parm.acclen = ret;
3899 if (ret >= 0 && ret <= 1)
3900 ret = 0;
3901 break;
3905 * Get value in ready to receive
3907 case MOK_X_IOC_GET_IN_READY_TO_RECEIVE:
3909 switch (parm.rwmode) {
3910 case TYPE_OPER_NATIVE:
3911 ret = get_mok_x_SR_in_ready_to_receive(link);
3912 break;
3913 case TYPE_OPER_REMOTE:
3914 ret = get_mok_x_remote_SR_in_ready_to_receive(link);
3915 break;
3916 default:
3917 ret = -ENODEV;
3919 parm.acclen = ret;
3920 if (ret >= 0 && ret <= 1)
3921 ret = 0;
3922 break;
3926 /* Set buffer size for mode1, mode2, mode3 */
3928 case MOK_X_IOC_SET_SIZE_FOR_MODE:
3930 if (parm.reqlen <= rdma_link->buf_size) {
3931 switch (parm.rwmode) {
3932 case TYPE_OPER_NATIVE:
3933 rdma_link->mok_x_buf_size = parm.reqlen;
3934 ret = set_mok_x_SIZE(link,
3935 rdma_link->mok_x_buf_size);
3936 break;
3937 case TYPE_OPER_REMOTE:
3938 rdma_link->mok_x_remote_buf_size = parm.reqlen;
3939 ret = set_mok_x_remote_SIZE(link,
3940 rdma_link->mok_x_remote_buf_size);
3941 break;
3942 default:
3943 ret = -ENODEV;
3945 } else {
3946 parm.acclen = rdma_link->buf_size;
3947 ret = -EMSGSIZE;
3949 break;
3952 /* Get buffer size for mode1, mode2, mode3 */
3954 case MOK_X_IOC_GET_SIZE_FOR_MODE:
3956 int mok_x_buf_size = 0;
3957 switch (parm.rwmode) {
3958 case TYPE_OPER_NATIVE:
3959 ret = get_mok_x_SIZE(link, &mok_x_buf_size);
3960 parm.acclen = (int) mok_x_buf_size;
3961 break;
3962 case TYPE_OPER_REMOTE:
3963 ret = get_mok_x_remote_SIZE(link, &mok_x_buf_size);
3964 parm.acclen = (int) mok_x_buf_size;
3965 break;
3966 default:
3967 ret = -ENODEV;
3969 break;
3973 * Set mode1
3975 case MOK_X_IOC_SET_MODE1:
3977 parm.acclen = rdma_link->mok_x_buf_size;
3978 switch (parm.rwmode) {
3979 case TYPE_OPER_NATIVE:
3980 if (!(ret = set_mok_x_SR_mode1(link)))
3981 rdma_link->mok_x_mode1 = 1;
3982 break;
3983 case TYPE_OPER_REMOTE:
3984 /* remote variant assumed, by analogy with the mode2..mode4 cases */
if (!(ret = set_mok_x_remote_SR_mode1(link)))
3985 rdma_link->mok_x_remote_mode1 = 1;
3986 break;
3987 default:
3988 ret = -ENODEV;
3990 break;
3993 * Unset mode1
3995 case MOK_X_IOC_UNSET_MODE1:
3997 parm.acclen = rdma_link->mok_x_buf_size;
3998 switch (parm.rwmode) {
3999 case TYPE_OPER_NATIVE:
4000 if (!(ret = unset_mok_x_SR_mode1(link)))
4001 rdma_link->mok_x_mode1 = 0;
4002 break;
4003 case TYPE_OPER_REMOTE:
4004 /* remote variant assumed, by analogy with the mode2..mode4 cases */
if (!(ret = unset_mok_x_remote_SR_mode1(link)))
4005 rdma_link->mok_x_remote_mode1 = 0;
4006 break;
4007 default:
4008 ret = -ENODEV;
4010 break;
4013 * Get mode1
4015 case MOK_X_IOC_GET_MODE1:
4017 switch (parm.rwmode) {
4018 case TYPE_OPER_NATIVE:
4019 ret = get_mok_x_SR_mode1(link);
4020 break;
4021 case TYPE_OPER_REMOTE:
4022 ret = get_mok_x_remote_SR_mode1(link);
4023 break;
4024 default:
4025 ret = -ENODEV;
4027 parm.acclen = ret;
4028 if (ret >= 0 && ret <= 1)
4029 ret = 0;
4030 break;
4033 * Set mode2
4035 case MOK_X_IOC_SET_MODE2:
4037 switch (parm.rwmode) {
4038 case TYPE_OPER_NATIVE:
4039 if (!(ret = set_mok_x_SR_mode2(link)))
4040 rdma_link->mok_x_mode2 = 1;
4041 break;
4042 case TYPE_OPER_REMOTE:
4043 if (!(ret = set_mok_x_remote_SR_mode2(link)))
4044 rdma_link->mok_x_remote_mode2 = 1;
4045 break;
4046 default:
4047 ret = -ENODEV;
4049 break;
4052 * Unset mode2
4054 case MOK_X_IOC_UNSET_MODE2:
4056 switch (parm.rwmode) {
4057 case TYPE_OPER_NATIVE:
4058 if (!(ret = unset_mok_x_SR_mode2(link)))
4059 rdma_link->mok_x_mode2 = 0;
4060 break;
4061 case TYPE_OPER_REMOTE:
4062 /* remote variant assumed, mirroring MOK_X_IOC_SET_MODE2 */
if (!(ret = unset_mok_x_remote_SR_mode2(link)))
4063 rdma_link->mok_x_remote_mode2 = 0;
4064 break;
4065 default:
4066 ret = -ENODEV;
4068 break;
4071 * Get mode2
4073 case MOK_X_IOC_GET_MODE2:
4075 switch (parm.rwmode) {
4076 case TYPE_OPER_NATIVE:
4077 ret = get_mok_x_SR_mode2(link);
4078 break;
4079 case TYPE_OPER_REMOTE:
4080 ret = get_mok_x_remote_SR_mode2(link);
4081 break;
4082 default:
4083 ret = -ENODEV;
4085 parm.acclen = ret;
4086 if (ret >= 0 && ret <= 1)
4087 ret = 0;
4088 break;
4092 * Set mode3
4094 case MOK_X_IOC_SET_MODE3:
4096 switch (parm.rwmode) {
4097 case TYPE_OPER_NATIVE:
4098 if (!(ret = set_mok_x_SR_mode3(link)))
4099 rdma_link->mok_x_mode3 = 1;
4100 break;
4101 case TYPE_OPER_REMOTE:
4102 if (!(ret = set_mok_x_remote_SR_mode3(link)))
4103 rdma_link->mok_x_remote_mode3 = 1;
4104 break;
4105 default:
4106 ret = -ENODEV;
4108 break;
4111 * Unset mode3
4113 case MOK_X_IOC_UNSET_MODE3:
4115 switch (parm.rwmode) {
4116 case TYPE_OPER_NATIVE:
4117 if (!(ret = unset_mok_x_SR_mode3(link)))
4118 rdma_link->mok_x_mode3 = 0;
4119 break;
4120 case TYPE_OPER_REMOTE:
4121 /* remote mode3 variant assumed, mirroring MOK_X_IOC_SET_MODE3 */
if (!(ret = unset_mok_x_remote_SR_mode3(link)))
4122 rdma_link->mok_x_remote_mode3 = 0;
4123 break;
4124 default:
4125 ret = -ENODEV;
4127 break;
4130 * Get mode3
4132 case MOK_X_IOC_GET_MODE3:
4134 switch (parm.rwmode) {
4135 case TYPE_OPER_NATIVE:
4136 ret = get_mok_x_SR_mode3(link);
4137 break;
4138 case TYPE_OPER_REMOTE:
4139 ret = get_mok_x_remote_SR_mode3(link);
4140 break;
4141 default:
4142 ret = -ENODEV;
4144 parm.acclen = ret;
4145 if (ret >= 0 && ret <= 1)
4146 ret = 0;
4147 break;
4151 * Set mode4
4153 case MOK_X_IOC_SET_MODE4:
4155 switch (parm.rwmode) {
4156 case TYPE_OPER_NATIVE:
4157 if (!(ret = set_mok_x_SR_mode4(link))) {
4158 rdma_link->mok_x_mode4 = 1;
4160 break;
4161 case TYPE_OPER_REMOTE:
4162 if (!(ret = set_mok_x_remote_SR_mode4(link))) {
4163 rdma_link->mok_x_remote_mode4 = 1;
4165 break;
4166 default:
4167 ret = -ENODEV;
4169 break;
4171 case MOK_X_IOC_UNSET_MODE4:
4173 switch (parm.rwmode) {
4174 case TYPE_OPER_NATIVE:
4175 if (!(ret = unset_mok_x_SR_mode4(link))) {
4176 rdma_link->mok_x_mode4 = 0;
4177 }
4178 break;
4179 case TYPE_OPER_REMOTE:
4180 if (!(ret = unset_mok_x_remote_SR_mode4(link))) {
4181 rdma_link->mok_x_remote_mode4 = 0;
4182 }
4183 break;
4184 default:
4185 ret = -ENODEV;
4186 }
4187 break;
4190 /* Get mode4 */
4192 case MOK_X_IOC_GET_MODE4:
4194 switch (parm.rwmode) {
4195 case TYPE_OPER_NATIVE:
4196 ret = get_mok_x_SR_mode4(link);
4197 break;
4198 case TYPE_OPER_REMOTE:
4199 ret = get_mok_x_remote_SR_mode4(link);
4200 break;
4201 default:
4202 ret = -ENODEV;
4203 }
4204 parm.acclen = ret;
4205 if (ret >= 0 && ret <= 1)
4206 ret = 0;
4207 break;
4211 /* Get value of timeout for message receive */
4213 case MOK_X_IOC_GET_TIMEOUT_MSG_RECEIVE:
4215 switch (parm.rwmode) {
4216 case TYPE_OPER_NATIVE:
4217 ret = get_mok_x_SR_in_ready_to_receive(link);
4218 break;
4219 case TYPE_OPER_REMOTE:
4220 ret = get_mok_x_remote_SR_in_ready_to_receive(link);
4221 break;
4222 default:
4223 ret = -ENODEV;
4224 }
4225 parm.acclen = ret;
4226 if (ret >= 0 && ret <= 1)
4227 ret = 0;
4228 break;
4232 /* Get value of transmitted-packets counter */
4234 case MOK_X_IOC_TRANSMITTED_PACKET_COUNTER:
4235 {
4236 unsigned int data = 0;
4237 unsigned int reg_addr = MOK_X_TRANSMITTED_PACKET_COUNTER0;
4239 if (parm.reqlen > 3 || parm.reqlen < 0) {
4240 ret = -ERANGE;
4241 goto failed_transmit;
4242 }
4243 reg_addr = reg_addr + parm.reqlen;
4244 switch (parm.rwmode) {
4245 case TYPE_OPER_NATIVE:
4246 ret = get_mok_x_reg_counters(link, reg_addr, &data);
4247 break;
4248 case TYPE_OPER_REMOTE:
4249 ret = get_mok_x_remote_reg_counters(link, reg_addr, &data);
4250 break;
4251 default:
4252 ret = -ENODEV;
4253 }
4254 failed_transmit:
4255 parm.reg_data = data;
4256 break;
4257 }
4260 /* Get value of received-packets counter */
4262 case MOK_X_IOC_RECEIVED_PACKET_COUNTER:
4263 {
4264 unsigned int data = 0;
4265 unsigned int reg_addr = MOK_X_RECEIVED_PACKET_COUNTER0;
4267 if (parm.reqlen > 3 || parm.reqlen < 0) {
4268 ret = -ERANGE;
4269 goto failed_received;
4270 }
4271 reg_addr = reg_addr + parm.reqlen;
4272 switch (parm.rwmode) {
4273 case TYPE_OPER_NATIVE:
4274 ret = get_mok_x_reg_counters(link, reg_addr, &data);
4275 break;
4276 case TYPE_OPER_REMOTE:
4277 ret = get_mok_x_remote_reg_counters(link, reg_addr, &data);
4278 break;
4279 default:
4280 ret = -ENODEV;
4281 }
4282 failed_received:
4283 parm.reg_data = data;
4284 break;
4285 }
4288 /* Get value of received-packets-with-error counter */
4290 case MOK_X_IOC_RECEIVED_PACKET_ERR_COUNTER:
4291 {
4292 unsigned int data = 0;
4293 unsigned int reg_addr = MOK_X_RECEIVED_PACKET_ERR_COUNTER0;
4295 if (parm.reqlen > 3 || parm.reqlen < 0) {
4296 ret = -ERANGE;
4297 goto failed_err_received;
4298 }
4299 reg_addr = reg_addr + parm.reqlen;
4300 switch (parm.rwmode) {
4301 case TYPE_OPER_NATIVE:
4302 ret = get_mok_x_reg_counters(link, reg_addr, &data);
4303 break;
4304 case TYPE_OPER_REMOTE:
4305 ret = get_mok_x_remote_reg_counters(link, reg_addr, &data);
4306 break;
4307 default:
4308 ret = -ENODEV;
4309 }
4310 failed_err_received:
4311 parm.reg_data = data;
4312 break;
4313 }
4316 /* Get value of not-received-packets counter */
4318 case MOK_X_IOC_RECEIVED_PACKET_NOT_COUNTER:
4319 {
4320 unsigned int data = 0;
4321 unsigned int reg_addr = MOK_X_RECEIVED_PACKET_NOT_COUNTER0;
4323 if (parm.reqlen > 3 || parm.reqlen < 0) {
4324 ret = -ERANGE;
4325 goto failed_not_received;
4326 }
4327 reg_addr = reg_addr + parm.reqlen;
4328 switch (parm.rwmode) {
4329 case TYPE_OPER_NATIVE:
4330 ret = get_mok_x_reg_counters(link, reg_addr, &data);
4331 break;
4332 case TYPE_OPER_REMOTE:
4333 ret = get_mok_x_remote_reg_counters(link, reg_addr, &data);
4334 break;
4335 default:
4336 ret = -ENODEV;
4337 }
4338 failed_not_received:
4339 parm.reg_data = data;
4340 break;
4341 }
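/*
 * Hedged usage sketch for the four counter ioctls above. Userspace
 * selects one of the four 32-bit counter registers with parm.reqlen
 * (0..3, added to the COUNTER0 base address), picks the local or the
 * remote side with parm.rwmode, and reads the value back from
 * parm.reg_data after the copy_to_user() at the end of rdma_ioctl().
 * The device path is an assumption for illustration only.
 *
 *	rdma_ioc_parm_t parm = {0};
 *	int fd = open("/dev/mokx_0_:0_r", O_RDONLY);	// assumed node name
 *	parm.rwmode = TYPE_OPER_NATIVE;
 *	parm.reqlen = 2;	// reads MOK_X_RECEIVED_PACKET_COUNTER0 + 2
 *	if (fd >= 0 && ioctl(fd, MOK_X_IOC_RECEIVED_PACKET_COUNTER, &parm) == 0)
 *		printf("rx counter 2: %u\n", parm.reg_data);
 */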
4343 case MOK_X_IOC_SET_NATIVE_MODE:
4345 ret = mok_x_set_native_mode(link, &parm.acclen);
4346 break;
4349 case MOK_X_IOC_UNSET_NATIVE_MODE:
4351 ret = mok_x_unset_native_mode(link, &parm.acclen);
4352 break;
4356 /* Set link mode */
4358 case MOK_X_IOC_SET_MODE_LINK:
4360 if (rdma_link->mok_x_config_sem_link == CONFIG_SEM_LINK_DOWN) {
4361 parm.acclen = CONFIG_SEM_LINK_DOWN;
4362 ret = IOC_SUCCESFULL;
4363 goto exit_set_mode_link;
4364 }
4365 rdma_link->mok_x_config_sem_link = CONFIG_SEM_LINK_DOWN;
4366 if (parm.reqlen == STATE_LINK_NATIVE) {
4367 ret = mok_x_set_native_mode(link, &parm.err_no);
4368 if (ret) {
4369 ERROR_MSG("Error native mode set: %d. Errno: %d\n",
4370 ret, parm.err_no);
4371 } else {
4372 ERROR_MSG("Native mode set: %d. Errno: %d\n",
4373 ret, parm.err_no);
4374 }
4375 }
4376 if (parm.reqlen == STATE_LINK_ONLY_RECIVE) {
4377 //ret = mok_x_unset_native_mode(link, &parm.err_no);
4378 rdma_link->mok_x_mode_number_link = parm.rwmode;
4379 if (parm.reqlen1 <= rdma_link->buf_size) {
4380 rdma_link->mok_x_buf_size =
4381 (rdma_link->tm_mode ?
4382 ALIGN(parm.reqlen1, (rdma_link->align_buf_tm * PAGE_SIZE)) :
4383 (rfsm ? rdma_link->buf_size : allign_dma(parm.reqlen1)));
4384 } else {
4385 parm.reqlen1 = rdma_link->buf_size;
4386 ret = -EMSGSIZE;
4387 ERROR_MSG("%s: Requested size: 0x%x larger than buffer: 0x%x\n",
4388 __FUNCTION__, parm.reqlen1, rdma_link->buf_size);
4389 goto exit_set_mode_link;
4390 }
4391 parm.reqlen1 = rdma_link->mok_x_buf_size;
4392 rdma_link->generator_mode = parm.reqlen2;
4393 ret = mok_x_set_mode4(link);
4394 if (ret) {
4395 mok_x_unset_mode4(link);
4396 set_mask(link, irq_mc);
4397 ERROR_MSG("Error only-receive mode set: %d. Errno: %d\n",
4398 ret, parm.err_no);
4399 } else {
4400 ERROR_MSG("Only-receive mode set: %d. Errno: %d\n",
4401 ret, parm.err_no);
4402 }
4403 }
4404 if (!ret)
4405 rdma_link->mok_x_mode_link = parm.reqlen;
4406 parm.acclen = rdma_link->mok_x_mode_link;
4407 exit_set_mode_link:;
4408 rdma_link->mok_x_config_sem_link = CONFIG_SEM_LINK_UP;
4409 break;
4413 /* Reset link mode */
4415 case MOK_X_IOC_RESET_MODE_LINK:
4417 ret = set_mode_default_remote(link);
4418 ret = set_mode_default(link);
4419 rdma_link->mok_x_mode_link = STATE_LINK_DEFAULT;
4420 parm.acclen = rdma_link->mok_x_mode_link;
4421 break;
4425 /* Get link mode */
4427 case MOK_X_IOC_GET_MODE_LINK:
4429 if (rdma_link->mok_x_config_sem_link == CONFIG_SEM_LINK_DOWN) {
4430 parm.acclen = CONFIG_SEM_LINK_DOWN;
4431 goto exit_get_mode_link;
4432 }
4433 rdma_link->mok_x_config_sem_link = CONFIG_SEM_LINK_DOWN;
4434 parm.acclen = rdma_link->mok_x_mode_link;
4435 rdma_link->mok_x_config_sem_link = CONFIG_SEM_LINK_UP;
4436 exit_get_mode_link:;
4437 ret = IOC_SUCCESFULL;
4438 break;
4442 /* Set link config semaphore */
4444 case MOK_X_IOC_SET_CONFIG_SEM_LINK:
4446 rdma_link->mok_x_config_sem_link = parm.reqlen;
4447 parm.acclen = rdma_link->mok_x_config_sem_link;
4448 ret = 0;
4449 break;
4453 /* Get link config semaphore */
4455 case MOK_X_IOC_GET_CONFIG_SEM_LINK:
4457 parm.acclen = rdma_link->mok_x_config_sem_link;
4458 ret = 0;
4459 break;
4463 /* Start DMA in extended mode */
4465 case MOK_X_IOC_START_DMA:
4469 /* Set mask: only RDC interrupt enabled */
4471 set_mask(link, irq_mc_rdc);
4473 /* Program the receive DMA */
4475 mok_x_prog_recieve_dma(link, 0);
4477 ret = IOC_SUCCESFULL;
4478 break;
4481 default:
4482 ERROR_MSG("%s: link: %d unknown cmd: 0x%08x\n", __FUNCTION__,
4483 link, cmd);
4484 ret = -EFAULT;
4485 break;
4486 }
4488 rval = copy_to_user((rdma_ioc_parm_t __user *)arg, &parm,
4489 sizeof (rdma_ioc_parm_t));
4490 if (rval) {
4491 ERROR_MSG("%s: link: %d cmd: 0x%08x copy_to_user failed\n",
4492 __FUNCTION__, link, cmd);
4493 ret = -EFAULT;
4494 }
4495 RDMA_IOCTL_DEBUG_MSG("%s: link: %d cmd: 0x%08x FINISH\n", __FUNCTION__,
4496 link, cmd);
4497 return ret;
4498 }
4500 #ifdef CONFIG_COMPAT
4501 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
4502 {
4503 int ret;
4504 ret = rdma_ioctl(f, cmd, arg);
4505 return ret;
4506 }
4508 static long rdma_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
4509 {
4510 switch (cmd) {
4512 case RDMA_IOC_DUMPREG0:
4513 case RDMA_IOC_DUMPREG1:
4514 case RDMA_IOC_WRR:
4515 case RDMA_IOC_RDR:
4516 case RDMA_IOC_GET_neighbour_map:
4517 case RDMA_CLEAN_TDC_COUNT:
4518 case RDMA_GET_CLKR:
4519 case RDMA_GET_MAX_CLKR:
4520 case RDMA_CLEAN_RDC_COUNT:
4521 case RDMA_TIMER_FOR_READ:
4522 case RDMA_TIMER_FOR_WRITE:
4523 case RDMA_IOC_ALLOCB:
4524 case RDMA_GET_STAT:
4525 case RDMA_GET_EVENT:
4526 case RDMA_SET_STAT:
4527 case RDMA_SET_ATL:
4528 case RDMA_IS_CAM_YES:
4529 case RDMA_IS_CAM_NO:
4530 case RDMA_WAKEUP_WRITER:
4531 case RDMA_WAKEUP_READER:
4532 case RDMA_IOC_GET_ID:
4533 case RDMA_IOC_RESET_DMA:
4534 case RDMA_IOC_SET_MODE_RFSM:
4535 case RDMA_IOC_SET_MODE_EXIT_GP0:
4536 case RDMA_IOC_RESET_TCS:
4537 case RDMA_IOC_RESET_RCS:
4538 case RDMA_IOC_SET_MODE_LOOP:
4539 case RDMA_IOC_GET_BUF_NUM:
4540 case RDMA_IOC_SET_BUF_NUM:
4541 case RDMA_IOC_GET_BUF_SIZE:
4542 case RDMA_IOC_RD_BUF:
4543 case RDMA_IOC_WR_BUF:
4544 case RDMA_IOC_GET_RD_BUF:
4545 case RDMA_IOC_GET_WR_BUF:
4546 case RDMA_IOC_PUT_RD_BUF:
4547 case RDMA_IOC_PUT_WR_BUF:
4548 case RDMA_IOC_SET_TIMEOUT_RD:
4549 case RDMA_IOC_SET_TIMEOUT_WR:
4550 case MOK_X_IOC_SET_ONLY_RECEIVE_MODE:
4551 case MOK_X_IOC_SET_NATIVE_MODE:
4552 case MOK_X_IOC_READ_REG:
4553 case MOK_X_IOC_WRITE_REG:
4554 case MOK_X_IOC_READ_MDIO_REG:
4555 case MOK_X_IOC_WRITE_MDIO_REG:
4556 case MOK_X_IOC_CHANGE_MODE:
4557 case MOK_X_IOC_SET_MODE_LINK:
4558 case MOK_X_IOC_RESET_MODE_LINK:
4559 case RDMA_IOC_GET_TM_MODE:
4560 case RDMA_IOC_SET_TM_MODE:
4561 case RDMA_IOC_GET_ALIGN_BUF_TM:
4562 case RDMA_IOC_SET_ALIGN_BUF_TM:
4563 case RDMA_IOC_GET_PAGE_SIZE:
4564 case RDMA_IOC_GET_NODE_MEM_ALLOC:
4565 case RDMA_IOC_SET_NODE_MEM_ALLOC:
4566 case RDMA_IOC_GET_MAX_SIZE_BUFF:
4567 case RDMA_IOC_SET_MAX_SIZE_BUFF:
4568 case RDMA_IOC_GET_MAX_SIZE_BUFF_TM:
4569 case RDMA_IOC_SET_MAX_SIZE_BUFF_TM:
4570 case RDMA_IOC_MEMRY_ALLOC:
4571 case RDMA_IOC_ALLOC_TYPE:
4572 return do_ioctl(f, cmd, arg);
4573 default:
4574 return -ENOIOCTLCMD;
4575 }
4576 }
4577 #endif
4579 #define GET_FILE_MINOR_DBG 0
4580 #define GET_FILE_MINOR_DEBUG_MSG(x...)\
4581 if (GET_FILE_MINOR_DBG) DEBUG_MSG(x)
4582 int get_file_minor(struct file *file)
4583 {
4584 int major;
4585 struct dentry *f_dentry_rdma;
4586 struct inode *d_inode;
4588 f_dentry_rdma = file->f_dentry;
4589 if (!f_dentry_rdma) {
4590 ERROR_MSG( "get_file_minor: file->f_dentry is NULL\n");
4591 return -EBADF;
4592 }
4593 d_inode = f_dentry_rdma->d_inode;
4594 if (!d_inode) {
4595 ERROR_MSG( "get_file_minor: f_dentry->d_inode is NULL\n");
4596 return -EBADF;
4597 }
4598 major = MAJOR(d_inode->i_rdev);
4599 GET_FILE_MINOR_DEBUG_MSG("get_file_minor:d_inode->i_rdev: 0x%08u "
4600 "major: %d minor:%u\n", d_inode->i_rdev, major,
4601 MINOR(d_inode->i_rdev));
4602 return MINOR(d_inode->i_rdev);
4603 }
4605 #define RDMA_REMAP_DBG 0
4606 #define RDMA_REMAP_DEBUG_MSG(x...)\
4607 if (RDMA_REMAP_DBG) DEBUG_MSG(x)
4608 #define REMAP RDMA_REMAP_DEBUG_MSG
4609 int rdma_remap_page(void *va, size_t sz, struct vm_area_struct *vma)
4610 {
4611 unsigned long pha;
4612 unsigned long vm_end;
4613 unsigned long vm_start;
4614 unsigned long vm_pgoff;
4615 size_t size;
4617 REMAP("%s: START\n", __FUNCTION__);
4618 if (!sz) return -EINVAL;
4619 pha = virt_to_phys(va);
4620 size = (long )PAGE_ALIGN((pha & ~PAGE_MASK) + sz);
4621 if ((vma->vm_pgoff << PAGE_SHIFT) > size) return -ENXIO;
4622 pha += (vma->vm_pgoff << PAGE_SHIFT);
4623 vm_end = vma->vm_end;
4624 vm_start = vma->vm_start;
4625 vm_pgoff = vma->vm_pgoff;
4627 if ((vm_end - vm_start) < size)
4628 size = vm_end - vm_start;
4629 vma->vm_flags |= (VM_READ | VM_WRITE | VM_RESERVED | VM_IO);
4630 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) |
4631 _PAGE_CD_DIS | _PAGE_PWT );
4632 if (remap_pfn_range(vma, vm_start, (pha >> PAGE_SHIFT), size,
4633 vma->vm_page_prot)) {
4634 ERROR_MSG("%s: FAIL remap_pfn_range\n", __FUNCTION__);
4635 return -EAGAIN;
4636 }
4637 REMAP("%s: FINISH\n", __FUNCTION__);
4638 return 0;
4639 }
4641 #define RDMA_REMAP_T_DBG 0
4642 #define RDMA_REMAP_T_DEBUG_MSG(x...)\
4643 if (RDMA_REMAP_T_DBG) DEBUG_MSG(x)
4644 #define REMAP_T RDMA_REMAP_T_DEBUG_MSG
4645 int rdma_remap_page_tbl(void *va, size_t sz, struct vm_area_struct *vma, int align)
4646 {
4647 rdma_tbl_64_struct_t *ptbl;
4648 unsigned long vm_start;
4649 unsigned long vm_pgoff;
4650 unsigned long sz_pha;
4651 unsigned long vm_end;
4652 unsigned long pha;
4653 size_t size;
4655 REMAP_T("%s: START size(sz): 0x%016lx\n", __FUNCTION__, sz);
4656 if (!sz) return -EINVAL;
4657 if (vma->vm_pgoff) {
4658 ERROR_MSG("%s: vma->vm_pgoff: 0x%lx\n", __FUNCTION__,
4659 vma->vm_pgoff);
4660 return -EINVAL;
4661 }
4662 //size = (long)PAGE_ALIGN(sz);
4663 size = (long)ALIGN(sz, align * PAGE_SIZE);
4664 vm_end = vma->vm_end;
4665 vm_start = vma->vm_start;
4666 vm_pgoff = vma->vm_pgoff;
4667 if ((vm_end - vm_start) < size) {
4668 size = vm_end - vm_start;
4669 REMAP_T("%s: vm_end(%lx) - vm_start(%lx) < size(%lx)\n",
4670 __FUNCTION__, vm_end, vm_start, size);
4671 }
4672 vma->vm_flags |= (VM_READ | VM_WRITE | VM_RESERVED | VM_IO);
4673 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) |
4674 _PAGE_CD_DIS | _PAGE_PWT );
4675 for (ptbl = (rdma_tbl_64_struct_t *)va; ptbl; ptbl++) {
4676 rdma_addr_struct_t pxx;
4677 pxx.addr = (unsigned long)ptbl;
4678 REMAP_T("%s: 0x%08x%08x ptbl\n", __FUNCTION__, pxx.fields.haddr,
4679 pxx.fields.laddr);
4680 pxx.addr = ptbl->addr;
4681 REMAP_T("%s: 0x%08x%08x ptbl->addr\n", __FUNCTION__,
4682 pxx.fields.haddr, pxx.fields.laddr);
4683 pha = (unsigned long)ptbl->addr;
4684 pxx.addr = (unsigned long)phys_to_virt(pha);
4685 REMAP_T("%s: 0x%08x%08x __va(ptbl->addr)\n",
4686 __FUNCTION__, pxx.fields.haddr, pxx.fields.laddr);
4687 pxx.addr = pha;
4688 REMAP_T("%s: 0x%08x%08x __fa(ptbl->addr)\n",
4689 __FUNCTION__, pxx.fields.haddr, pxx.fields.laddr);
4690 sz_pha = ptbl->sz;
4691 //sz_pha = cpu_to_le64(sz_pha);
4692 REMAP_T("%s: sz_pha: %lx\n", __FUNCTION__, sz_pha);
4693 if (remap_pfn_range(vma, vm_start, (pha >> PAGE_SHIFT), sz_pha,
4694 vma->vm_page_prot)) {
4695 ERROR_MSG("%s: FAIL remap_pfn_range\n", __FUNCTION__);
4696 return -EAGAIN;
4697 }
4698 vm_start += sz_pha;
4699 REMAP_T("%s: vm_start: %lx vm_end: %lx sz_pha: %lx \n",
4700 __FUNCTION__, vm_start, vm_end, sz_pha);
4701 if (vm_start >= vm_end) {
4702 REMAP_T("%s: vm_start(%lx) >= vm_end(%lx)\n", __FUNCTION__,
4703 vm_start, vm_end);
4704 break;
4705 }
4706 }
4707 REMAP_T("%s: FINISH\n", __FUNCTION__);
4708 return 0;
4709 }
4711 #define RDMA_MMAP_DBG 0
4712 #define RDMA_MMAP_DEBUG_MSG(x...)\
4713 if (RDMA_MMAP_DBG) DEBUG_MSG(x)
4714 static int rdma_mmap(struct file *file, struct vm_area_struct *vma)
4715 {
4716 rdma_pool_buf_t *pool_buf;
4717 rdma_state_link_t *rdma_link;
4718 rw_state_t *rdma_private_data;
4719 int minor, rw;
4720 int link;
4721 int rval;
4723 RDMA_MMAP_DEBUG_MSG("%s: START\n", __FUNCTION__);
4724 minor = get_file_minor(file);
4725 //minor = MINOR(inode->i_rdev);
4726 if (minor < 0)
4727 return minor;
4728 link = DEV_inst(minor);
4729 rdma_link = &rdma_state->rdma_link[link];
4730 rdma_private_data = file->private_data;
4731 rw = rdma_private_data->open_mode;
4732 pool_buf = rw ? &rdma_link->write_pool :
4733 &rdma_link->read_pool;
4734 #if 0
4735 if (pool_buf->alloc != RDMA_BUF_ALLOCED) {
4736 ERROR_MSG("%s : pool_buf->alloc != RDMA_BUF_ALLOCED\n",
4737 __FUNCTION__);
4738 return -EAGAIN;
4739 }
4740 #endif
4741 if (pool_buf->tm_mode) {
4742 rval = rdma_remap_page_tbl((void *)pool_buf->vdma,
4743 pool_buf->dma_size,
4744 vma, pool_buf->align_buf_tm);
4745 } else {
4746 rval = rdma_remap_page((void *)pool_buf->vdma,
4747 //rval = rdma_remap_page((unsigned long)pool_buf->fdma,
4748 pool_buf->dma_size, vma);
4749 }
4750 if (rval) {
4751 ERROR_MSG("%s: FAIL\n", __FUNCTION__);
4752 return -EAGAIN;
4753 }
4754 pool_buf->alloc = RDMA_BUF_MMAP;
4755 RDMA_MMAP_DEBUG_MSG("%s: FINISH\n", __FUNCTION__);
4756 return 0;
4757 }
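#if 0
/*
 * Hedged userspace sketch for rdma_mmap() above: the read (or write)
 * buffer pool of a minor is mapped with a plain mmap(); the driver
 * remaps it non-cached (_PAGE_CD_DIS | _PAGE_PWT) and marks it VM_IO,
 * so the mapping stays coherent with DMA at the cost of uncached
 * access. The buffer size would normally be obtained first (e.g. via
 * RDMA_IOC_GET_BUF_SIZE); the device path is an assumption.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *mokx_map_read_pool(const char *dev, size_t size)
{
	int fd = open(dev, O_RDONLY);
	void *p;

	if (fd < 0)
		return NULL;
	p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
	close(fd);	/* the mapping remains valid after close */
	return (p == MAP_FAILED) ? NULL : p;
}
#endif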
4759 unsigned long __get_free_pages_rdma(int node, gfp_t gfp_mask,
4760 unsigned int order, int node_mem_alloc)
4761 {
4762 struct page *page;
4763 if (node_mem_alloc)
4764 page = alloc_pages_node(node, gfp_mask, order);
4765 else
4766 page = alloc_pages(gfp_mask, order);
4767 if (!page)
4768 return (unsigned long)NULL;
4769 return (unsigned long) page_address(page);
4770 }
4772 #define RDMA_MEM_ALLOC_DBG 0
4773 #define RDMA_MEM_ALLOC_DEBUG_MSG(x...)\
4774 if (RDMA_MEM_ALLOC_DBG) DEBUG_MSG(x)
4775 int rdma_mem_alloc(int node, size_t size, dma_addr_t *mem, size_t *real_size,
4776 unsigned long *dma_memory, int node_mem_alloc)
4777 {
4778 struct page *map, *mapend;
4779 int order;
4781 RDMA_MEM_ALLOC_DEBUG_MSG("%s: START\n", __FUNCTION__);
4782 order = get_order(size);
4783 *dma_memory = __get_free_pages_rdma(node, GFP_KERNEL , order,
4784 node_mem_alloc);
4785 if (!(*dma_memory)) {
4786 ERROR_MSG("%s: Cannot allocate DMA memory, order: %d"
4787 " size: 0x%lx\n", __FUNCTION__, order, size);
4788 return -1;
4789 }
4790 mapend = virt_to_page((*dma_memory) + (PAGE_SIZE << order) - 1);
4791 for (map = virt_to_page((*dma_memory)); map <= mapend; map++)
4792 SetPageReserved(map);
4793 *mem = __pa(*dma_memory);
4795 *real_size = PAGE_SIZE << order;
4796 RDMA_MEM_ALLOC_DEBUG_MSG("%s: FINISH va: 0x%lx fa: 0x%llx size: 0x%lx "
4797 "real_size: 0x%lx\n", __FUNCTION__, *dma_memory,
4798 *mem, size, *real_size);
4799 return 0;
4800 }
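/*
 * Worked example of the rounding above, assuming 4 KiB pages: a request
 * of size 0x9000 (9 pages) yields order = get_order(0x9000) = 4, so the
 * allocation actually occupies PAGE_SIZE << 4 = 0x10000 bytes, and that
 * is what the caller sees in *real_size. Table mode (below) sidesteps
 * this power-of-two overshoot on large buffers by chaining smaller
 * page-aligned chunks instead of one contiguous block.
 */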
4804 /* Size of a table element (SIZE_TLB_EL): 64-bit addr and 64-bit size */
4806 #define RDMA_MEM_ALLOC_POOL_DBG 0
4807 #define RDMA_MEM_ALLOC_POOL_DEBUG_MSG(x...)\
4808 if (RDMA_MEM_ALLOC_POOL_DBG) DEBUG_MSG(x)
4809 int rdma_mem_alloc_pool(rdma_pool_buf_t *pool_buf)
4810 {
4811 rdma_tbl_64_struct_t *peltbl;
4812 rdma_addr_struct_t pxx;
4813 size_t size_tm;
4814 char *err_msg = NULL;
4815 int SIZE_TLB, max_size, rest;
4817 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: START \n", __FUNCTION__);
4818 if (pool_buf->tm_mode) {
4819 max_size = pool_buf->size;
4820 //SIZE_TLB = ((PAGE_ALIGN(max_size) / PAGE_SIZE + 1) * SIZE_TLB_EL);
4821 SIZE_TLB = ((PAGE_ALIGN(max_size) / (pool_buf->align_buf_tm * PAGE_SIZE) + 1) * SIZE_TLB_EL);
4822 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: table mode ALIGN PAGE_SIZE: 0x%016lx\n",
4823 __FUNCTION__,
4824 pool_buf->align_buf_tm * PAGE_SIZE);
4825 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: try alloc for tm size "
4826 "SIZE_TLB : 0x%08x\n",
4827 __FUNCTION__, SIZE_TLB);
4828 if (rdma_mem_alloc(pool_buf->node_for_memory, SIZE_TLB,
4829 (dma_addr_t *)&pool_buf->fdma, &size_tm,
4830 (unsigned long *)&pool_buf->vdma, pool_buf->node_mem_alloc )) {
4831 err_msg = "rdma_mem_alloc for tm";
4832 goto failed;
4833 }
4834 pxx.addr = (unsigned long)pool_buf->vdma;
4835 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: 0x%08x%08x virt_mem table\n",
4836 __FUNCTION__,
4837 pxx.fields.haddr, pxx.fields.laddr);
4838 pxx.addr = pool_buf->fdma;
4839 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: 0x%08x%08x phys_mem table\n",
4840 __FUNCTION__,
4841 pxx.fields.haddr, pxx.fields.laddr);
4842 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: size table: 0x%016lx \n",
4843 __FUNCTION__, size_tm);
4844 pool_buf->size_tm = size_tm;
4845 rest = (int)pool_buf->size;
4846 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: rest: 0x%08x pool_buf->size: 0x%016lx\n",
4847 __FUNCTION__, rest, pool_buf->size);
4848 pool_buf->dma_size = 0;
4849 for (peltbl = (rdma_tbl_64_struct_t *)pool_buf->vdma; rest > 0;
4850 peltbl++){
4851 size_t size_el;
4852 unsigned long addr;
4853 if (rdma_mem_alloc(pool_buf->node_for_memory,
4854 pool_buf->align_buf_tm * PAGE_SIZE,
4855 (dma_addr_t *)&peltbl->addr,
4856 &size_el, (unsigned long *)&addr,
4857 pool_buf->node_mem_alloc)) {
4858 goto failed;
4859 }
4860 pxx.addr = (unsigned long)peltbl;
4861 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: 0x%08x%08x peltbl\n",
4862 __FUNCTION__,
4863 pxx.fields.haddr, pxx.fields.laddr);
4864 //peltbl->addr = le64_to_cpu(peltbl->addr);
4865 pxx.addr = peltbl->addr;
4866 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: 0x%08x%08x peltbl->addr\n",
4867 __FUNCTION__, pxx.fields.haddr,
4868 pxx.fields.laddr);
4869 peltbl->sz = (unsigned long)size_el;
4870 //peltbl->sz = le64_to_cpu(peltbl->sz);
4871 rest -= size_el;
4872 pool_buf->dma_size += size_el;
4873 }
4874 peltbl->sz = 0;
4875 } else {
4876 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: single mode PAGE_SIZE: 0x%016lx\n",
4877 __FUNCTION__, PAGE_SIZE);
4878 //if (pool_buf->size > num_buf * allign_dma(MAX_SIZE_BUFF)) {
4879 // ERROR_MSG("%s: The large size of the buffer. "
4880 // "The buffer must be <= 0x%08x.\n",
4881 // __FUNCTION__, MAX_SIZE_BUFF);
4882 // goto failed;
4884 if (rdma_mem_alloc(pool_buf->node_for_memory, pool_buf->size,
4885 (dma_addr_t *)&pool_buf->fdma, &pool_buf->dma_size,
4886 (unsigned long *)&pool_buf->vdma, pool_buf->node_mem_alloc)) {
4887 err_msg = "rdma_mem_alloc";
4888 goto failed;
4889 }
4890 pxx.addr = (unsigned long)pool_buf->vdma;
4891 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: 0x%08x%08x virt_mem\n",
4892 __FUNCTION__,
4893 pxx.fields.haddr, pxx.fields.laddr);
4894 pxx.addr = pool_buf->fdma;
4895 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: 0x%08x%08x phys_mem\n",
4896 __FUNCTION__,
4897 pxx.fields.haddr, pxx.fields.laddr);
4898 }
4899 RDMA_MEM_ALLOC_POOL_DEBUG_MSG("%s: FINISH buf real size: 0x%016lx\n",
4900 __FUNCTION__, pool_buf->dma_size);
4901 return 0;
4903 failed:
4904 ERROR_MSG("%s: %s FAILED ****\n", __FUNCTION__, err_msg ? err_msg : "rdma_mem_alloc for tm element");
4905 return (-1);
4906 }
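/*
 * Hedged sketch of the DMA table built above: an array of
 * rdma_tbl_64_struct_t elements, each holding a 64-bit physical address
 * and a 64-bit size, terminated by an element with sz == 0 (written
 * right after the allocation loop). A consumer can walk it like this:
 *
 *	rdma_tbl_64_struct_t *el;
 *	size_t total = 0;
 *
 *	for (el = (rdma_tbl_64_struct_t *)pool_buf->vdma; el->sz; el++)
 *		total += el->sz;	// total == pool_buf->dma_size
 */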
4908 #define RDMA_MEM_FREE_DBG 0
4909 #define RDMA_MEM_FREE_DEBUG_MSG(x...)\
4910 if (RDMA_MEM_FREE_DBG) DEBUG_MSG(x)
4911 void rdma_mem_free(size_t size, dma_addr_t dev_memory,
4912 unsigned long dma_memory)
4913 {
4914 struct page *map, *mapend;
4915 caddr_t mem;
4916 int order;
4918 RDMA_MEM_FREE_DEBUG_MSG("%s: START\n", __FUNCTION__);
4919 mem = (caddr_t)dma_memory;
4920 order = get_order(size);
4921 mapend = virt_to_page(mem + (PAGE_SIZE << order) - 1);
4922 for (map = virt_to_page(mem); map <= mapend; map++)
4923 ClearPageReserved(map);
4924 free_pages(dma_memory, order);
4925 RDMA_MEM_FREE_DEBUG_MSG("%s: FINISH va: 0x%lx, fa: 0x%llx size: 0x%lx\n",
4926 __FUNCTION__, dma_memory, dev_memory, size);
4927 }
4929 #define RDMA_MEM_FREE_POOL_DBG 0
4930 #define RDMA_MEM_FREE_POOL_DEBUG_MSG(x...)\
4931 if (RDMA_MEM_FREE_POOL_DBG) DEBUG_MSG(x)
4932 void rdma_mem_free_pool(rdma_pool_buf_t *pool_buf)
4933 {
4934 signed int rest;
4936 RDMA_MEM_FREE_POOL_DEBUG_MSG("%s: START\n", __FUNCTION__);
4937 if (pool_buf->alloc) {
4938 if (pool_buf->tm_mode) {
4939 rdma_tbl_64_struct_t *peltbl;
4940 for (peltbl = (rdma_tbl_64_struct_t *)pool_buf->vdma,
4941 rest = pool_buf->dma_size; rest > 0; peltbl++) {
4942 rdma_mem_free(peltbl->sz, (dma_addr_t) peltbl->addr,
4943 (unsigned long) __va(peltbl->addr));
4944 rest -= peltbl->sz;
4945 }
4946 rdma_mem_free(pool_buf->size_tm, pool_buf->fdma,
4947 (unsigned long)pool_buf->vdma);
4948 } else {
4949 //if (pool_buf->size) {
4950 //if (pool_buf->alloc) {
4951 rdma_mem_free(pool_buf->dma_size, pool_buf->fdma,
4952 (unsigned long)pool_buf->vdma);
4953 }
4954 }
4955 pool_buf->size = 0;
4956 pool_buf->dma_size = 0;
4957 pool_buf->alloc = RDMA_BUF_EMPTY;
4958 pool_buf->vdma = NULL;
4959 pool_buf->fdma = 0;
4960 RDMA_MEM_FREE_POOL_DEBUG_MSG("%s: FINISH\n", __FUNCTION__);
4961 }
4963 #define INIT_RDMA_LINK_DBG 0
4964 #define INIT_RDMA_LINK_DEBUG_MSG(x...)\
4965 if (INIT_RDMA_LINK_DBG) DEBUG_MSG(x)
4966 void rdma_link_init(int link)
4967 {
4968 rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];
4969 dev_rdma_sem_t *dev_sem;
4970 rw_state_t *pd, *pm;
4971 rdma_addr_struct_t p_xxb;
4972 int i;
4974 INIT_RDMA_LINK_DEBUG_MSG("%s: START\n", __FUNCTION__);
4975 p_xxb.addr = (unsigned long)rdma_link;
4976 INIT_RDMA_LINK_DEBUG_MSG("%s: link: %d rdma_link: 0x%08x%08x\n",
4977 __FUNCTION__, link, p_xxb.fields.haddr,
4978 p_xxb.fields.laddr);
4979 rdma_link->link = link;
4980 rdma_link->tm_mode = tm_mode;
4981 rdma_link->align_buf_tm = align_buf_tm;
4982 rdma_link->max_size_buf = max_size_buf;
4983 rdma_link->max_size_buf_tm = max_size_buf_tm;
4984 rdma_link->num_buf = num_buf;
4985 rdma_link->node_mem_alloc = node_mem_alloc;
4986 rdma_link->type_alloc = 0;
4987 rdma_link->trwd_lock = 0;
4988 rdma_link->trwd_lock_err = 0;
4989 mutex_init(&rdma_link->mu);
4990 pm = &rdma_link->talive;
4991 mutex_init(&pm->mu);
4992 raw_spin_lock_init(&pm->mu_spin);
4993 pm->stat = 0;
4994 pm->timer = TIMER_MIN;
4995 dev_sem = &pm->dev_rdma_sem;
4996 raw_spin_lock_init(&dev_sem->lock);
4997 cv_init(&dev_sem->cond_var);
4998 dev_sem->irq_count_rdma = 0;
4999 pm = &rdma_link->ralive;
5000 mutex_init(&pm->mu);
5001 raw_spin_lock_init(&pm->mu_spin);
5002 pm->stat = 0;
5003 pm->timer = TIMER_MIN;
5004 dev_sem = &pm->dev_rdma_sem;
5005 raw_spin_lock_init(&dev_sem->lock);
5006 cv_init(&dev_sem->cond_var);
5007 dev_sem->irq_count_rdma = 0;
5008 for (i = 0; i < 2; i++) {
5009 pm = &rdma_link->rw_states_m[i];
5010 mutex_init(&pm->mu);
5011 raw_spin_lock_init(&pm->mu_spin);
5012 pm->stat = 0;
5013 pm->timer = TIMER_MIN;
5014 dev_sem = &pm->dev_rdma_sem;
5015 raw_spin_lock_init(&dev_sem->lock);
5016 cv_init(&dev_sem->cond_var);
5017 dev_sem->irq_count_rdma = 0;
5018 pd = &rdma_link->rw_states_d[i];
5019 mutex_init(&pd->mu);
5020 raw_spin_lock_init(&pd->mu_spin);
5021 raw_spin_lock_init(&pd->lock_wr);
5022 raw_spin_lock_init(&pd->lock_rd);
5023 dev_sem = &pd->dev_rdma_sem;
5024 raw_spin_lock_init(&dev_sem->lock);
5025 cv_init(&dev_sem->cond_var);
5026 dev_sem->irq_count_rdma = 0;
5027 pd->trwd_was = 0;
5028 pd->clock_receive_trwd = 0;
5029 pd->clock_begin_read = 0;
5030 pd->clock_end_read_old = 0;
5031 pd->clock_begin_read_old = 0;
5032 pd->trwd_send_count = 0;
5033 pd->ready_send_count = 0;
5034 pd->trwd_rec_count = 0;
5035 pd->ready_rec_count = 0;
5036 pd->n_ready = 0;
5037 pd->stat = 0;
5038 pd->timer_read = TIMER_MIN;
5039 pd->timer_write = TIMER_MIN;
5040 pd->timer_for_read = TIMER_FOR_READ_MIN;
5041 pd->timer_for_write = TIMER_FOR_WRITE_MIN;
5042 pd->state_open_close = 0;
5043 pd->first_open = 0;
5044 }
5045 raw_spin_lock_init(&rdma_link->mutex_send_msg);
5046 rdma_link = &rdma_state->rdma_link[link];
5047 rdma_link->mok_x_config_sem_link = CONFIG_SEM_LINK_UP;
5048 rdma_link->mok_x_mode_link = STATE_LINK_DEFAULT;
5049 rdma_link->mok_x_mode_number_link = MODE0_LINK;
5050 #ifdef UNX_TRWD
5051 rdma_link->unexpected_trwd = 0;
5052 rdma_link->unexpected_trwd_size = 0;
5053 #endif
5054 INIT_RDMA_LINK_DEBUG_MSG("%s: FINISH\n", __FUNCTION__);
5055 }
5057 void read_regs_rdma(int i)
5058 {
5059 printk("%d 0x%08x - 0x0 SHIFT_IOL_CSR\n", i,
5060 RDR_rdma(SHIFT_IOL_CSR, i));
5061 printk("%d 0x%08x - 0x0 SHIFT_IO_CSR\n", i,
5062 RDR_rdma(SHIFT_IO_CSR, i));
5063 printk("%d 0x%08x - 0x0 SHIFT_VID\n", i,
5064 RDR_rdma(SHIFT_VID, i));
5065 printk("%d 0x%08x - 0x4 SHIFT_CH_IDT\n", i,
5066 RDR_rdma(SHIFT_CH_IDT, i));
5067 printk("%d 0x%08x - 0x8 SHIFT_CS\n", i,
5068 RDR_rdma(SHIFT_CS, i));
5069 printk("%d 0x%08x 0x00 - SHIFT_DD_ID\n", i,
5070 RDR_rdma(SHIFT_DD_ID, i));
5071 printk("%d 0x%08x 0x04 - SHIFT_DMD_ID\n", i,
5072 RDR_rdma(SHIFT_DMD_ID, i));
5073 printk("%d 0x%08x 0x08 - SHIFT_N_IDT\n", i,
5074 RDR_rdma(SHIFT_N_IDT, i));
5075 printk("%d 0x%08x 0x0c - SHIFT_ES\n", i,
5076 RDR_rdma(SHIFT_ES, i));
5077 printk("%d 0x%08x 0x10 - SHIFT_IRQ_MC\n", i,
5078 RDR_rdma(SHIFT_IRQ_MC, i));
5079 printk("%d 0x%08x 0x14 - SHIFT_DMA_TCS\n", i,
5080 RDR_rdma(SHIFT_DMA_TCS, i));
5081 printk("%d 0x%08x 0x18 - SHIFT_DMA_TSA\n", i,
5082 RDR_rdma(SHIFT_DMA_TSA, i));
5083 printk("%d 0x%08x 0x1c - SHIFT_DMA_TBC\n", i,
5084 RDR_rdma(SHIFT_DMA_TBC, i));
5085 printk("%d 0x%08x 0x20 - SHIFT_DMA_RCS\n", i,
5086 RDR_rdma(SHIFT_DMA_RCS, i));
5087 printk("%d 0x%08x 0x24 - SHIFT_DMA_RSA\n", i,
5088 RDR_rdma(SHIFT_DMA_RSA, i));
5089 printk("%d 0x%08x 0x28 - SHIFT_DMA_RBC\n", i,
5090 RDR_rdma(SHIFT_DMA_RBC, i));
5091 printk("%d 0x%08x 0x2c - SHIFT_MSG_CS\n", i,
5092 RDR_rdma(SHIFT_MSG_CS, i));
5093 printk("%d 0x%08x 0x30 - SHIFT_TDMSG\n", i,
5094 RDR_rdma(SHIFT_TDMSG, i));
5096 printk("%d 0x%08x 0x34 - SHIFT_RDMSG\n", i,
5097 RDR_rdma(SHIFT_RDMSG, i));
5099 printk("%d 0x%08x 0x38 - SHIFT_CAM\n", i,
5100 RDR_rdma(SHIFT_CAM, i));
5101 }
5103 void del_dev_mokx(int major, int i)
5104 {
5105 int i_mokx = 0;
5106 char nod[128];
5107 int minor;
5109 for (i_mokx = 0; i_mokx < RDMA_NODE_DEV; i_mokx ++) {
5110 minor = RDMA_NODE_IOLINKS * i * RDMA_NODE_DEV + i_mokx;
5111 (void) sprintf(nod,"mokx_%d_:%d_r", i, i_mokx);
5112 device_destroy(mokx_class, MKDEV(major, minor));
5113 minor ++;
5114 (void) sprintf(nod,"mokx_%d_:%d_w", i, i_mokx);
5115 device_destroy(mokx_class, MKDEV(major, minor));
5116 }
5117 }
5119 int add_dev_mokx(int major, int mode, int i)
5120 {
5121 int i_mokx = 0;
5122 char nod[128];
5123 int ret = 0;
5124 int minor;
5126 for (i_mokx= 0; i_mokx < RDMA_NODE_DEV; i_mokx ++) {
5127 minor = RDMA_NODE_IOLINKS * i * RDMA_NODE_DEV + i_mokx;
5128 sprintf(nod,"mokx_%d_:%d_r", i, i_mokx);
5129 pr_info("make node /sys/class/mokx/%s\n", nod);
5130 if (IS_ERR(device_create(mokx_class, NULL, MKDEV(major,
5131 minor), NULL, nod))) {
5132 pr_err("create dev: %s a node: %d failed\n",
5133 nod, i);
5134 return -1;
5135 }
5136 minor ++;
5137 sprintf(nod,"mokx_%d_:%d_w", i, i_mokx);
5138 pr_info("make node /sys/class/mokx/%s\n", nod);
5139 if (IS_ERR(device_create(mokx_class, NULL, MKDEV(major,
5140 minor), NULL, nod))) {
5141 pr_err("create dev: %s a node: %d failed\n",
5142 nod, i);
5143 return -1;
5144 }
5145 }
5146 return ret;
5147 }
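/*
 * Illustration of the node layout created above, assuming
 * RDMA_NODE_IOLINKS == 2 and RDMA_NODE_DEV == 1 (the real values live
 * in the driver headers and may differ): link 1 gets minor 2 as
 * mokx_1_:0_r and minor 3 as mokx_1_:0_w, i.e. one read node and one
 * write node per channel, with read on the even minor of each pair.
 */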
5149 int create_dev_mokx(int major)
5150 {
5151 int i = 0,
5152 mode = 0,
5153 ret = 0;
5156 /* Create mokx nodes in /sysfs */
5158 mokx_class = class_create(THIS_MODULE, "mokx");
5159 if (IS_ERR(mokx_class)) {
5160 pr_err("Error creating class: /sys/class/mokx.\n");
return PTR_ERR(mokx_class);
5161 }
5162 //for_each_rdma(i) {
5163 for_each_online_rdma(i)
5164 if (add_dev_mokx(major, mode, i))
5165 ret = -1;
5166 return ret;
5167 }
5169 int remove_dev_mokx(int major)
5170 {
5171 int i = 0;
5174 /* Remove mokx nodes in /sysfs */
5176 for_each_rdma(i)
5177 del_dev_mokx(major, i);
5178 class_destroy(mokx_class);
5179 return 0;
5180 }
5182 module_init(rdma_init);
5183 module_exit(rdma_cleanup);