Lynx framebuffers multidomain implementation.
[linux/elbrus.git] / drivers / mcst / mokx / mokx_intrrupt.c
blob0b768c9718a82a9ea91d8ae5bcab05391ea884c5
/*
 * NOTE(review): this listing is an incomplete scrape — the leading integer on
 * each line is the original file's line number, and some original lines
 * (braces, comment delimiters) are missing from this view.
 */
/*
 * CAM_NO != 0: RIRM/RIAM identification events are acknowledged by writing
 * the event bit back to the event-status register (see rdma_interrupt).
 */
1 #define CAM_NO 1
/* Bitmap of neighbour nodes observed online; filled in from RIRM/RIAM interrupts. */
3 nodemask_t node_online_neighbour_map = NODE_MASK_NONE;
/* Count of neighbours recorded in node_online_neighbour_map. */
5 int node_neighbour_num = 0;
/* Forward declaration: per-link event dispatcher invoked by rdma_interrupt(). */
6 void intr_channel(unsigned int es, unsigned int tcs, unsigned int mcs,
7 unsigned int link, unsigned int msg_ext);
/*
 * rdma_interrupt() - top-level RDMA interrupt handler.
 *
 * Walks the IO-links belonging to the current CPU's node, acknowledges
 * link-identification events (RIRM = neighbour started, RIAM = neighbour
 * already active) while maintaining node_online_neighbour_map /
 * node_neighbour_num, then drains the remaining event-status bits through
 * intr_channel() for each link.
 *
 * NOTE(review): this listing is an incomplete scrape — the leading integer on
 * each line is the original file's line number; brace and comment-delimiter
 * lines are missing from this view, so comments below describe only the
 * visible logic.
 */
9 void rdma_interrupt(struct pt_regs *regs)
11 rdma_addr_struct_t p_xxb;
12 unsigned int es, tcs, mcs;
13 unsigned int node_neighbour_num_add = 0;
14 unsigned int node;
15 unsigned int link = NODE_NUMIOLINKS, i, inst;
16 unsigned int cpu;
/* Resolve the NUMA node of the CPU taking the interrupt (arch-specific). */
18 #ifdef CONFIG_E90S
19 cpu = raw_smp_processor_id();
20 unsigned int node_id = e90s_cpu_to_node(cpu);
21 #else
22 cpu = raw_smp_processor_id();
23 unsigned int node_id = numa_node_id();
24 #endif
26 * Temporarily until a correct definition of link
/* Iterate this node's IO links; skip links not present in the online-rdma set. */
28 for (i = 0; i < link; i++ ) {
29 node = node_id * RDMA_NODE_IOLINKS + i;
30 if (HAS_MACHINE_L_SIC) {
31 for_each_online_rdma(inst)
32 if ( node == inst ) goto next;
34 continue;
36 next:
37 event_intr(node, RDMA_INTR, START_EVENT, cpu);
/* Read CS (value discarded — presumably a required read-to-latch; TODO confirm),
 * then the event-status register that drives the rest of the handler. */
38 es = RDR_rdma(SHIFT_CS, node);
39 es = RDR_rdma(SHIFT_ES, node);
/* RIRM: the neighbour on this link has (re)started. */
40 if (es & ES_RIRM_Ev) {
42 * Started neighbor
/* Acknowledge by writing the event bit back (only when CAM_NO is set). */
44 #if CAM_NO
45 WRR_rdma(SHIFT_ES, node, ES_RIRM_Ev);
46 #endif
/* Record the neighbour once: bump node_neighbour_num only on the
 * first time this node's bit is set in the online map. */
47 node_neighbour_num_add = 0;
48 if (!node_test_and_set(node, node_online_neighbour_map))
49 node_neighbour_num_add = 1;
50 es &= ~ES_RIRM_Ev;
51 if (node_neighbour_num_add)
52 node_neighbour_num++;
/* Pack neighbour count (high nibble) and low map bits into one trace word. */
53 p_xxb.addr =
54 *((unsigned long *)&node_online_neighbour_map);
55 event_intr(node, RDMA_INTR, RIRM_EVENT,
56 ((node_neighbour_num & 0xf) << 28) |
57 (p_xxb.fields.laddr & 0x0fffffff));
/* RIAM: the neighbour reports it is already active; same bookkeeping as RIRM. */
59 if (es & ES_RIAM_Ev) {
60 /*
61 * Neighbor is already acive
63 #if CAM_NO
64 WRR_rdma(SHIFT_ES, node, ES_RIAM_Ev);
65 #endif
66 node_neighbour_num_add = 0;
67 if (!node_test_and_set(node, node_online_neighbour_map))
68 node_neighbour_num_add = 1;
69 if (node_neighbour_num_add)
70 node_neighbour_num++;
71 p_xxb.addr =
72 *((unsigned long *)&node_online_neighbour_map);
73 event_intr(node, RDMA_INTR, RIAM_EVENT,
74 ((node_neighbour_num & 0xf) << 28) |
75 (p_xxb.fields.laddr & 0x0fffffff));
76 es &= ~ES_RIAM_Ev;
/* Drain remaining masked events: re-read status each pass, clear all bits
 * except SM (and DSF when DSF_NO), then dispatch to intr_channel(). */
78 while ((es = RDR_rdma(SHIFT_ES, node)) & irq_mc) {
79 //while ((es = RDR_rdma(SHIFT_ES, node)) & irq_mc_rdc) {
80 tcs = RDR_rdma(SHIFT_DMA_TCS, node);
81 #if DSF_NO
82 WRR_rdma(SHIFT_ES, node, es & (~ES_SM_Ev & ~ES_DSF_Ev));
83 #else
84 WRR_rdma(SHIFT_ES, node, es & ~ES_SM_Ev);
85 #endif
86 mcs = RDR_rdma(SHIFT_MSG_CS, node);
87 intr_channel(es, tcs, mcs, node, 0x0);
89 event_intr(node, RDMA_INTR, RETURN_EVENT, 0);
91 return;
/*
 * intr_channel() - per-link RDMA event dispatcher.
 *
 * Handles one batch of event-status bits (@evs) for IO link @link:
 * service messages (GP0..GP3), receive-DMA completion (RDC), transmit-DMA
 * completion / data-send failure (TDC/DSF), incoming data messages (RDM:
 * TRWD / READY / READY_DMA handshake), message-send failure (MSF), and the
 * identification events (RIAM/RIRM).  @tcs / @mcs are the DMA-transmit and
 * message control/status snapshots taken by the caller; @msg_ext, when
 * non-zero, supplies a single message to process instead of reading the
 * receive-message register.
 *
 * NOTE(review): this listing is an incomplete scrape — the leading integer on
 * each line is the original file's line number; brace and comment-delimiter
 * lines are missing from this view, so comments below describe only the
 * visible logic.
 */
94 void intr_channel(unsigned int evs, unsigned int tcs, unsigned int mcs,
95 unsigned int link, unsigned int msg_ext)
/* Per-link driver state and statistics. */
97 rdma_state_link_t *rdma_link = &rdma_state->rdma_link[link];
98 struct stat_rdma *pst;
99 rdma_addr_struct_t p_xxb, p_xxb_pa;
100 dev_rdma_sem_t *dev_sem;
101 rw_state_p pd = NULL;
102 rdma_pool_buf_t *r_pool_buf;
103 rdma_pool_buf_t *w_pool_buf;
104 rdma_buf_t *r_buf;
105 rdma_buf_t *w_buf;
106 //unsigned int int_cnt = 0;
107 unsigned int sending_msg;
108 unsigned int ret_smsg;
109 size_t size;
111 pst = &rdma_link->stat_rdma;
112 event_intr(link, INTR_START_EVENT, evs, tcs);
113 pst->rdma_intr++;
116 * GP3 (rezerv)
/* GP3 service message: reserved; the alive/CAM handling is compiled out. */
118 if (evs & ES_RGP3M_Ev) {
119 #if 0
120 dev_rdma_sem_t *dev_sem;
121 rw_state_p pcam;
122 if (RDR_rdma(SHIFT_CAM, link)) {
123 WRR_rdma(SHIFT_CAM, link, 0);
124 pcam = &rdma_link->talive;
125 dev_sem = &pcam->dev_rdma_sem;
126 raw_spin_lock(&dev_sem->lock);
127 if (pcam->stat == 1) {
128 pcam->clkr = join_curr_clock();
129 pcam->int_cnt = int_cnt;
130 rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem,
131 link);
133 raw_spin_unlock(&dev_sem->lock);
134 } else {
135 if (state_cam == RDMA_UNSET_CAM) {
136 pcam = &rdma_link->talive;
137 dev_sem = &pcam->dev_rdma_sem;
138 raw_spin_lock(&dev_sem->lock);
139 if (pcam->stat == 1) {
140 pcam->clkr = join_curr_clock();
141 pcam->int_cnt = int_cnt;
142 rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem,
143 link);
145 raw_spin_unlock(&dev_sem->lock);
146 } else {
147 WRR_rdma(SHIFT_CAM, link, tr_atl);
148 pcam = &rdma_link->ralive;
149 dev_sem = &pcam->dev_rdma_sem;
150 raw_spin_lock(&dev_sem->lock);
151 if (pcam->stat == 1)
152 rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem,
153 link);
154 raw_spin_unlock(&dev_sem->lock);
157 p_xxb.addr = (unsigned long)pcam;
158 event_intr(link, INTR_RGP3M_EVENT, p_xxb.fields.haddr,
159 p_xxb.fields.laddr);
160 event_intr(link, INTR_RGP3M_EVENT, 0, RDR_rdma(SHIFT_CAM, link));
161 #endif
162 evs = evs & ~ES_RGP3M_Ev;
166 * GP0 (reset)
/* GP0 service message: link reset request — mask interrupts and wake the
 * reset thread (when RESET_THREAD_DMA is configured). */
168 if (evs & ES_RGP0M_Ev) {
169 pst->es_rgp0 ++;
170 #if RESET_THREAD_DMA
171 WRR_rdma(SHIFT_IRQ_MC, link ,irq_mc_03);
172 raw_spin_lock(&rdma_link->rst_thr_lock);
173 rdma_link->start_rst_thr = 1;
174 raw_spin_unlock(&rdma_link->rst_thr_lock);
175 wake_up_process(rdma_link->rst_thr);
176 #endif
177 event_intr(link, INTR_RGP0M_EVENT, 0, pst->es_rgp0);
178 evs = evs & ~ES_RGP0M_Ev;
181 * CMIE
/* NOTE(review): es_cmie is incremented twice here — once on its own line and
 * again inside the event_intr() argument; one of the two looks unintended. */
183 if (evs & ES_CMIE_Ev) {
184 pst->es_cmie++;
185 event_intr(link, INTR_CMIE_EVENT, 0, pst->es_cmie++);
186 evs = evs & ~ES_CMIE_Ev;
190 * RDC (end dma reciver)
/* RDC: receive DMA finished.  Disable the receiver, move the completed
 * buffer from ready_list to busy_list, optionally re-arm the receiver
 * (receive-only mode / unexpected-TRWD case), wake a waiting reader and
 * answer the peer with MSG_READY_DMA carrying the free-buffer count. */
192 if (evs & ES_RDC_Ev) {
193 pst->es_rdc++;
195 * Reset enable dma
197 pst->rcs = RDR_rdma(SHIFT_DMA_RCS, link);
198 pst->rbc = RDR_rdma(SHIFT_DMA_RBC, link);
199 WRR_rdma(SHIFT_DMA_RCS, link, pst->rcs & (~DMA_RCS_RE));
200 pd = &rdma_link->rw_states_d[READER];
201 dev_sem = &pd->dev_rdma_sem;
202 p_xxb.addr = (unsigned long)pd;
203 r_pool_buf = &rdma_link->read_pool;
204 raw_spin_lock(&dev_sem->lock);
205 event_intr(link, INTR_RDC_EVENT, p_xxb.fields.laddr,
206 pd->int_ac);
207 event_intr(link, INTR_RDC_EVENT, pst->rcs, pst->rbc);
209 * If file READ close
/* Reader device is closed: report "no free buffers" to the peer. */
211 if ((!pd->state_open_close) &&
212 (rdma_link->mok_x_mode_link != STATE_LINK_ONLY_RECIVE)) {
214 * Create MSG_READY_DMA "not free buf"
216 sending_msg = MSG_READY_DMA | 0x0;
217 goto empty_dma_rdc;
219 raw_spin_lock(&pd->lock_rd);
221 * Find work_buf in ready_list
223 r_buf = list_entry(r_pool_buf->ready_list.next,
224 rdma_buf_t, list);
/* NOTE(review): list_entry() on a list head never yields NULL; this check
 * presumably guards an empty ready_list — confirm against the real file. */
225 if (r_buf == NULL) {
226 pd->int_ac = 0;
227 raw_spin_unlock(&pd->lock_rd);
228 event_intr(link, RDMA_BAD_RDC_EVENT,
229 r_pool_buf->num_free_buf,
230 dev_sem->num_obmen);
231 goto ES_RDC_Ev_label;
/* Actual received length = programmed transfer size minus residual byte count. */
233 r_buf->rfsm_size = pd->size_trans - pst->rbc;
235 * Work_buf move in busy_list
237 list_move_tail(&r_buf->list, &r_pool_buf->busy_list);
239 * Mode only RECIEVE
240 * ------------------------------------------------------
/* Receive-only mode: immediately re-arm the receive DMA with the next free
 * buffer (unless the generator has been stopped). */
242 if (rdma_link->mok_x_mode_link == STATE_LINK_ONLY_RECIVE) {
244 * Search free for read buffer
246 if (!rdma_link->generator_stop) {
249 if (list_empty(&r_pool_buf->free_list)) {
250 r_buf = NULL;
251 } else {
252 r_buf = list_entry(r_pool_buf->free_list.next,
253 rdma_buf_t, list);
254 list_move_tail(&r_buf->list,
255 &r_pool_buf->ready_list);
256 r_pool_buf->num_free_buf --;
258 * Programming dma reciver
260 size = rdma_link->mok_x_buf_size;
262 * Check on bad size.
263 * TODO. This situation should not happen.  The size
264 * is checked when the mode is assigned.
266 if (size > r_buf->size) {
267 event_intr(link, READ_BADSIZE_EVENT,
268 size, dev_sem->num_obmen);
269 event_intr(link, READ_BADSIZE_EVENT,
270 r_buf->size,
271 dev_sem->num_obmen);
272 size = r_buf->size;
274 r_buf->real_size = size;
/* Program receiver: address (large buffers use dma_addr, small ones the
 * dedicated small-buffer address), byte count, then control/enable bits. */
275 WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 );
276 if (size > SMALL_CHANGE) {
277 p_xxb_pa.addr = (unsigned long)r_buf->dma_addr;
278 } else {
279 p_xxb_pa.addr = (unsigned long)r_buf->buf_addr_small;
281 WRR_rdma(SHIFT_DMA_HRSA, link,
282 p_xxb_pa.fields.haddr);
283 WRR_rdma(SHIFT_DMA_RSA, link,
284 p_xxb_pa.fields.laddr);
285 if (size > SMALL_CHANGE) {
286 pd->size_trans = (r_pool_buf->tm_mode ?
287 ALIGN(size, (rdma_link->align_buf_tm * PAGE_SIZE)) : (rfsm ?
288 r_buf->size : allign_dma(size)));
289 WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
290 WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 |
291 DMA_RCS_RE |
292 (r_pool_buf->tm_mode ? DMA_RCS_RTM : 0) |
293 (r_pool_buf->tm_mode ? 0 : DMA_RCS_RFSM));
294 if (rdma_link->mok_x_mode_number_link == MODE3_LINK)
295 set_mok_x_SR_ready_to_receive(link);
296 } else {
297 pd->size_trans = allign_dma(size);
298 WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
299 WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 |
300 DMA_RCS_RE | DMA_RCS_RFSM);
301 if (rdma_link->mok_x_mode_number_link == MODE3_LINK)
302 set_mok_x_SR_ready_to_receive(link);
305 r_pool_buf->work_buf = r_buf;
310 * ------------------------------------------------------
/* A TRWD arrived while the previous receive was still in flight: claim the
 * next free buffer now so the deferred exchange can be programmed below. */
312 #ifdef UNX_TRWD
314 * For unexpected TRWD
316 if (rdma_link->unexpected_trwd_size) {
318 * Search free for read buffer
320 event_intr(link, INTR_RMSG_UNXP_EVENT,
321 r_pool_buf->num_free_buf,
322 rdma_link->unexpected_trwd_size);
323 if (list_empty(&r_pool_buf->free_list)) {
324 /** Not free buf
325 * TODO. This situation should not happen,
326 * since TRWD is sent only when a
327 * free buffer is available
329 } else {
330 r_buf = list_entry(r_pool_buf->free_list.next,
331 rdma_buf_t, list);
333 * Buf as ready
335 list_move_tail(&r_buf->list, &r_pool_buf->ready_list);
336 r_pool_buf->num_free_buf--;
339 #endif
341 * Create MSG_READY_DMA
343 if (rdma_link->mok_x_mode_link != STATE_LINK_ONLY_RECIVE)
344 sending_msg = MSG_READY_DMA | r_pool_buf->num_free_buf;
345 raw_spin_unlock(&pd->lock_rd);
/* int_ac == 1 means a reader is blocked waiting for data: wake it. */
346 switch (pd->int_ac) {
347 case 1:
349 * Wake up READER
351 rdma_cv_broadcast_rdma(&pd->dev_rdma_sem, link);
352 event_intr(link, INTR_SIGN2_READ_EVENT,
353 pd->int_ac, dev_sem->num_obmen);
354 pd->int_ac = 0;
355 break;
356 default:
357 break;
/* Deferred programming of the receive DMA for the unexpected TRWD recorded
 * earlier, followed by a MSG_READY answer to the transmitter. */
359 #ifdef UNX_TRWD
361 * For unexpected TRWD
363 if (rdma_link->unexpected_trwd_size) {
364 unsigned int sending_msg_unexpected_trwd;
366 * Programming dma reciver
368 size = rdma_link->unexpected_trwd_size;
369 r_buf->real_size = size;
371 * Check on bad size
373 if (size > r_buf->size) {
374 event_intr(link, READ_BADSIZE_EVENT,
375 size, dev_sem->num_obmen);
376 event_intr(link, READ_BADSIZE_EVENT,
377 r_buf->size, dev_sem->num_obmen);
379 * Check on bad size.
380 * TODO. This situation should not happen.  The size
381 * is checked when the exchange is set up
382 * by the transmitter
385 WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 );
386 if (size > SMALL_CHANGE) {
387 p_xxb_pa.addr = (unsigned long)r_buf->dma_addr;
388 } else {
389 p_xxb_pa.addr = (unsigned long)r_buf->buf_addr_small;
391 WRR_rdma(SHIFT_DMA_HRSA, link,
392 p_xxb_pa.fields.haddr);
393 WRR_rdma(SHIFT_DMA_RSA, link,
394 p_xxb_pa.fields.laddr);
395 if (size > SMALL_CHANGE) {
396 pd->size_trans = (r_pool_buf->tm_mode ?
397 ALIGN(size, (rdma_link->align_buf_tm * PAGE_SIZE)) : (rfsm ?
398 r_buf->size : allign_dma(size)));
399 WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
400 WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 |
401 DMA_RCS_RE |
402 (r_pool_buf->tm_mode ? DMA_RCS_RTM : 0) |
403 (r_pool_buf->tm_mode ? 0 : DMA_RCS_RFSM));
404 } else {
405 pd->size_trans = allign_dma(size);
406 WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
407 WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 |
408 DMA_RCS_RE | DMA_RCS_RFSM);
411 * Create READY
413 sending_msg_unexpected_trwd = MSG_READY |
414 (dev_sem->num_obmen & MSG_USER);
415 if ((ret_smsg = send_msg_check(sending_msg_unexpected_trwd, link,
416 0, dev_sem, 0)) <= 0) {
417 event_intr(link, READ_SNDMSGBAD_EVENT,
418 sending_msg_unexpected_trwd, dev_sem->num_obmen);
419 event_intr(link, READ_SNDMSGBAD_EVENT,
420 0xff, raw_smp_processor_id());
422 * TODO. Needs more thought.
423 * But this means the link is faulty.
425 } else {
/* NOTE(review): logs sending_msg here, not sending_msg_unexpected_trwd —
 * looks like a copy/paste slip (sending_msg may be stale/uninitialized). */
426 event_intr(link, READ_SNDNGMSG_EVENT,
427 sending_msg, dev_sem->num_obmen);
428 event_intr(link, READ_SNDNGMSG_EVENT,
429 0xff, raw_smp_processor_id());
432 #endif
/* Common RDC tail: tell the peer how many receive buffers remain free. */
433 empty_dma_rdc:
434 if (rdma_link->mok_x_mode_link != STATE_LINK_ONLY_RECIVE) {
436 * Send READY_DMA
438 if ((ret_smsg = send_msg_check(sending_msg, link, 0,
439 dev_sem, 0)) <= 0) {
440 event_intr(link, READ_SNDMSGBAD_EVENT,
441 sending_msg, dev_sem->num_obmen);
442 event_intr(link, READ_SNDMSGBAD_EVENT,
443 0xff, raw_smp_processor_id());
445 * TODO. Needs more thought.
446 * But this means the link is faulty.
448 } else {
449 event_intr(link, READ_SNDNGMSG_EVENT,
450 sending_msg, dev_sem->num_obmen);
451 event_intr(link, READ_SNDNGMSG_EVENT,
452 0xff, raw_smp_processor_id());
455 ES_RDC_Ev_label:
456 #ifdef UNX_TRWD
457 rdma_link->unexpected_trwd_size = 0x0;
458 rdma_link->unexpected_trwd = 0x0;
459 #endif
460 raw_spin_unlock(&dev_sem->lock);
461 evs = evs & ~ES_RDC_Ev;
464 * TDC (end dma transmiter)
/* TDC/DSF: transmit DMA completed, or a data-send failure occurred.
 * Snapshot transmitter registers, disable TE, then either retry (DSF,
 * up to 10 times before a hard Tx reset) or wake the waiting writer. */
466 if (evs & (ES_TDC_Ev | ES_DSF_Ev)) {
467 pst->es_tdc++;
468 pd = &rdma_link->rw_states_d[WRITER];
469 p_xxb.addr = (unsigned long)pd;
470 dev_sem = &pd->dev_rdma_sem;
471 pst->tcs = RDR_rdma(SHIFT_DMA_TCS, link);
472 pst->tbc = RDR_rdma(SHIFT_DMA_TBC, link);
473 pst->tsa = RDR_rdma(SHIFT_DMA_TSA, link);
474 WRR_rdma(SHIFT_DMA_TCS, link, pst->tcs & (~DMA_TCS_TE));
475 pst->tcs = RDR_rdma(SHIFT_DMA_TCS, link);
/* Successful completion clears the DSF retry counter. */
476 if (evs & ES_TDC_Ev) {
477 if (rdma_link->trwd_lock) {
478 rdma_link->trwd_lock = 0;
481 if (evs & ES_DSF_Ev) {
482 rdma_link->trwd_lock ++;
483 rdma_link->trwd_lock_err ++;
484 WRR_rdma(SHIFT_ES, link, ES_DSF_Ev);
485 event_intr(link, INTR_DSF_EVENT, pd->int_ac,
486 dev_sem->num_obmen);
487 event_intr(link, INTR_DSF_EVENT, pd->int_ac, tcs);
488 event_intr(link, INTR_DSF_EVENT, pd->int_ac, pst->tbc);
/* More than 10 consecutive failures: reset the transmitter and kick the
 * reset thread; otherwise just re-enable TE and retry. */
489 if (rdma_link->trwd_lock > 10) {
490 rdma_link->trwd_lock = 0;
491 WRR_rdma(SHIFT_DMA_TCS, link, DMA_TCS_Tx_Rst);
492 #if RESET_THREAD_DMA
494 * Send GP0 (reset)
496 WRR_rdma(SHIFT_IRQ_MC, link ,irq_mc_03);
497 ret_smsg = send_msg_check(0, link, MSG_CS_SGP2_Msg, 0, 0);
498 event_intr(link, INTR_DSF_EVENT, dev_sem->num_obmen,
499 ret_smsg);
500 raw_spin_lock(&rdma_link->rst_thr_lock);
501 rdma_link->start_rst_thr = 1;
502 raw_spin_unlock(&rdma_link->rst_thr_lock);
503 wake_up_process(rdma_link->rst_thr);
504 //goto ES_TDC_Ev_label;
505 #endif
506 } else {
507 WRR_rdma(SHIFT_DMA_TCS, link,
508 RDR_rdma(SHIFT_DMA_TCS, link) | DMA_TCS_TE);
509 goto ES_DSF_Ev_label;
512 w_pool_buf = &rdma_link->write_pool;
513 raw_spin_lock(&dev_sem->lock);
514 //if (evs & ES_DSF_Ev)
515 // pd->trwd_was = 0;
516 event_intr(link, INTR_TDC_EVENT, p_xxb.fields.haddr,
517 p_xxb.fields.laddr);
518 event_intr(link, INTR_TDC_EVENT, pd->int_ac, dev_sem->num_obmen);
/* int_ac == 2 means a writer is blocked waiting for completion: wake it. */
519 switch (pd->int_ac) {
520 case 2:
522 * Wake up WRITER
524 event_intr(link, INTR_SIGN1_WRITE_EVENT,
525 pd->int_ac, dev_sem->num_obmen);
526 rdma_cv_broadcast_rdma(&pd->dev_rdma_sem, link);
527 break;
528 default:
529 event_intr(link, INTR_TDC_UNXP_EVENT, pd->int_ac,
530 dev_sem->num_obmen);
531 break;
533 //pd->trwd_was--;
534 raw_spin_unlock(&dev_sem->lock);
535 ES_DSF_Ev_label:
536 //ES_TDC_Ev_label:
537 evs = evs & (~(ES_TDC_Ev | ES_DSF_Ev));
540 * RDM (data messages)
/* RDM: incoming data messages.  rdmc = message count from the status field
 * (0 encodes 32); when msg_ext is supplied process exactly that one message. */
542 if (evs & ES_RDM_Ev) {
543 int rdmc = (evs & ES_RDMC) >> 27;
544 int msg;
546 pst->es_rdm++;
547 if (rdmc == 0)
548 rdmc = 32;
549 if (msg_ext)
550 rdmc = 1;
551 while (rdmc--) {
552 if (msg_ext) {
553 msg = msg_ext;
554 } else
555 msg = RDR_rdma(SHIFT_RDMSG, link);
556 pst->rdm++;
558 * TRWD
/* TRWD: peer wants to transmit; low bits (MSG_USER) carry the byte size.
 * Claim a free receive buffer, program the receive DMA, answer MSG_READY. */
560 if ((msg & MSG_OPER) == MSG_TRWD) {
561 r_pool_buf = &rdma_link->read_pool;
562 pd = &rdma_link->rw_states_d[READER];
563 p_xxb.addr = (unsigned long)pd;
564 dev_sem = &pd->dev_rdma_sem;
565 dev_sem->num_obmen++;
566 event_intr(link, INTR_TRWD_EVENT,
567 msg, dev_sem->num_obmen);
568 event_intr(link, INTR_TRWD_EVENT,
569 p_xxb.fields.haddr, p_xxb.fields.laddr);
570 raw_spin_lock(&dev_sem->lock);
572 * For bad TRWD
/* Reader never opened: drop the message. */
574 if (!pd->state_open_close) {
575 if (!pd->first_open) {
576 raw_spin_unlock(&dev_sem->lock);
577 continue;
/* A receive is already in progress: remember the size and handle this
 * TRWD after the pending RDC completes (see RDC handler above). */
580 #ifdef UNX_TRWD
582 * For unexpected TRWD
584 if (rdma_link->unexpected_trwd) {
585 REPEAT_TRWD ++;
586 rdma_link->unexpected_trwd_size =
587 msg & MSG_USER;
588 raw_spin_unlock(&dev_sem->lock);
589 continue;
591 #endif
592 raw_spin_lock(&pd->lock_rd);
594 * Search free for read buffer
596 if (list_empty(&r_pool_buf->free_list)) {
597 raw_spin_unlock(&pd->lock_rd);
598 raw_spin_unlock(&dev_sem->lock);
600 * Not free buf
602 event_intr(link, INTR_TRWD_UNXP_EVENT,
603 r_pool_buf->num_free_buf,
604 dev_sem->num_obmen);
605 continue;
607 r_buf = list_entry(r_pool_buf->free_list.next,
608 rdma_buf_t, list);
610 * If file READ close
612 if (!pd->state_open_close) {
613 goto r_empty_dma;
616 * Buf as ready
618 list_move_tail(&r_buf->list,
619 &r_pool_buf->ready_list);
620 r_pool_buf->num_free_buf--;
621 r_empty_dma:
622 r_pool_buf->work_buf = r_buf;
623 raw_spin_unlock(&pd->lock_rd);
624 #ifdef UNX_TRWD
625 rdma_link->unexpected_trwd = 1;
626 #endif
627 raw_spin_unlock(&dev_sem->lock);
629 * Programming dma reciver
631 size = msg & MSG_USER;
632 r_buf->real_size = size;
634 * TODO. Check on bad size
636 if (size > r_buf->size) {
637 event_intr(link, READ_BADSIZE_EVENT,
638 size, dev_sem->num_obmen);
639 event_intr(link, READ_BADSIZE_EVENT,
640 r_buf->size,
641 dev_sem->num_obmen);
642 continue;
/* Program receiver: address, byte count, control/enable (same pattern as
 * the RDC re-arm path above). */
644 WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 );
645 if (size > SMALL_CHANGE) {
646 p_xxb_pa.addr = (unsigned long)r_buf->dma_addr;
647 } else {
648 p_xxb_pa.addr = (unsigned long)r_buf->buf_addr_small;
650 WRR_rdma(SHIFT_DMA_HRSA, link,
651 p_xxb_pa.fields.haddr);
652 WRR_rdma(SHIFT_DMA_RSA, link,
653 p_xxb_pa.fields.laddr);
654 if (size > SMALL_CHANGE) {
655 pd->size_trans = (r_pool_buf->tm_mode ?
656 ALIGN(size, (rdma_link->align_buf_tm * PAGE_SIZE)) : (rfsm ?
657 r_buf->size : allign_dma(size)));
658 WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
659 WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 |
660 DMA_RCS_RE |
661 (r_pool_buf->tm_mode ? DMA_RCS_RTM : 0) |
662 (r_pool_buf->tm_mode ? 0 : DMA_RCS_RFSM));
663 } else {
664 pd->size_trans = allign_dma(size);
665 ///WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
666 WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
667 WRR_rdma(SHIFT_DMA_RCS, link, WCode_64 |
668 DMA_RCS_RE | DMA_RCS_RFSM);
671 * Create READY
673 sending_msg = MSG_READY |
674 (dev_sem->num_obmen & MSG_USER);
675 if ((ret_smsg = send_msg_check(sending_msg, link,
676 0, dev_sem, 0)) <= 0) {
677 event_intr(link, READ_SNDMSGBAD_EVENT,
678 sending_msg, dev_sem->num_obmen);
679 event_intr(link, READ_SNDMSGBAD_EVENT,
680 0xff, raw_smp_processor_id());
682 * TODO. Needs more thought.
683 * But this means the link is faulty.
685 } else {
686 event_intr(link, READ_SNDNGMSG_EVENT,
687 sending_msg, dev_sem->num_obmen);
688 event_intr(link, READ_SNDNGMSG_EVENT,
689 0xff, raw_smp_processor_id());
691 continue;
692 } else /*
693 * READY
/* READY: the receiver is armed — start the transmit DMA for the current
 * work buffer. */
695 if ((msg & MSG_OPER) == MSG_READY) {
696 w_pool_buf = &rdma_link->write_pool;
697 w_buf = w_pool_buf->work_buf;
698 pd = &rdma_link->rw_states_d[WRITER];
699 p_xxb.addr = (unsigned long)pd;
700 dev_sem = &pd->dev_rdma_sem;
701 event_intr(link, INTR_READY_EVENT,
702 p_xxb.fields.haddr, p_xxb.fields.laddr);
703 event_intr(link, INTR_READY_EVENT,
704 msg, dev_sem->num_obmen);
705 raw_spin_lock(&dev_sem->lock);
707 * If file WRITE close
709 if (!pd->state_open_close) {
710 goto t_empty_dma;
712 raw_spin_lock(&pd->lock_wr);
713 if (list_empty(&w_pool_buf->busy_list) ||
714 (!w_pool_buf->num_free_buf)) {
716 * Not ready buf
718 raw_spin_unlock(&pd->lock_wr);
719 raw_spin_unlock(&dev_sem->lock);
720 event_intr(link, INTR_MSG_READY_UNXP_EVENT,
721 w_pool_buf->num_free_buf,
722 dev_sem->num_obmen);
723 if (ev_pr)
724 get_event_rdma(0);
725 continue;
727 raw_spin_unlock(&pd->lock_wr);
728 t_empty_dma:
730 * Programming dma transmiter
732 pd->trwd_was--;
733 raw_spin_unlock(&dev_sem->lock);
/* Program transmitter: address, byte count, then control/enable bits. */
734 WRR_rdma(SHIFT_DMA_TCS, link, RCode_64);
735 if (pd->size_trans > SMALL_CHANGE) {
736 p_xxb_pa.addr = (unsigned long)w_buf->dma_addr;
737 } else {
738 p_xxb_pa.addr = (unsigned long)w_buf->buf_addr_small;
740 WRR_rdma(SHIFT_DMA_HTSA, link,
741 p_xxb_pa.fields.haddr);
742 WRR_rdma(SHIFT_DMA_TSA, link,
743 p_xxb_pa.fields.laddr);
744 WRR_rdma( SHIFT_DMA_TBC, link, pd->size_trans);
745 if (pd->size_trans > SMALL_CHANGE) {
746 WRR_rdma(SHIFT_DMA_TCS, link, RCode_64 |
747 DMA_TCS_DRCL | DMA_TCS_TE |
748 (w_pool_buf->tm_mode ? DMA_TCS_TTM : 0));
749 } else {
750 WRR_rdma(SHIFT_DMA_TCS, link, RCode_64 |
751 DMA_TCS_DRCL | DMA_TCS_TE );
753 continue;
754 } else /*
755 * READY_DMA
/* READY_DMA: the peer reports its number of free receive buffers
 * (MSG_USER bits); if any are free, wake a writer waiting to send. */
757 if ((msg & MSG_OPER) == MSG_READY_DMA) {
759 * Get free buf reciver
761 w_pool_buf = &rdma_link->write_pool;
762 w_buf = w_pool_buf->work_buf;
763 pd = &rdma_link->rw_states_d[WRITER];
764 dev_sem = &pd->dev_rdma_sem;
765 event_intr(link, INTR_READY_DMA_EVENT, pd->int_ac,
766 dev_sem->num_obmen);
767 event_intr(link, INTR_READY_DMA_EVENT, msg,
768 dev_sem->num_obmen);
769 raw_spin_lock(&dev_sem->lock);
770 pd->trwd_was = msg & MSG_USER;
772 * If hes free buf's reciver
774 if (pd->trwd_was) {
775 switch (pd->int_ac) {
776 case 1:
778 * Wake up write
780 rdma_cv_broadcast_rdma(
781 &pd->dev_rdma_sem,
782 link);
783 break;
784 default:
785 break;
788 raw_spin_unlock(&dev_sem->lock);
789 continue;
791 #ifdef SETTING_OVER_INTERRUPT
792 else {
793 wait_answer_msg = msg;
795 #endif
797 evs = evs & ~ES_RDM_Ev;
801 * MSF
/* MSF: message-send failure — reset the message unit. */
803 if (evs & ES_MSF_Ev) {
804 #if 0
805 dev_rdma_sem_t *dev_sem;
806 rw_state_p pcam;
808 WRR_rdma(SHIFT_CAM, link, 0);
809 pcam = &rdma_link->talive;
810 dev_sem = &pcam->dev_rdma_sem;
811 raw_spin_lock(&dev_sem->lock);
812 if (pcam->stat == 1) {
813 pcam->clkr = join_curr_clock();
814 pcam->int_cnt = int_cnt;
815 rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem, link);
817 raw_spin_unlock(&dev_sem->lock);
818 #endif
819 WRR_rdma(SHIFT_MSG_CS, link, msg_cs_dmrcl | MSG_CS_Msg_Rst);
820 fix_event(link, INTR_MSF_EVENT, 1, 0);
821 evs = evs & ~ES_MSF_Ev;
823 #if 1
825 * RIAM
/* RIAM: neighbour already active — count and trace only (alive handling
 * compiled out). */
827 if (evs & ES_RIAM_Ev) {
828 #if 0
829 dev_rdma_sem_t *dev_sem;
830 rw_state_p pcam;
832 WRR_rdma(SHIFT_CAM, link, tr_atl);
833 time_ID_ANS = join_curr_clock();
834 pcam = &rdma_link->ralive;
835 dev_sem = &pcam->dev_rdma_sem;
836 raw_spin_lock(&dev_sem->lock);
837 if (pcam->stat == 1) {
838 pcam->clkr = join_curr_clock();
839 pcam->int_cnt = int_cnt;
840 rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem, link);
842 raw_spin_unlock(&dev_sem->lock);
843 #endif
844 pst->es_riam ++;
845 fix_event(link, INTR_RIAM_EVENT, 0, pst->es_riam);
846 evs &= ~ES_RIAM_Ev;
850 * RIRM
/* RIRM: neighbour (re)started — count and trace only. */
852 if (evs & ES_RIRM_Ev) {
853 #if 0
854 dev_rdma_sem_t *dev_sem;
855 rw_state_p pcam;
857 WRR_rdma(SHIFT_CAM, link, tr_atl);
858 time_ID_REQ = join_curr_clock();
859 pcam = &rdma_link->ralive;
860 dev_sem = &pcam->dev_rdma_sem;
861 raw_spin_lock(&dev_sem->lock);
862 if (pcam->stat == 1) {
863 pcam->clkr = join_curr_clock();
864 pcam->int_cnt = int_cnt;
865 rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem, link);
867 raw_spin_unlock(&dev_sem->lock);
868 #endif
869 pst->es_rirm ++;
870 fix_event(link, INTR_RIRM_EVENT, 0, pst->es_rirm);
871 evs &= ~ES_RIRM_Ev;
873 #endif
875 * GP1
/* NOTE(review): GP1 handler bumps es_rgp1 but then reports AND increments
 * es_rgp0 in the event_intr() call — es_rgp1 was almost certainly intended. */
877 if (evs & ES_RGP1M_Ev) {
878 pst->es_rgp1++;
879 event_intr(link, INTR_RGP1M_EVENT, 0, pst->es_rgp0++);
880 evs &= ~ES_RGP1M_Ev;
883 * GP2
/* GP2 service message: like GP0, kicks the reset thread when configured.
 * NOTE(review): es_rgp2 is incremented twice (line 886 and inside 895). */
885 if (evs & ES_RGP2M_Ev) {
886 pst->es_rgp2++;
887 #if RESET_THREAD_DMA
888 WRR_rdma(SHIFT_IRQ_MC, link ,irq_mc_03);
889 raw_spin_lock(&rdma_link->rst_thr_lock);
890 rdma_link->start_rst_thr = 1;
891 raw_spin_unlock(&rdma_link->rst_thr_lock);
892 wake_up_process(rdma_link->rst_thr);
894 #endif
895 event_intr(link, INTR_RGP2M_EVENT, 0, pst->es_rgp2++);
896 evs = evs & ~ES_RGP2M_Ev;
899 * RLM
/* RLM / RULM: link up/down transitions — statistics only. */
901 if (evs & ES_RLM_Ev) {
902 pst->es_rlm++;
903 evs &= ~ES_RLM_Ev;
906 * RULM
908 if (evs & ES_RULM_Ev) {
909 pst->es_rulm++;
910 evs &= ~ES_RULM_Ev;
912 event_intr(link, INTR_EXIT_EVENT, 0, 0);
913 return;