3 nodemask_t node_online_neighbour_map
= NODE_MASK_NONE
;
5 int node_neighbour_num
= 0;
6 void intr_channel(unsigned int es
, unsigned int tcs
, unsigned int mcs
,
7 unsigned int link
, unsigned int msg_ext
);
9 void rdma_interrupt(struct pt_regs
*regs
)
11 rdma_addr_struct_t p_xxb
;
12 unsigned int es
, tcs
, mcs
;
13 unsigned int node_neighbour_num_add
= 0;
15 unsigned int link
= NODE_NUMIOLINKS
, i
, inst
;
19 cpu
= raw_smp_processor_id();
20 unsigned int node_id
= e90s_cpu_to_node(cpu
);
22 cpu
= raw_smp_processor_id();
23 unsigned int node_id
= numa_node_id();
26 * Temporarily until a correct definition of link
28 for (i
= 0; i
< link
; i
++ ) {
29 node
= node_id
* RDMA_NODE_IOLINKS
+ i
;
30 if (HAS_MACHINE_L_SIC
) {
31 for_each_online_rdma(inst
)
32 if ( node
== inst
) goto next
;
37 event_intr(node
, RDMA_INTR
, START_EVENT
, cpu
);
38 es
= RDR_rdma(SHIFT_CS
, node
);
39 es
= RDR_rdma(SHIFT_ES
, node
);
40 if (es
& ES_RIRM_Ev
) {
45 WRR_rdma(SHIFT_ES
, node
, ES_RIRM_Ev
);
47 node_neighbour_num_add
= 0;
48 if (!node_test_and_set(node
, node_online_neighbour_map
))
49 node_neighbour_num_add
= 1;
51 if (node_neighbour_num_add
)
54 *((unsigned long *)&node_online_neighbour_map
);
55 event_intr(node
, RDMA_INTR
, RIRM_EVENT
,
56 ((node_neighbour_num
& 0xf) << 28) |
57 (p_xxb
.fields
.laddr
& 0x0fffffff));
59 if (es
& ES_RIAM_Ev
) {
61 * Neighbor is already active
64 WRR_rdma(SHIFT_ES
, node
, ES_RIAM_Ev
);
66 node_neighbour_num_add
= 0;
67 if (!node_test_and_set(node
, node_online_neighbour_map
))
68 node_neighbour_num_add
= 1;
69 if (node_neighbour_num_add
)
72 *((unsigned long *)&node_online_neighbour_map
);
73 event_intr(node
, RDMA_INTR
, RIAM_EVENT
,
74 ((node_neighbour_num
& 0xf) << 28) |
75 (p_xxb
.fields
.laddr
& 0x0fffffff));
78 while ((es
= RDR_rdma(SHIFT_ES
, node
)) & irq_mc
) {
79 //while ((es = RDR_rdma(SHIFT_ES, node)) & irq_mc_rdc) {
80 tcs
= RDR_rdma(SHIFT_DMA_TCS
, node
);
82 WRR_rdma(SHIFT_ES
, node
, es
& (~ES_SM_Ev
& ~ES_DSF_Ev
));
84 WRR_rdma(SHIFT_ES
, node
, es
& ~ES_SM_Ev
);
86 mcs
= RDR_rdma(SHIFT_MSG_CS
, node
);
87 intr_channel(es
, tcs
, mcs
, node
, 0x0);
89 event_intr(node
, RDMA_INTR
, RETURN_EVENT
, 0);
94 void intr_channel(unsigned int evs
, unsigned int tcs
, unsigned int mcs
,
95 unsigned int link
, unsigned int msg_ext
)
97 rdma_state_link_t
*rdma_link
= &rdma_state
->rdma_link
[link
];
98 struct stat_rdma
*pst
;
99 rdma_addr_struct_t p_xxb
, p_xxb_pa
;
100 dev_rdma_sem_t
*dev_sem
;
101 rw_state_p pd
= NULL
;
102 rdma_pool_buf_t
*r_pool_buf
;
103 rdma_pool_buf_t
*w_pool_buf
;
106 //unsigned int int_cnt = 0;
107 unsigned int sending_msg
;
108 unsigned int ret_smsg
;
111 pst
= &rdma_link
->stat_rdma
;
112 event_intr(link
, INTR_START_EVENT
, evs
, tcs
);
118 if (evs
& ES_RGP3M_Ev
) {
120 dev_rdma_sem_t
*dev_sem
;
122 if (RDR_rdma(SHIFT_CAM
, link
)) {
123 WRR_rdma(SHIFT_CAM
, link
, 0);
124 pcam
= &rdma_link
->talive
;
125 dev_sem
= &pcam
->dev_rdma_sem
;
126 raw_spin_lock(&dev_sem
->lock
);
127 if (pcam
->stat
== 1) {
128 pcam
->clkr
= join_curr_clock();
129 pcam
->int_cnt
= int_cnt
;
130 rdma_cv_broadcast_rdma(&pcam
->dev_rdma_sem
,
133 raw_spin_unlock(&dev_sem
->lock
);
135 if (state_cam
== RDMA_UNSET_CAM
) {
136 pcam
= &rdma_link
->talive
;
137 dev_sem
= &pcam
->dev_rdma_sem
;
138 raw_spin_lock(&dev_sem
->lock
);
139 if (pcam
->stat
== 1) {
140 pcam
->clkr
= join_curr_clock();
141 pcam
->int_cnt
= int_cnt
;
142 rdma_cv_broadcast_rdma(&pcam
->dev_rdma_sem
,
145 raw_spin_unlock(&dev_sem
->lock
);
147 WRR_rdma(SHIFT_CAM
, link
, tr_atl
);
148 pcam
= &rdma_link
->ralive
;
149 dev_sem
= &pcam
->dev_rdma_sem
;
150 raw_spin_lock(&dev_sem
->lock
);
152 rdma_cv_broadcast_rdma(&pcam
->dev_rdma_sem
,
154 raw_spin_unlock(&dev_sem
->lock
);
157 p_xxb
.addr
= (unsigned long)pcam
;
158 event_intr(link
, INTR_RGP3M_EVENT
, p_xxb
.fields
.haddr
,
160 event_intr(link
, INTR_RGP3M_EVENT
, 0, RDR_rdma(SHIFT_CAM
, link
));
162 evs
= evs
& ~ES_RGP3M_Ev
;
168 if (evs
& ES_RGP0M_Ev
) {
171 WRR_rdma(SHIFT_IRQ_MC
, link
,irq_mc_03
);
172 raw_spin_lock(&rdma_link
->rst_thr_lock
);
173 rdma_link
->start_rst_thr
= 1;
174 raw_spin_unlock(&rdma_link
->rst_thr_lock
);
175 wake_up_process(rdma_link
->rst_thr
);
177 event_intr(link
, INTR_RGP0M_EVENT
, 0, pst
->es_rgp0
);
178 evs
= evs
& ~ES_RGP0M_Ev
;
183 if (evs
& ES_CMIE_Ev
) {
185 event_intr(link
, INTR_CMIE_EVENT
, 0, pst
->es_cmie
++);
186 evs
= evs
& ~ES_CMIE_Ev
;
190 * RDC (end of DMA receiver)
192 if (evs
& ES_RDC_Ev
) {
197 pst
->rcs
= RDR_rdma(SHIFT_DMA_RCS
, link
);
198 pst
->rbc
= RDR_rdma(SHIFT_DMA_RBC
, link
);
199 WRR_rdma(SHIFT_DMA_RCS
, link
, pst
->rcs
& (~DMA_RCS_RE
));
200 pd
= &rdma_link
->rw_states_d
[READER
];
201 dev_sem
= &pd
->dev_rdma_sem
;
202 p_xxb
.addr
= (unsigned long)pd
;
203 r_pool_buf
= &rdma_link
->read_pool
;
204 raw_spin_lock(&dev_sem
->lock
);
205 event_intr(link
, INTR_RDC_EVENT
, p_xxb
.fields
.laddr
,
207 event_intr(link
, INTR_RDC_EVENT
, pst
->rcs
, pst
->rbc
);
211 if ((!pd
->state_open_close
) &&
212 (rdma_link
->mok_x_mode_link
!= STATE_LINK_ONLY_RECIVE
)) {
214 * Create MSG_READY_DMA "not free buf"
216 sending_msg
= MSG_READY_DMA
| 0x0;
219 raw_spin_lock(&pd
->lock_rd
);
221 * Find work_buf in ready_list
223 r_buf
= list_entry(r_pool_buf
->ready_list
.next
,
227 raw_spin_unlock(&pd
->lock_rd
);
228 event_intr(link
, RDMA_BAD_RDC_EVENT
,
229 r_pool_buf
->num_free_buf
,
231 goto ES_RDC_Ev_label
;
233 r_buf
->rfsm_size
= pd
->size_trans
- pst
->rbc
;
235 * Move work_buf to busy_list
237 list_move_tail(&r_buf
->list
, &r_pool_buf
->busy_list
);
240 * ------------------------------------------------------
242 if (rdma_link
->mok_x_mode_link
== STATE_LINK_ONLY_RECIVE
) {
244 * Search for a free read buffer
246 if (!rdma_link
->generator_stop
) {
249 if (list_empty(&r_pool_buf
->free_list
)) {
252 r_buf
= list_entry(r_pool_buf
->free_list
.next
,
254 list_move_tail(&r_buf
->list
,
255 &r_pool_buf
->ready_list
);
256 r_pool_buf
->num_free_buf
--;
258 * Programming dma receiver
260 size
= rdma_link
->mok_x_buf_size
;
263 * TODO. This situation should not occur: the size
264 * is checked when the mode is assigned.
266 if (size
> r_buf
->size
) {
267 event_intr(link
, READ_BADSIZE_EVENT
,
268 size
, dev_sem
->num_obmen
);
269 event_intr(link
, READ_BADSIZE_EVENT
,
274 r_buf
->real_size
= size
;
275 WRR_rdma(SHIFT_DMA_RCS
, link
, WCode_64
);
276 if (size
> SMALL_CHANGE
) {
277 p_xxb_pa
.addr
= (unsigned long)r_buf
->dma_addr
;
279 p_xxb_pa
.addr
= (unsigned long)r_buf
->buf_addr_small
;
281 WRR_rdma(SHIFT_DMA_HRSA
, link
,
282 p_xxb_pa
.fields
.haddr
);
283 WRR_rdma(SHIFT_DMA_RSA
, link
,
284 p_xxb_pa
.fields
.laddr
);
285 if (size
> SMALL_CHANGE
) {
286 pd
->size_trans
= (r_pool_buf
->tm_mode
?
287 ALIGN(size
, (rdma_link
->align_buf_tm
* PAGE_SIZE
)) : (rfsm
?
288 r_buf
->size
: allign_dma(size
)));
289 WRR_rdma(SHIFT_DMA_RBC
, link
, pd
->size_trans
);
290 WRR_rdma(SHIFT_DMA_RCS
, link
, WCode_64
|
292 (r_pool_buf
->tm_mode
? DMA_RCS_RTM
: 0) |
293 (r_pool_buf
->tm_mode
? 0 : DMA_RCS_RFSM
));
294 if (rdma_link
->mok_x_mode_number_link
== MODE3_LINK
)
295 set_mok_x_SR_ready_to_receive(link
);
297 pd
->size_trans
= allign_dma(size
);
298 WRR_rdma(SHIFT_DMA_RBC
, link
, pd
->size_trans
);
299 WRR_rdma(SHIFT_DMA_RCS
, link
, WCode_64
|
300 DMA_RCS_RE
| DMA_RCS_RFSM
);
301 if (rdma_link
->mok_x_mode_number_link
== MODE3_LINK
)
302 set_mok_x_SR_ready_to_receive(link
);
305 r_pool_buf
->work_buf
= r_buf
;
310 * ------------------------------------------------------
314 * For unexpected TRWD
316 if (rdma_link
->unexpected_trwd_size
) {
318 * Search for a free read buffer
320 event_intr(link
, INTR_RMSG_UNXP_EVENT
,
321 r_pool_buf
->num_free_buf
,
322 rdma_link
->unexpected_trwd_size
);
323 if (list_empty(&r_pool_buf
->free_list
)) {
325 * TODO. This situation should not occur,
326 * since TRWD is sent only when free buffers are available
330 r_buf
= list_entry(r_pool_buf
->free_list
.next
,
335 list_move_tail(&r_buf
->list
, &r_pool_buf
->ready_list
);
336 r_pool_buf
->num_free_buf
--;
341 * Create MSG_READY_DMA
343 if (rdma_link
->mok_x_mode_link
!= STATE_LINK_ONLY_RECIVE
)
344 sending_msg
= MSG_READY_DMA
| r_pool_buf
->num_free_buf
;
345 raw_spin_unlock(&pd
->lock_rd
);
346 switch (pd
->int_ac
) {
351 rdma_cv_broadcast_rdma(&pd
->dev_rdma_sem
, link
);
352 event_intr(link
, INTR_SIGN2_READ_EVENT
,
353 pd
->int_ac
, dev_sem
->num_obmen
);
361 * For unexpected TRWD
363 if (rdma_link
->unexpected_trwd_size
) {
364 unsigned int sending_msg_unexpected_trwd
;
366 * Programming dma receiver
368 size
= rdma_link
->unexpected_trwd_size
;
369 r_buf
->real_size
= size
;
373 if (size
> r_buf
->size
) {
374 event_intr(link
, READ_BADSIZE_EVENT
,
375 size
, dev_sem
->num_obmen
);
376 event_intr(link
, READ_BADSIZE_EVENT
,
377 r_buf
->size
, dev_sem
->num_obmen
);
380 * TODO. This situation should not occur: the size
381 * is checked when the exchange is set up
385 WRR_rdma(SHIFT_DMA_RCS
, link
, WCode_64
);
386 if (size
> SMALL_CHANGE
) {
387 p_xxb_pa
.addr
= (unsigned long)r_buf
->dma_addr
;
389 p_xxb_pa
.addr
= (unsigned long)r_buf
->buf_addr_small
;
391 WRR_rdma(SHIFT_DMA_HRSA
, link
,
392 p_xxb_pa
.fields
.haddr
);
393 WRR_rdma(SHIFT_DMA_RSA
, link
,
394 p_xxb_pa
.fields
.laddr
);
395 if (size
> SMALL_CHANGE
) {
396 pd
->size_trans
= (r_pool_buf
->tm_mode
?
397 ALIGN(size
, (rdma_link
->align_buf_tm
* PAGE_SIZE
)) : (rfsm
?
398 r_buf
->size
: allign_dma(size
)));
399 WRR_rdma(SHIFT_DMA_RBC
, link
, pd
->size_trans
);
400 WRR_rdma(SHIFT_DMA_RCS
, link
, WCode_64
|
402 (r_pool_buf
->tm_mode
? DMA_RCS_RTM
: 0) |
403 (r_pool_buf
->tm_mode
? 0 : DMA_RCS_RFSM
));
405 pd
->size_trans
= allign_dma(size
);
406 WRR_rdma(SHIFT_DMA_RBC
, link
, pd
->size_trans
);
407 WRR_rdma(SHIFT_DMA_RCS
, link
, WCode_64
|
408 DMA_RCS_RE
| DMA_RCS_RFSM
);
413 sending_msg_unexpected_trwd
= MSG_READY
|
414 (dev_sem
->num_obmen
& MSG_USER
);
415 if ((ret_smsg
= send_msg_check(sending_msg_unexpected_trwd
, link
,
416 0, dev_sem
, 0)) <= 0) {
417 event_intr(link
, READ_SNDMSGBAD_EVENT
,
418 sending_msg_unexpected_trwd
, dev_sem
->num_obmen
);
419 event_intr(link
, READ_SNDMSGBAD_EVENT
,
420 0xff, raw_smp_processor_id());
422 * TODO. This needs more thought.
423 * But it means the link is faulty.
426 event_intr(link
, READ_SNDNGMSG_EVENT
,
427 sending_msg
, dev_sem
->num_obmen
);
428 event_intr(link
, READ_SNDNGMSG_EVENT
,
429 0xff, raw_smp_processor_id());
434 if (rdma_link
->mok_x_mode_link
!= STATE_LINK_ONLY_RECIVE
) {
438 if ((ret_smsg
= send_msg_check(sending_msg
, link
, 0,
440 event_intr(link
, READ_SNDMSGBAD_EVENT
,
441 sending_msg
, dev_sem
->num_obmen
);
442 event_intr(link
, READ_SNDMSGBAD_EVENT
,
443 0xff, raw_smp_processor_id());
445 * TODO. This needs more thought.
446 * But it means the link is faulty.
449 event_intr(link
, READ_SNDNGMSG_EVENT
,
450 sending_msg
, dev_sem
->num_obmen
);
451 event_intr(link
, READ_SNDNGMSG_EVENT
,
452 0xff, raw_smp_processor_id());
457 rdma_link
->unexpected_trwd_size
= 0x0;
458 rdma_link
->unexpected_trwd
= 0x0;
460 raw_spin_unlock(&dev_sem
->lock
);
461 evs
= evs
& ~ES_RDC_Ev
;
464 * TDC (end of DMA transmitter)
466 if (evs
& (ES_TDC_Ev
| ES_DSF_Ev
)) {
468 pd
= &rdma_link
->rw_states_d
[WRITER
];
469 p_xxb
.addr
= (unsigned long)pd
;
470 dev_sem
= &pd
->dev_rdma_sem
;
471 pst
->tcs
= RDR_rdma(SHIFT_DMA_TCS
, link
);
472 pst
->tbc
= RDR_rdma(SHIFT_DMA_TBC
, link
);
473 pst
->tsa
= RDR_rdma(SHIFT_DMA_TSA
, link
);
474 WRR_rdma(SHIFT_DMA_TCS
, link
, pst
->tcs
& (~DMA_TCS_TE
));
475 pst
->tcs
= RDR_rdma(SHIFT_DMA_TCS
, link
);
476 if (evs
& ES_TDC_Ev
) {
477 if (rdma_link
->trwd_lock
) {
478 rdma_link
->trwd_lock
= 0;
481 if (evs
& ES_DSF_Ev
) {
482 rdma_link
->trwd_lock
++;
483 rdma_link
->trwd_lock_err
++;
484 WRR_rdma(SHIFT_ES
, link
, ES_DSF_Ev
);
485 event_intr(link
, INTR_DSF_EVENT
, pd
->int_ac
,
487 event_intr(link
, INTR_DSF_EVENT
, pd
->int_ac
, tcs
);
488 event_intr(link
, INTR_DSF_EVENT
, pd
->int_ac
, pst
->tbc
);
489 if (rdma_link
->trwd_lock
> 10) {
490 rdma_link
->trwd_lock
= 0;
491 WRR_rdma(SHIFT_DMA_TCS
, link
, DMA_TCS_Tx_Rst
);
496 WRR_rdma(SHIFT_IRQ_MC
, link
,irq_mc_03
);
497 ret_smsg
= send_msg_check(0, link
, MSG_CS_SGP2_Msg
, 0, 0);
498 event_intr(link
, INTR_DSF_EVENT
, dev_sem
->num_obmen
,
500 raw_spin_lock(&rdma_link
->rst_thr_lock
);
501 rdma_link
->start_rst_thr
= 1;
502 raw_spin_unlock(&rdma_link
->rst_thr_lock
);
503 wake_up_process(rdma_link
->rst_thr
);
504 //goto ES_TDC_Ev_label;
507 WRR_rdma(SHIFT_DMA_TCS
, link
,
508 RDR_rdma(SHIFT_DMA_TCS
, link
) | DMA_TCS_TE
);
509 goto ES_DSF_Ev_label
;
512 w_pool_buf
= &rdma_link
->write_pool
;
513 raw_spin_lock(&dev_sem
->lock
);
514 //if (evs & ES_DSF_Ev)
516 event_intr(link
, INTR_TDC_EVENT
, p_xxb
.fields
.haddr
,
518 event_intr(link
, INTR_TDC_EVENT
, pd
->int_ac
, dev_sem
->num_obmen
);
519 switch (pd
->int_ac
) {
524 event_intr(link
, INTR_SIGN1_WRITE_EVENT
,
525 pd
->int_ac
, dev_sem
->num_obmen
);
526 rdma_cv_broadcast_rdma(&pd
->dev_rdma_sem
, link
);
529 event_intr(link
, INTR_TDC_UNXP_EVENT
, pd
->int_ac
,
534 raw_spin_unlock(&dev_sem
->lock
);
537 evs
= evs
& (~(ES_TDC_Ev
| ES_DSF_Ev
));
540 * RDM (data messages)
542 if (evs
& ES_RDM_Ev
) {
543 int rdmc
= (evs
& ES_RDMC
) >> 27;
555 msg
= RDR_rdma(SHIFT_RDMSG
, link
);
560 if ((msg
& MSG_OPER
) == MSG_TRWD
) {
561 r_pool_buf
= &rdma_link
->read_pool
;
562 pd
= &rdma_link
->rw_states_d
[READER
];
563 p_xxb
.addr
= (unsigned long)pd
;
564 dev_sem
= &pd
->dev_rdma_sem
;
565 dev_sem
->num_obmen
++;
566 event_intr(link
, INTR_TRWD_EVENT
,
567 msg
, dev_sem
->num_obmen
);
568 event_intr(link
, INTR_TRWD_EVENT
,
569 p_xxb
.fields
.haddr
, p_xxb
.fields
.laddr
);
570 raw_spin_lock(&dev_sem
->lock
);
574 if (!pd
->state_open_close
) {
575 if (!pd
->first_open
) {
576 raw_spin_unlock(&dev_sem
->lock
);
582 * For unexpected TRWD
584 if (rdma_link
->unexpected_trwd
) {
586 rdma_link
->unexpected_trwd_size
=
588 raw_spin_unlock(&dev_sem
->lock
);
592 raw_spin_lock(&pd
->lock_rd
);
594 * Search for a free read buffer
596 if (list_empty(&r_pool_buf
->free_list
)) {
597 raw_spin_unlock(&pd
->lock_rd
);
598 raw_spin_unlock(&dev_sem
->lock
);
602 event_intr(link
, INTR_TRWD_UNXP_EVENT
,
603 r_pool_buf
->num_free_buf
,
607 r_buf
= list_entry(r_pool_buf
->free_list
.next
,
612 if (!pd
->state_open_close
) {
618 list_move_tail(&r_buf
->list
,
619 &r_pool_buf
->ready_list
);
620 r_pool_buf
->num_free_buf
--;
622 r_pool_buf
->work_buf
= r_buf
;
623 raw_spin_unlock(&pd
->lock_rd
);
625 rdma_link
->unexpected_trwd
= 1;
627 raw_spin_unlock(&dev_sem
->lock
);
629 * Programming dma receiver
631 size
= msg
& MSG_USER
;
632 r_buf
->real_size
= size
;
634 * TODO. Check on bad size
636 if (size
> r_buf
->size
) {
637 event_intr(link
, READ_BADSIZE_EVENT
,
638 size
, dev_sem
->num_obmen
);
639 event_intr(link
, READ_BADSIZE_EVENT
,
644 WRR_rdma(SHIFT_DMA_RCS
, link
, WCode_64
);
645 if (size
> SMALL_CHANGE
) {
646 p_xxb_pa
.addr
= (unsigned long)r_buf
->dma_addr
;
648 p_xxb_pa
.addr
= (unsigned long)r_buf
->buf_addr_small
;
650 WRR_rdma(SHIFT_DMA_HRSA
, link
,
651 p_xxb_pa
.fields
.haddr
);
652 WRR_rdma(SHIFT_DMA_RSA
, link
,
653 p_xxb_pa
.fields
.laddr
);
654 if (size
> SMALL_CHANGE
) {
655 pd
->size_trans
= (r_pool_buf
->tm_mode
?
656 ALIGN(size
, (rdma_link
->align_buf_tm
* PAGE_SIZE
)) : (rfsm
?
657 r_buf
->size
: allign_dma(size
)));
658 WRR_rdma(SHIFT_DMA_RBC
, link
, pd
->size_trans
);
659 WRR_rdma(SHIFT_DMA_RCS
, link
, WCode_64
|
661 (r_pool_buf
->tm_mode
? DMA_RCS_RTM
: 0) |
662 (r_pool_buf
->tm_mode
? 0 : DMA_RCS_RFSM
));
664 pd
->size_trans
= allign_dma(size
);
665 ///WRR_rdma(SHIFT_DMA_RBC, link, pd->size_trans);
666 WRR_rdma(SHIFT_DMA_RBC
, link
, pd
->size_trans
);
667 WRR_rdma(SHIFT_DMA_RCS
, link
, WCode_64
|
668 DMA_RCS_RE
| DMA_RCS_RFSM
);
673 sending_msg
= MSG_READY
|
674 (dev_sem
->num_obmen
& MSG_USER
);
675 if ((ret_smsg
= send_msg_check(sending_msg
, link
,
676 0, dev_sem
, 0)) <= 0) {
677 event_intr(link
, READ_SNDMSGBAD_EVENT
,
678 sending_msg
, dev_sem
->num_obmen
);
679 event_intr(link
, READ_SNDMSGBAD_EVENT
,
680 0xff, raw_smp_processor_id());
682 * TODO. This needs more thought.
683 * But it means the link is faulty.
686 event_intr(link
, READ_SNDNGMSG_EVENT
,
687 sending_msg
, dev_sem
->num_obmen
);
688 event_intr(link
, READ_SNDNGMSG_EVENT
,
689 0xff, raw_smp_processor_id());
695 if ((msg
& MSG_OPER
) == MSG_READY
) {
696 w_pool_buf
= &rdma_link
->write_pool
;
697 w_buf
= w_pool_buf
->work_buf
;
698 pd
= &rdma_link
->rw_states_d
[WRITER
];
699 p_xxb
.addr
= (unsigned long)pd
;
700 dev_sem
= &pd
->dev_rdma_sem
;
701 event_intr(link
, INTR_READY_EVENT
,
702 p_xxb
.fields
.haddr
, p_xxb
.fields
.laddr
);
703 event_intr(link
, INTR_READY_EVENT
,
704 msg
, dev_sem
->num_obmen
);
705 raw_spin_lock(&dev_sem
->lock
);
707 * If the WRITE file is closed
709 if (!pd
->state_open_close
) {
712 raw_spin_lock(&pd
->lock_wr
);
713 if (list_empty(&w_pool_buf
->busy_list
) ||
714 (!w_pool_buf
->num_free_buf
)) {
718 raw_spin_unlock(&pd
->lock_wr
);
719 raw_spin_unlock(&dev_sem
->lock
);
720 event_intr(link
, INTR_MSG_READY_UNXP_EVENT
,
721 w_pool_buf
->num_free_buf
,
727 raw_spin_unlock(&pd
->lock_wr
);
730 * Programming dma transmitter
733 raw_spin_unlock(&dev_sem
->lock
);
734 WRR_rdma(SHIFT_DMA_TCS
, link
, RCode_64
);
735 if (pd
->size_trans
> SMALL_CHANGE
) {
736 p_xxb_pa
.addr
= (unsigned long)w_buf
->dma_addr
;
738 p_xxb_pa
.addr
= (unsigned long)w_buf
->buf_addr_small
;
740 WRR_rdma(SHIFT_DMA_HTSA
, link
,
741 p_xxb_pa
.fields
.haddr
);
742 WRR_rdma(SHIFT_DMA_TSA
, link
,
743 p_xxb_pa
.fields
.laddr
);
744 WRR_rdma( SHIFT_DMA_TBC
, link
, pd
->size_trans
);
745 if (pd
->size_trans
> SMALL_CHANGE
) {
746 WRR_rdma(SHIFT_DMA_TCS
, link
, RCode_64
|
747 DMA_TCS_DRCL
| DMA_TCS_TE
|
748 (w_pool_buf
->tm_mode
? DMA_TCS_TTM
: 0));
750 WRR_rdma(SHIFT_DMA_TCS
, link
, RCode_64
|
751 DMA_TCS_DRCL
| DMA_TCS_TE
);
757 if ((msg
& MSG_OPER
) == MSG_READY_DMA
) {
759 * Get a free receiver buffer
761 w_pool_buf
= &rdma_link
->write_pool
;
762 w_buf
= w_pool_buf
->work_buf
;
763 pd
= &rdma_link
->rw_states_d
[WRITER
];
764 dev_sem
= &pd
->dev_rdma_sem
;
765 event_intr(link
, INTR_READY_DMA_EVENT
, pd
->int_ac
,
767 event_intr(link
, INTR_READY_DMA_EVENT
, msg
,
769 raw_spin_lock(&dev_sem
->lock
);
770 pd
->trwd_was
= msg
& MSG_USER
;
772 * If the receiver has free buffers
775 switch (pd
->int_ac
) {
780 rdma_cv_broadcast_rdma(
788 raw_spin_unlock(&dev_sem
->lock
);
791 #ifdef SETTING_OVER_INTERRUPT
793 wait_answer_msg
= msg
;
797 evs
= evs
& ~ES_RDM_Ev
;
803 if (evs
& ES_MSF_Ev
) {
805 dev_rdma_sem_t
*dev_sem
;
808 WRR_rdma(SHIFT_CAM
, link
, 0);
809 pcam
= &rdma_link
->talive
;
810 dev_sem
= &pcam
->dev_rdma_sem
;
811 raw_spin_lock(&dev_sem
->lock
);
812 if (pcam
->stat
== 1) {
813 pcam
->clkr
= join_curr_clock();
814 pcam
->int_cnt
= int_cnt
;
815 rdma_cv_broadcast_rdma(&pcam
->dev_rdma_sem
, link
);
817 raw_spin_unlock(&dev_sem
->lock
);
819 WRR_rdma(SHIFT_MSG_CS
, link
, msg_cs_dmrcl
| MSG_CS_Msg_Rst
);
820 fix_event(link
, INTR_MSF_EVENT
, 1, 0);
821 evs
= evs
& ~ES_MSF_Ev
;
827 if (evs
& ES_RIAM_Ev
) {
829 dev_rdma_sem_t
*dev_sem
;
832 WRR_rdma(SHIFT_CAM
, link
, tr_atl
);
833 time_ID_ANS
= join_curr_clock();
834 pcam
= &rdma_link
->ralive
;
835 dev_sem
= &pcam
->dev_rdma_sem
;
836 raw_spin_lock(&dev_sem
->lock
);
837 if (pcam
->stat
== 1) {
838 pcam
->clkr
= join_curr_clock();
839 pcam
->int_cnt
= int_cnt
;
840 rdma_cv_broadcast_rdma(&pcam
->dev_rdma_sem
, link
);
842 raw_spin_unlock(&dev_sem
->lock
);
845 fix_event(link
, INTR_RIAM_EVENT
, 0, pst
->es_riam
);
852 if (evs
& ES_RIRM_Ev
) {
854 dev_rdma_sem_t
*dev_sem
;
857 WRR_rdma(SHIFT_CAM
, link
, tr_atl
);
858 time_ID_REQ
= join_curr_clock();
859 pcam
= &rdma_link
->ralive
;
860 dev_sem
= &pcam
->dev_rdma_sem
;
861 raw_spin_lock(&dev_sem
->lock
);
862 if (pcam
->stat
== 1) {
863 pcam
->clkr
= join_curr_clock();
864 pcam
->int_cnt
= int_cnt
;
865 rdma_cv_broadcast_rdma(&pcam
->dev_rdma_sem
, link
);
867 raw_spin_unlock(&dev_sem
->lock
);
870 fix_event(link
, INTR_RIRM_EVENT
, 0, pst
->es_rirm
);
877 if (evs
& ES_RGP1M_Ev
) {
879 event_intr(link
, INTR_RGP1M_EVENT
, 0, pst
->es_rgp0
++);
885 if (evs
& ES_RGP2M_Ev
) {
888 WRR_rdma(SHIFT_IRQ_MC
, link
,irq_mc_03
);
889 raw_spin_lock(&rdma_link
->rst_thr_lock
);
890 rdma_link
->start_rst_thr
= 1;
891 raw_spin_unlock(&rdma_link
->rst_thr_lock
);
892 wake_up_process(rdma_link
->rst_thr
);
895 event_intr(link
, INTR_RGP2M_EVENT
, 0, pst
->es_rgp2
++);
896 evs
= evs
& ~ES_RGP2M_Ev
;
901 if (evs
& ES_RLM_Ev
) {
908 if (evs
& ES_RULM_Ev
) {
912 event_intr(link
, INTR_EXIT_EVENT
, 0, 0);