/* drivers/mcst/rdma_sic/rdma_intr.c */
#define CAM_NO 0

nodemask_t node_online_neighbour_map = NODE_MASK_NONE;
EXPORT_SYMBOL(node_online_neighbour_map);

int node_neighbour_num = 0;

void intr_channel(unsigned int es, unsigned int tcs, unsigned int mcs,
		  unsigned int instance);
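
/*
 * Top-level RDMA interrupt handler.  Maps the interrupted CPU to its
 * NUMA node, walks every IO link of that node that is online for RDMA,
 * handles the "neighbour started" (RIRM) and "neighbour already active"
 * (RIAM) events inline, and hands all other pending events off to
 * intr_channel().
 */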
void rdma_interrupt(struct pt_regs *regs)
{
	unsigned int es, tcs, mcs;
	unsigned int node_neighbour_num_add = 0;
	rdma_addr_struct_t p_xxb;
	unsigned int node;
	unsigned int link = NODE_NUMIOLINKS, i, inst;
	unsigned int cpu;

#ifdef CONFIG_E90S
	cpu = raw_smp_processor_id();
	unsigned int node_id = e90s_cpu_to_node(cpu);
#else /* E3S */
	cpu = raw_smp_processor_id();
	unsigned int node_id = numa_node_id();
#endif
	/* Temporary, until the number of links is defined correctly. */
	for (i = 0; i < link; i++) {
		node = node_id * NODE_NUMIOLINKS + i;
		for_each_online_rdma(inst)
			if (node == inst)
				goto next;
		continue;	/* this link is not online for RDMA */
next:
		fix_event(node, RDMA_INTR, START_EVENT, cpu);
		es = RDR_rdma(SHIFT_CS, node);	/* CS read; value discarded */
		es = RDR_rdma(SHIFT_ES, node);
		if (es & ES_RIRM_Ev) {
			/* A neighbour node has started */
#if CAM_NO
			WRR_rdma(SHIFT_ES, node, ES_RIRM_Ev); /* for CAM */
#endif
			node_neighbour_num_add = 0;
			if (!node_test_and_set(node, node_online_neighbour_map))
				node_neighbour_num_add = 1;
			es &= ~ES_RIRM_Ev;
			if (node_neighbour_num_add)
				node_neighbour_num++;
			p_xxb.addr =
				*((unsigned long *)&node_online_neighbour_map);
			fix_event(node, RDMA_INTR, RIRM_EVENT,
				  ((node_neighbour_num & 0xf) << 28) |
				  (p_xxb.fields.laddr & 0x0fffffff));
		}
		if (es & ES_RIAM_Ev) {
			/* The neighbour is already active */
#if CAM_NO
			WRR_rdma(SHIFT_ES, node, ES_RIAM_Ev); /* for CAM */
#endif
			node_neighbour_num_add = 0;
			if (!node_test_and_set(node, node_online_neighbour_map))
				node_neighbour_num_add = 1;
			if (node_neighbour_num_add)
				node_neighbour_num++;
			p_xxb.addr =
				*((unsigned long *)&node_online_neighbour_map);
			fix_event(node, RDMA_INTR, RIAM_EVENT,
				  ((node_neighbour_num & 0xf) << 28) |
				  (p_xxb.fields.laddr & 0x0fffffff));
			es &= ~ES_RIAM_Ev;
		}
		while ((es = RDR_rdma(SHIFT_ES, node)) & irq_mc) {
			tcs = RDR_rdma(SHIFT_DMA_TCS, node);
#if DSF_NO
			WRR_rdma(SHIFT_ES, node, es & ~ES_SM_Ev & ~ES_DSF_Ev);
#else
			WRR_rdma(SHIFT_ES, node, es & ~ES_SM_Ev);
#endif
			mcs = RDR_rdma(SHIFT_MSG_CS, node);
			intr_channel(es, tcs, mcs, node);
		}
		fix_event(node, RDMA_INTR, RETURN_EVENT, 0);
	}
	return;
}
EXPORT_SYMBOL(rdma_interrupt);
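
/*
 * Per-link event dispatcher.  Decodes the accumulated event status
 * (evs), the transmit DMA status (tcs) and the message status (mcs)
 * for one RDMA link and wakes up the waiters registered in the
 * corresponding rdma_state_inst_t channel structures.
 */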
void intr_channel(unsigned int evs, unsigned int tcs, unsigned int mcs,
		  unsigned int instance)
{
	struct stat_rdma *pst;
	rw_state_p pd = NULL;
	rw_state_p pm = NULL;
	dev_rdma_sem_t *dev_sem;
	ulong cur_clock;
	register volatile unsigned int tbc;
	unsigned int int_cnt = 0;
	rdma_state_inst_t *xspi = &rdma_state->rdma_sti[instance];
	rdma_addr_struct_t p_xxb, p_xxb_pa;

	fix_event(instance, INTR_START_EVENT, evs, tcs);
	pst = &xspi->stat_rdma;
	pst->rdma_intr++;
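
	/*
	 * RGP3M: general-purpose message 3, used by the alive/CAM
	 * handshake.  Wake whoever sleeps on the talive/ralive
	 * semaphores, depending on the current CAM state.
	 */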
	if (evs & ES_RGP3M_Ev) {
		dev_rdma_sem_t *dev_sem;
		rw_state_p pcam;

		if (RDR_rdma(SHIFT_CAM, instance)) {
			WRR_rdma(SHIFT_CAM, instance, 0);
			pcam = &xspi->talive;
			dev_sem = &pcam->dev_rdma_sem;
			raw_spin_lock(&dev_sem->lock);
			if (pcam->stat == 1) {
				pcam->clkr = join_curr_clock();
				pcam->int_cnt = int_cnt;
				rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem,
						       instance);
			}
			raw_spin_unlock(&dev_sem->lock);
		} else {
			if (state_cam == RDMA_UNSET_CAM) {
				pcam = &xspi->talive;
				dev_sem = &pcam->dev_rdma_sem;
				raw_spin_lock(&dev_sem->lock);
				if (pcam->stat == 1) {
					pcam->clkr = join_curr_clock();
					pcam->int_cnt = int_cnt;
					rdma_cv_broadcast_rdma(
						&pcam->dev_rdma_sem,
						instance);
				}
				raw_spin_unlock(&dev_sem->lock);
			} else {
				WRR_rdma(SHIFT_CAM, instance, tr_atl);
				pcam = &xspi->ralive;
				dev_sem = &pcam->dev_rdma_sem;
				raw_spin_lock(&dev_sem->lock);
				if (pcam->stat == 1)
					rdma_cv_broadcast_rdma(
						&pcam->dev_rdma_sem,
						instance);
				raw_spin_unlock(&dev_sem->lock);
			}
		}
	}
	cur_clock = (unsigned long)jiffies;
	if (evs & ES_CMIE_Ev) {
		WRR_rdma(SHIFT_MSG_CS, instance, MSG_CS_Msg_Rst);
		fix_event(instance, INTR_CMIE_EVENT, 0, 0);
		pst->es_cmie++;
		return;
	}
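	/*
	 * RDC: receive DMA complete.  Stop the receiver, account the
	 * transfer and wake the reader that armed it (int_ac == 2).
	 */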
	if (evs & ES_RDC_Ev) {
		pst->rcs = RDR_rdma(SHIFT_DMA_RCS, instance);
		pst->rbc = RDR_rdma(SHIFT_DMA_RBC, instance);
		pst->rsa = RDR_rdma(SHIFT_DMA_RSA, instance);
		WRR_rdma(SHIFT_DMA_RCS, instance, pst->rcs & ~DMA_RCS_RE);
		pst->rcs = RDR_rdma(SHIFT_DMA_RCS, instance);
		if (rfsm) {
			WRR_rdma(SHIFT_DMA_RCS, instance,
				 pst->rcs & ~DMA_RCS_RFSM);
			WRR_rdma(SHIFT_DMA_RBC, instance, CLEAR_RFSM);
		}
		pd = xspi->rw_states_rd;
		p_xxb.addr = (unsigned long)pd;
		fix_event(instance, INTR_RDC_EVENT, p_xxb.fields.haddr,
			  p_xxb.fields.laddr);
		xspi->rw_states_rd = 0;
		if (pd == NULL) {
			fix_event(instance, INTR_RDC_PD_NULL_EVENT,
				  intr_rdc_count[instance], tcs);
			pst->pd_rd++;
			goto ES_RDC_Ev_label;
		}
		dev_sem = &pd->dev_rdma_sem;
		p_xxb.addr = (unsigned long)dev_sem;
		fix_event(instance, INTR_RDC_EVENT, pd->int_ac,
			  intr_rdc_count[instance]);
		fix_event(instance, INTR_RDC_EVENT, p_xxb.fields.haddr,
			  p_xxb.fields.laddr);
		raw_spin_lock(&dev_sem->lock);
		intr_rdc_count[instance]++;
		pd->clock_rdc = cur_clock;
		switch (pd->int_ac) {
		case 2:
			pd->int_ac = 3;
			fix_event(instance, INTR_SIGN2_READ_EVENT,
				  pd->int_ac, dev_sem->num_obmen);
			dev_sem->time_broadcast = join_curr_clock();
			rdma_cv_broadcast_rdma(&pd->dev_rdma_sem, instance);
			break;
		case 0:
		case 1:
		case 3:
		default:
			fix_event(instance, INTR_UNEXP2_READ_EVENT,
				  pd->int_ac, dev_sem->num_obmen);
			pst->rdc_unxp++;
			break;
		}
		raw_spin_unlock(&dev_sem->lock);
		pd->rbc = 0;
		pst->es_rdc++;
		rdc_byte += allign_dma(pd->size_trb);
		if (rdc_byte >> 10) {
			pst->rdc_kbyte += (rdc_byte >> 10);
			rdc_byte &= 0x3ff;
		}
ES_RDC_Ev_label:
		evs = evs & ~ES_RDC_Ev;
	}
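	/*
	 * TDC/DSF: transmit DMA complete or data-send failure.  Stop
	 * the transmitter, record the failure cause (if any) and wake
	 * the writer that armed the transfer.
	 */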
	if (evs & (ES_TDC_Ev | ES_DSF_Ev)) {
		pst->tcs = RDR_rdma(SHIFT_DMA_TCS, instance);
		pst->tbc = RDR_rdma(SHIFT_DMA_TBC, instance);
		pst->tsa = RDR_rdma(SHIFT_DMA_TSA, instance);
		if (evs & ES_TDC_Ev)
			WRR_rdma(SHIFT_DMA_TCS, instance,
				 pst->tcs & ~DMA_TCS_TE);
		pst->tcs = RDR_rdma(SHIFT_DMA_TCS, instance);
		pd = xspi->rw_states_wr;
		if (pd == NULL) {
			fix_event(instance, INTR_TDC_DSF_PD_NULL_EVENT,
				  intr_rdc_count[instance], tcs);
			goto ES_TDC_Ev_label;
		}
		xspi->rw_states_wr = 0;
		dev_sem = &pd->dev_rdma_sem;
		raw_spin_lock(&dev_sem->lock);
		pd->dsf = 0;
		pd->clock_tdc = cur_clock;
		if (evs & ES_DSF_Ev) {
			tbc = RDR_rdma(SHIFT_DMA_TBC, instance);
			pd->dsf = tcs;
#if 0
			int count_reset_tcs;
			WRR_rdma(SIC_rdma_irq_mc, instance,
				 irq_mc & ~IRQ_DSF);
			for (count_reset_tcs = 0; count_reset_tcs < 10;
					count_reset_tcs++) {
				/* udelay(10); */
				WRR_rdma(SHIFT_DMA_TCS, instance,
					 DMA_TCS_Tx_Rst);
			}
			WRR_rdma(SIC_rdma_irq_mc, instance, irq_mc);
			WRR_rdma(SHIFT_DMA_TCS, instance,
				 RCode_64 | DMA_TCS_DRCL);
#endif
			fix_event(instance, INTR_DSF_EVENT, pd->int_ac, tcs);
			fix_event(instance, INTR_DSF_EVENT, pd->int_ac,
				  pst->tbc);
		} else {
			fix_event(instance, INTR_TDC_EVENT, pd->int_ac,
				  dev_sem->num_obmen);
		}
		switch (pd->int_ac) {
		case 2:
			pd->int_ac = 3;
			fix_event(instance, INTR_SIGN1_WRITE_EVENT,
				  pd->int_ac, dev_sem->num_obmen);
			rdma_cv_broadcast_rdma(&pd->dev_rdma_sem, instance);
			break;
		case 0:
		case 1:
		case 3:
		default:
			pst->tdc_dsf_unxp++;
			fix_event(instance, INTR_TDC_UNXP_EVENT, pd->int_ac,
				  dev_sem->num_obmen);
			break;
		}
		raw_spin_unlock(&dev_sem->lock);

		if (evs & ES_DSF_Ev) {
			pst->es_dsf++;
			if (tcs & DMA_TCS_DPS_Err)
				pst->dma_tcs_dps_err++;
			else if (tcs & DMA_TCS_DPCRC_Err)
				pst->dma_tcs_dpcrc_err++;
			else if (tcs & DMA_TCS_DPTO_Err)
				pst->dma_tcs_dpto_err++;
			else if (tcs & DMA_TCS_DPID_Err)
				pst->dma_tcs_dpid_err++;
			if (evs & ES_TDC_Ev)
				pst->es_dsf_tdc++;
		} else {
			pst->es_tdc++;
		}
ES_TDC_Ev_label:
		evs = evs & ~(ES_TDC_Ev | ES_DSF_Ev);
	}
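	/*
	 * RDM: one or more messages are waiting in the receive FIFO.
	 * ES_RDMC holds the message count (0 encodes 32); drain the
	 * FIFO and dispatch each message (READY, TRWD or an ordinary
	 * received message).
	 */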
	if (evs & ES_RDM_Ev) {
		int rdmc = (evs & ES_RDMC) >> 27;
		int msg;

		pst->es_rdm++;
		if (rdmc == 0)
			rdmc = 32;
		while (rdmc--) {
			msg = RDR_rdma(SHIFT_RDMSG, instance);
			pst->rdm++;
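
			/*
			 * READY: the receiver has armed its DMA; start
			 * the transmit DMA that was announced by TRWD.
			 */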
			if ((msg & MSG_OPER) == MSG_READY) {
				pst->rec_ready++;
				switch ((msg & MSG_ABONENT) >> SHIFT_ABONENT) {
				case 0:
				case 1:
				case 2:
				case 3:
					pd = &xspi->rw_states_d[WRITER];
					break;
				default:
					pd = &xspi->rw_states_d[WRITER];
					break;
				}
				dev_sem = &pd->dev_rdma_sem;
				p_xxb.addr = (unsigned long)pd;
				fix_event(instance, INTR_READY_EVENT,
					  pd->int_ac, dev_sem->num_obmen);
				fix_event(instance, INTR_READY_EVENT,
					  p_xxb.fields.haddr,
					  p_xxb.fields.laddr);
				raw_spin_lock(&dev_sem->lock);
				switch (pd->int_ac) {
				case 1:
					break;
				case 0:
					raw_spin_unlock(&dev_sem->lock);
					pst->READY_UNXP++;
					continue;
				case 2:
					raw_spin_unlock(&dev_sem->lock);
					pst->miss_READY_2++;
					continue;
				case 3:
					raw_spin_unlock(&dev_sem->lock);
					pst->miss_READY_3++;
					continue;
				default:
					raw_spin_unlock(&dev_sem->lock);
					continue;
				}
				pd->msg = msg;
				pd->clock_receive_ready = cur_clock;
				pd->int_ac = 2;
				fix_event(instance, INTR_TDMA_EVENT,
					  pd->real_size, pd->dma);
				xspi->rw_states_wr = pd;
				if (RDR_rdma(SHIFT_DMA_TBC, instance)) {
					pd->int_ac = 5;
					rdma_cv_broadcast_rdma(
						&pd->dev_rdma_sem,
						instance);
					raw_spin_unlock(&dev_sem->lock);
					continue;
				}
				if (RDR_rdma(SHIFT_DMA_TCS, instance) &
						DMA_TCS_TDMA_On) {
					pd->int_ac = 5;
					rdma_cv_broadcast_rdma(
						&pd->dev_rdma_sem,
						instance);
					raw_spin_unlock(&dev_sem->lock);
					continue;
				}
				if (!pd->dma) {
					pd->int_ac = 5;
					rdma_cv_broadcast_rdma(
						&pd->dev_rdma_sem,
						instance);
					raw_spin_unlock(&dev_sem->lock);
					continue;
				}
				p_xxb_pa.addr = (unsigned long)pd->dma;
				WRR_rdma(SHIFT_DMA_HTSA, instance,
					 p_xxb_pa.fields.haddr);
				WRR_rdma(SHIFT_DMA_TSA, instance,
					 p_xxb_pa.fields.laddr);
				if (rfsm) {
#ifdef CONFIG_E2K
					if (IS_MACHINE_E2S)
						WRR_rdma(SHIFT_DMA_TBC,
							 instance,
							 pd->real_size);
					else
						WRR_rdma(SHIFT_DMA_TBC,
							 instance,
							 PAGE_ALIGN(pd->real_size));
#else
					WRR_rdma(SHIFT_DMA_TBC, instance,
						 PAGE_ALIGN(pd->real_size));
#endif
				} else {
					WRR_rdma(SHIFT_DMA_TBC, instance,
						 pd->real_size);
				}
				WRR_rdma(SHIFT_DMA_TCS, instance,
					 RCode_64 | DMA_TCS_DRCL |
					 DMA_TCS_TE |
					 (pd->tm ? DMA_TCS_TTM : 0));
				pst->tcs = RDR_rdma(SHIFT_DMA_TCS, instance);
				pd->tm ? pst->try_TDMA_tm++ : pst->try_TDMA++;
				raw_spin_unlock(&dev_sem->lock);
				continue;
			} else if ((msg & MSG_OPER) == MSG_TRWD) {
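				/*
				 * TRWD: the transmitter announces a
				 * pending transfer; wake a reader in
				 * state 1 so it can arm the receive
				 * DMA and answer with READY.
				 */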
				int chann;

				pst->rec_trwd++;
				switch ((msg & MSG_ABONENT) >> SHIFT_ABONENT) {
				case 0:
				case 1:
				case 2:
				case 3:
					chann = msg & MSG_ABONENT;
					pd = &xspi->rw_states_d[READER];
					break;
				default: /* for E3S */
					chann = msg & MSG_ABONENT;
					pd = &xspi->rw_states_d[READER];
					break;
				}
				p_xxb.addr = (unsigned long)pd;
				dev_sem = &pd->dev_rdma_sem;
				pd->clock_receive_trwd = cur_clock;
				raw_spin_lock(&dev_sem->lock);
				fix_event(instance, INTR_TRWD_EVENT,
					  pd->int_ac, dev_sem->num_obmen);
				fix_event(instance, INTR_TRWD_EVENT,
					  p_xxb.fields.haddr,
					  p_xxb.fields.laddr);
				switch (pd->int_ac) {
				case 1:
					pd->int_ac = 2;
					pd->msg = msg;
					fix_event(instance,
						  INTR_SIGN1_READ_EVENT,
						  pd->int_ac,
						  dev_sem->num_obmen);
					rdma_cv_broadcast_rdma(
						&pd->dev_rdma_sem,
						instance);
					raw_spin_unlock(&dev_sem->lock);
					continue;
				case 0:
					pd->trwd_was++;
					pd->msg = msg;
					pst->trwd_was++;
					pst->TRWD_UNXP++;
					fix_event(instance,
						  INTR_TRWD_UNXP_EVENT,
						  pd->int_ac,
						  dev_sem->num_obmen);
					raw_spin_unlock(&dev_sem->lock);
					continue;
				case 2:
					pd->trwd_was++;
					pst->trwd_was++;
					pd->msg = msg;
					pst->miss_TRWD_2++;
					fix_event(instance,
						  INTR_TRWD_UNXP_EVENT,
						  pd->int_ac,
						  dev_sem->num_obmen);
					raw_spin_unlock(&dev_sem->lock);
					continue;
				case 3:
					pd->trwd_was++;
					pd->msg = msg;
					pst->trwd_was++;
					pst->miss_TRWD_3++;
					fix_event(instance,
						  INTR_TRWD_UNXP_EVENT,
						  pd->int_ac,
						  dev_sem->num_obmen);
					raw_spin_unlock(&dev_sem->lock);
					continue;
				case 4:
					pd->trwd_was++;
					pd->msg = msg;
					pst->miss_TRWD_4++;
					fix_event(instance,
						  INTR_TRWD_UNXP_EVENT,
						  pd->int_ac,
						  dev_sem->num_obmen);
					raw_spin_unlock(&dev_sem->lock);
					continue;
				default:
					pd->trwd_was++;
					pd->msg = msg;
					fix_event(instance,
						  INTR_TRWD_UNXP_EVENT,
						  pd->int_ac,
						  dev_sem->num_obmen);
					raw_spin_unlock(&dev_sem->lock);
					continue;
				}
			} else { /* neither READY nor TRWD: plain message */
				pm = &xspi->rw_states_m[0];
				dev_sem = &pm->dev_rdma_sem;
				raw_spin_lock(&dev_sem->lock);
				if (pm->stat == RDMA_IOC_DR) {
					fix_event(instance, INTR_RMSG_EVENT,
						  pd->int_ac, 0);
					pm->msg = msg;
					pst->rdm_EXP++;
					rdma_cv_broadcast_rdma(
						&pm->dev_rdma_sem,
						instance);
					raw_spin_unlock(&dev_sem->lock);
				} else {
					fix_event(instance,
						  INTR_RMSG_UNXP_EVENT,
						  pd->int_ac, 0);
					raw_spin_unlock(&dev_sem->lock);
					pst->rdm_UNXP++;
				}
			}
		}
		evs = evs & ~ES_RDM_Ev;
	}
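	/*
	 * MSF: message send failure.  Reset the CAM and the message
	 * unit, then wake the talive waiter so it can notice the error.
	 */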
	if (evs & ES_MSF_Ev) {
		dev_rdma_sem_t *dev_sem;
		rw_state_p pcam;

		WRR_rdma(SHIFT_CAM, instance, 0);
		WRR_rdma(SHIFT_MSG_CS, instance,
			 msg_cs_dmrcl | MSG_CS_Msg_Rst);
		fix_event(instance, INTR_MSF_EVENT, 1, 0);
		pcam = &xspi->talive;
		dev_sem = &pcam->dev_rdma_sem;
		raw_spin_lock(&dev_sem->lock);
		if (pcam->stat == 1) {
			pcam->clkr = join_curr_clock();
			pcam->int_cnt = int_cnt;
			rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem, instance);
		}
		raw_spin_unlock(&dev_sem->lock);
	}
	if (evs & ES_RGP2M_Ev) {
		pst->es_rgp2++;
		evs &= ~ES_RGP2M_Ev;
	}
	if (evs & ES_RGP1M_Ev) {
		pst->es_rgp1++;
		evs &= ~ES_RGP1M_Ev;
	}
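	/*
	 * RGP0M: general-purpose message 0.  When enable_exit_gp0 is
	 * set it is used to kick a waiting reader out of its wait loop.
	 */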
	if (evs & ES_RGP0M_Ev) {
		pst->es_rgp0++;
		if (enable_exit_gp0) {
			pd = &xspi->rw_states_d[READER];
			if (pd == NULL)
				goto GP0_label;
			dev_sem = &pd->dev_rdma_sem;
			fix_event(instance, INTR_GP0_EVENT, pd->int_ac,
				  pd->state_GP0);
			raw_spin_lock(&dev_sem->lock);
			pd->state_GP0 = 1;
			switch (pd->int_ac) {
			case 1:
				rdma_cv_broadcast_rdma(&pd->dev_rdma_sem,
						       instance);
				break;
			case 0:
			case 2:
			case 3:
			default:
				break;
			}
			raw_spin_unlock(&dev_sem->lock);
		}
GP0_label:
		evs &= ~ES_RGP0M_Ev;
	}
	if (evs & ES_RLM_Ev) {
		pst->es_rlm++;
		evs &= ~ES_RLM_Ev;
	}
	if (evs & ES_RULM_Ev) {
		pst->es_rulm++;
		evs &= ~ES_RULM_Ev;
	}
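	/*
	 * RIAM: the remote side answered that it is already active.
	 * Re-arm the CAM timeout and wake the ralive waiter.
	 */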
	if (evs & ES_RIAM_Ev) {
		dev_rdma_sem_t *dev_sem;
		rw_state_p pcam;

		WRR_rdma(SHIFT_CAM, instance, tr_atl);
		time_ID_ANS = join_curr_clock();
		pcam = &xspi->ralive;
		dev_sem = &pcam->dev_rdma_sem;
		raw_spin_lock(&dev_sem->lock);
		if (pcam->stat == 1) {
			pcam->clkr = join_curr_clock();
			pcam->int_cnt = int_cnt;
			rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem, instance);
		}
		raw_spin_unlock(&dev_sem->lock);
		pst->es_riam++;
		evs &= ~ES_RIAM_Ev;
		fix_event(instance, INTR_RIAM_EVENT, 0, pst->es_riam);
	}
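	/*
	 * RIRM: the remote side reports that it has (re)started.
	 * Handled like RIAM, but stamps time_ID_REQ instead.
	 */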
	if (evs & ES_RIRM_Ev) {
		dev_rdma_sem_t *dev_sem;
		rw_state_p pcam;

		WRR_rdma(SHIFT_CAM, instance, tr_atl);
		time_ID_REQ = join_curr_clock();
		pcam = &xspi->ralive;
		dev_sem = &pcam->dev_rdma_sem;
		raw_spin_lock(&dev_sem->lock);
		if (pcam->stat == 1) {
			pcam->clkr = join_curr_clock();
			pcam->int_cnt = int_cnt;
			rdma_cv_broadcast_rdma(&pcam->dev_rdma_sem, instance);
		}
		raw_spin_unlock(&dev_sem->lock);
		pst->es_rirm++;
		evs &= ~ES_RIRM_Ev;
		fix_event(instance, INTR_RIRM_EVENT, 0, pst->es_rirm);
	}
	fix_event(instance, INTR_EXIT_EVENT, 0, 0);
	return;
}