/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQD (HiperSockets) unicast queues only the highest priority
 * queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
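
/*
 * Note on the three SIGA wrappers above: the "ipm %0; srl %0,28" sequence
 * copies the PSW condition code of the SIGNAL ADAPTER instruction into the
 * low bits of the output register, so each wrapper returns the raw cc
 * (0..3). For SIGA-w/wt the exception table entry lets a faulting SIGA
 * fall through with the preset QDIO_ERROR_SIGA_ACCESS_EXCEPTION value.
 */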

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int cc;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}
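
/*
 * The shift loop above computes pos = floor(log2(count)), i.e. a
 * power-of-two histogram bucket: for example count = 24 lands in
 * nr_sbals[4] (counts 16..31), while the full-queue case
 * (count == QDIO_MAX_BUFFERS_MASK == 127) is counted separately
 * in nr_sbals[7].
 */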

static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if (!q->is_input_q &&
	    q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or thin interrupt has already
	 * synced the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (!MACHINE_IS_LPAR)
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
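
#if 0
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * query the subchannel description. The function and variable names here
 * are made up; only the qdio_get_ssqd_desc() call matches the API above.
 */
static void example_query_ssqd(struct ccw_device *cdev)
{
	struct qdio_ssqd_desc ssqd;

	if (qdio_get_ssqd_desc(cdev, &ssqd) == 0) {
		/* adapter characteristics are now valid in 'ssqd' */
	}
}
#endif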

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
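
#if 0
/*
 * Illustrative sketch (not part of the original file): the intended life
 * cycle of a qdio subchannel as implemented by the calls above - allocate,
 * establish, activate, and later shut down and free. 'init_data' is assumed
 * to be fully filled in by the driver; the helper name is made up.
 */
static int example_bringup(struct qdio_initialize *init_data)
{
	struct ccw_device *cdev = init_data->cdev;
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;
	rc = qdio_establish(init_data);
	if (rc)
		goto out_free;
	rc = qdio_activate(cdev);
	if (rc)
		goto out_shutdown;
	return 0;

out_shutdown:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
out_free:
	qdio_free(cdev);
	return rc;
}
#endif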

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
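
/*
 * Worked example for the wrap-around case above: with start = 120 and
 * count = 16 on a 128-buffer queue, end = add_buf(120, 16) = 8. A bufnr
 * of 125 matches the first condition, a bufnr of 3 the second, and a
 * bufnr of 50 neither.
 */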

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		/* One SIGA-W per buffer required for unicast HiperSockets. */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		rc = qdio_kick_outbound_q(q);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
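
#if 0
/*
 * Illustrative sketch (not part of the original file): typical do_QDIO()
 * calls from a driver. The buffer numbers and queue index are assumptions
 * about the caller's bookkeeping; the flags match the API above.
 */
static void example_do_qdio(struct ccw_device *cdev)
{
	/* give one emptied buffer of input queue 0 back to the adapter */
	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 17, 1);

	/* hand one filled buffer of output queue 0 to the adapter and
	 * request a PCI interrupt once it has been processed */
	do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT, 0, 42, 1);
}
#endif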

/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	if (!shared_ind(q->irq_ptr->dsci))
		xchg(q->irq_ptr->dsci, 0);

	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
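
#if 0
/*
 * Illustrative sketch (not part of the original file): the interrupt
 * avoidance (polling) flow built from qdio_stop_irq(),
 * qdio_get_next_buffers() and qdio_start_irq() above. Interrupts are
 * assumed to have been turned off beforehand, either by qdio_stop_irq()
 * or by the PCI handler; a real driver would process the buffers and
 * return them via do_QDIO().
 */
static void example_poll(struct ccw_device *cdev, int nr)
{
	int start, error, count;

	do {
		while ((count = qdio_get_next_buffers(cdev, nr, &start,
						      &error)) > 0) {
			/* process 'count' buffers beginning at 'start' */
		}
		/* re-enable interrupts; rc == 1 means new data raced in */
	} while (qdio_start_irq(cdev, nr) == 1);
}
#endif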

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}
);
1691 module_exit(exit_QDIO
);