/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}

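/*
 * The three do_siga_* helpers above share one pattern: operands are pinned
 * to the fixed general purpose registers (GPR 0-3) that the SIGA
 * instruction expects, the instruction is issued, and the architected
 * condition code is moved into a C variable via "ipm" (insert program
 * mask) plus a shift. A minimal stand-alone sketch of that cc-extraction
 * idiom follows; the instruction name INSN is a hypothetical placeholder.
 */
#if 0	/* illustrative sketch only, not compiled */
static inline int cc_of_insn(unsigned long op)
{
	register unsigned long __op asm("1") = op;	/* operand in GPR 1 */
	int cc;

	asm volatile(
		"	INSN	0\n"		/* issue the instruction */
		"	ipm	%0\n"		/* cc sits in bits 2-3 of the PSW mask byte */
		"	srl	%0,28\n"	/* shift it down to 0..3 */
		: "=d" (cc)
		: "d" (__op)
		: "cc");
	return cc;	/* architected condition code, 0..3 */
}
#endif
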
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* no buffer processed */
	if (ccq == 97)
		return 1;
	/* not all buffers processed */
	if (ccq == 96)
		return 2;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc == 2) {
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		/*
		 * Retry once, if that fails bail out and process the
		 * extracted buffers before trying again.
		 */
		if (!retried++)
			goto again;
		else
			return count - tmp_count;
	}

	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (!rc) {
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	}

	if (rc == 1 || rc == 2) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}

	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state) {
			__state = q->slsb.val[bufnr];
			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
				__state = SLSB_P_OUTPUT_EMPTY;
		} else if (merge_pending) {
			if ((q->slsb.val[bufnr] & __state) != __state)
				break;
		} else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

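/*
 * For reference: the buffer-number helpers used throughout this file
 * (next_buf, add_buf, sub_buf, prev_buf) treat the 128 SBALs of a queue
 * as a ring and mask the result accordingly. A minimal sketch, assuming
 * the definitions in qdio.h look essentially like this:
 */
#if 0	/* illustrative sketch only, see qdio.h for the real helpers */
#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_MAX_BUFFERS_MASK	(QDIO_MAX_BUFFERS_PER_Q - 1)

static inline int add_buf(int bufnr, int inc)
{
	return (bufnr + inc) & QDIO_MAX_BUFFERS_MASK;	/* wraps at 128 */
}

static inline int next_buf(int bufnr)
{
	return add_buf(bufnr, 1);
}

static inline int sub_buf(int bufnr, int dec)
{
	return (bufnr - dec) & QDIO_MAX_BUFFERS_MASK;	/* wraps below 0 */
}
#endif
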
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
		(aob && fc != QDIO_SIGA_WRITEQ));
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock();
			goto again;
		}
		if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	pos = ilog2(count);
	q->q_stats.nr_sbals[pos]++;
}

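/*
 * Worked example for the bucketing above: scan results are sorted into
 * eight logarithmic histogram buckets. A count of 1 lands in nr_sbals[0],
 * 2-3 in [1], 4-7 in [2], and so on (e.g. ilog2(24) == 4, so a batch of
 * 24 buffers is counted in nr_sbals[4]); a full scan of 127, the mask,
 * goes into the dedicated bucket [7].
 */
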
static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		goto set;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

set:
	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_tod_clock();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		WARN_ON_ONCE(1);
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_tod_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static inline int contains_aobs(struct qdio_q *q)
{
	return !q->is_input_q && q->u.out.use_cq;
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	if (!contains_aobs(q))
		return;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		} else if (state == SLSB_P_OUTPUT_EMPTY) {
			q->u.out.sbal_state[b].aob = NULL;
		}
		b = next_buf(b);
	}
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		goto out;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
		q->sbal_state[bufnr].aob = q->aobs[bufnr];
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		WARN_ON_ONCE(phys_aob & 0xFF);
	}

out:
	return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	qdio_handle_aobs(q, start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

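/*
 * The q->handler invoked above is the qdio_handler_t callback that the
 * upper-layer driver registered in its struct qdio_initialize. A skeleton
 * of such a handler follows (a minimal sketch; "my_input_handler",
 * "struct my_card" and the interpretation of int_parm are hypothetical):
 */
#if 0	/* illustrative sketch only */
static void my_input_handler(struct ccw_device *cdev, unsigned int qdio_err,
			     int queue, int first, int count,
			     unsigned long int_parm)
{
	struct my_card *card = (struct my_card *) int_parm;	/* hypothetical */
	int i, bufnr;

	if (qdio_err) {
		/* e.g. QDIO_ERROR_SLSB_STATE: recover or offline the card */
		return;
	}
	for (i = 0; i < count; i++) {
		bufnr = (first + i) & QDIO_MAX_BUFFERS_MASK;
		/* process card->rx_bufs[bufnr], then return it via do_QDIO */
	}
}
#endif
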
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_tod_clock();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		WARN_ON_ONCE(1);
	}

out:
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			tasklet_schedule(&q->tasklet);
		}
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				continue;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

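/*
 * Typical bring-up order as seen from an upper-layer driver: fill a
 * struct qdio_initialize, then call qdio_allocate, qdio_establish and
 * qdio_activate in that order; teardown is qdio_shutdown followed by
 * qdio_free. A condensed sketch; the handlers, card pointer and SBAL
 * arrays ("my_*") are hypothetical:
 */
#if 0	/* illustrative sketch only */
static int my_setup_qdio(struct ccw_device *cdev)
{
	struct qdio_initialize init_data = {
		.cdev			= cdev,
		.q_format		= QDIO_QETH_QFMT,
		.no_input_qs		= 1,
		.no_output_qs		= 1,
		.input_handler		= my_input_handler,	/* hypothetical */
		.output_handler		= my_output_handler,	/* hypothetical */
		.int_parm		= (unsigned long) my_card,
		.input_sbal_addr_array	= my_in_sbals,		/* hypothetical */
		.output_sbal_addr_array	= my_out_sbals,		/* hypothetical */
	};
	int rc;

	rc = qdio_allocate(&init_data);
	if (rc)
		return rc;
	rc = qdio_establish(&init_data);
	if (rc)
		goto out_free;
	rc = qdio_activate(cdev);
	if (rc)
		goto out_shutdown;
	return 0;

out_shutdown:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
out_free:
	qdio_free(cdev);
	return rc;
}
#endif
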
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}

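/*
 * Worked example for the wrap-around case above: with start = 120 and
 * count = 16, end = add_buf(120, 16) = 8. Buffer 125 satisfies the first
 * condition (>= 120), buffer 3 the second (< 8), so both count as
 * "in between"; buffer 50 matches neither and does not.
 */
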
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q, 0);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

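/*
 * Example call, as an upper-layer driver would issue it after refilling
 * input buffers (a minimal sketch; "my_cdev" and "my_recover" are
 * hypothetical):
 */
#if 0	/* illustrative sketch only */
static void my_refill_input_queue(struct ccw_device *my_cdev)
{
	int rc;

	/* hand 16 empty buffers (0..15) of input queue 0 back to the adapter */
	rc = do_QDIO(my_cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 16);
	if (rc)
		my_recover(my_cdev);	/* hypothetical recovery hook */
}
#endif
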
/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	clear_nonshared_ind(irq_ptr);
	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

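/*
 * qdio_start_irq, qdio_get_next_buffers and qdio_stop_irq form qdio's
 * interrupt-avoidance (polling) API: the driver keeps qdio interrupts
 * disabled while it polls via qdio_get_next_buffers, and re-arms them
 * with qdio_start_irq once no more work is found. A sketch of the
 * intended flow in an upper-layer poll function (the NAPI-style driver
 * context "my_*" is hypothetical):
 */
#if 0	/* illustrative sketch only */
static int my_poll(struct my_card *card, int budget)
{
	int work = 0, bufnr = 0, error = 0, n;

	while (work < budget) {
		n = qdio_get_next_buffers(card->cdev, 0, &bufnr, &error);
		if (n < 0)
			break;			/* device error, recover */
		if (n == 0) {
			/* no new data: try to re-enable the qdio interrupt */
			if (qdio_start_irq(card->cdev, 0) == 0)
				break;		/* irqs on again, stop polling */
			continue;		/* raced with new data, rescan */
		}
		work += my_process_buffers(card, bufnr, n, error);	/* hypothetical */
	}
	return work;
}
#endif
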
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);