/*
 * linux/drivers/s390/cio/thinint_qdio.c
 *
 * Copyright 2000,2009 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "qdio.h"
#include "qdio_debug.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * after that, subsequent subchannels share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
DEFINE_MUTEX(tiq_list_lock);

/* adapter local summary indicator */
static u8 *tiqdio_alsi;

struct indicator_t *q_indicators;

static u64 last_ai_time;

/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}
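
/* release an indicator previously handed out by get_indicator() */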
static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;

	i = ((unsigned long)addr - (unsigned long)q_indicators) /
		sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}
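
/* publish all input queues of a device on the thinint list and set its dsci */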
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	mutex_lock(&tiq_list_lock);
	for_each_input_queue(irq_ptr, q, i)
		list_add_rcu(&q->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	xchg(irq_ptr->dsci, 1 << 7);
}
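
/* remove a device's input queues from the thinint list again */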
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for (i = 0; i < irq_ptr->nr_input_qs; i++) {
		q = irq_ptr->input_qs[i];
		/* if establish triggered an error */
		if (!q || !q->entry.prev || !q->entry.next)
			continue;

		mutex_lock(&tiq_list_lock);
		list_del_rcu(&q->entry);
		mutex_unlock(&tiq_list_lock);
		synchronize_rcu();
	}
}
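
/* fetch and clear the shared indicator, but only if it is in use */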
static inline u32 clear_shared_ind(void)
{
	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
		return 0;
	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @alsi: pointer to adapter local summary indicator
 * @data: NULL
 */
static void tiqdio_thinint_handler(void *alsi, void *data)
{
	u32 si_used = clear_shared_ind();
	struct qdio_q *q;

	last_ai_time = S390_lowcore.int_clock;
	kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	/* check for work on all inbound thinint queues */
	list_for_each_entry_rcu(q, &tiq_list, entry) {

		/* only process queues from changed sets */
		if (unlikely(shared_ind(q->irq_ptr->dsci))) {
			if (!si_used)
				continue;
		} else if (!*q->irq_ptr->dsci)
			continue;

		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}

			/* avoid dsci clear here, done after processing */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			/* only clear it if the indicator is non-shared */
			if (!shared_ind(q->irq_ptr->dsci))
				xchg(q->irq_ptr->dsci, 0);
			/*
			 * Call inbound processing but not directly
			 * since that could starve other thinint queues.
			 */
			tasklet_schedule(&q->tasklet);
		}
		qperf_inc(q, adapter_int);
	}
	rcu_read_unlock();
}
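
/*
 * Register (or, with reset set, unregister) the summary and subchannel
 * indicator addresses for this subchannel with the channel subsystem.
 */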
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct scssc_area *scssc_area;
	int rc;

	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
	memset(scssc_area, 0, PAGE_SIZE);

	if (reset) {
		scssc_area->summary_indicator_addr = 0;
		scssc_area->subchannel_indicator_addr = 0;
	} else {
		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		scssc_area->subchannel_indicator_addr =
			virt_to_phys(irq_ptr->dsci);
	}

	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code	= 0x0021,
	};
	scssc_area->operation_code = 0;
	scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
	scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
	scssc_area->isc = QDIO_AIRQ_ISC;
	scssc_area->schid = irq_ptr->schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;

	rc = chsc(scssc_area);
	if (rc)
		return -EIO;

	rc = chsc_error_from_response(scssc_area->response.code);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc_area->response.code);
		DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
		return rc;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
	DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
	return 0;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}
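
/* register the handler for QDIO adapter interrupts */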
int __init tiqdio_register_thinints(void)
{
	isc_register(QDIO_AIRQ_ISC);
	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
						      NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(tiqdio_alsi)) {
		DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
		tiqdio_alsi = NULL;
		isc_unregister(QDIO_AIRQ_ISC);
		return -ENOMEM;
	}
	return 0;
}
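
/* at establish time, register the device's indicators with the adapter */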
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	return set_subchannel_ind(irq_ptr, 0);
}
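
/* assign a device state change indicator to the device during setup */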
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}
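
/* undo the indicator setup at shutdown time */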
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	set_subchannel_ind(irq_ptr, 1);
	put_indicator(irq_ptr->dsci);
}
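
/* unregister the adapter interrupt handler again */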
void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));

	if (tiqdio_alsi) {
		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
		isc_unregister(QDIO_AIRQ_ISC);
	}
}