/*
 * qdio queue initialization
 *
 * Copyright IBM Corp. 2008
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"
static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;
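/*
 * AOBs are handed out to drivers for asynchronous completion handling
 * and may be requested from atomic context, hence GFP_ATOMIC.
 */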
struct qaob *qdio_allocate_aob(void)
{
        return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);
void qdio_release_aob(struct qaob *aob)
{
        kmem_cache_free(qdio_aob_cache, aob);
}
EXPORT_SYMBOL_GPL(qdio_release_aob);
/*
 * qebsm is only available under 64bit but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
        return css_general_characteristics.qebsm;
}
/*
 * qib_param_field: pointer to 128 bytes or NULL, if no param field
 * input/output_slib_elements: pointer to nr_queues*128 words of data or NULL
 */
static void set_impl_params(struct qdio_irq *irq_ptr,
                            unsigned int qib_param_field_format,
                            unsigned char *qib_param_field,
                            unsigned long *input_slib_elements,
                            unsigned long *output_slib_elements)
{
        struct qdio_q *q;
        int i, j;

        if (!irq_ptr)
                return;

        irq_ptr->qib.pfmt = qib_param_field_format;
        if (qib_param_field)
                memcpy(irq_ptr->qib.parm, qib_param_field,
                       QDIO_MAX_BUFFERS_PER_Q);

        if (!input_slib_elements)
                goto output;

        for_each_input_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
output:
        if (!output_slib_elements)
                return;

        for_each_output_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
}
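/* Allocate the queue structures plus one page per queue for its SLIB. */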
static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
        struct qdio_q *q;
        int i;

        for (i = 0; i < nr_queues; i++) {
                q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
                if (!q)
                        return -ENOMEM;

                q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
                if (!q->slib) {
                        kmem_cache_free(qdio_q_cache, q);
                        return -ENOMEM;
                }
                irq_ptr_qs[i] = q;
        }
        return 0;
}
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
        int rc;

        rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
        if (rc)
                return rc;

        rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
        return rc;
}
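/*
 * Reset a queue to a defined state, preserving the SLIB page that was
 * allocated for it, and attach the irq pointer and handler.
 */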
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
                              qdio_handler_t *handler, int i)
{
        struct slib *slib = q->slib;

        /* queue must be cleared for qdio_establish */
        memset(q, 0, sizeof(*q));
        memset(slib, 0, PAGE_SIZE);
        q->slib = slib;
        q->irq_ptr = irq_ptr;
        q->mask = 1 << (31 - i);
        q->nr = i;
        q->handler = handler;
}
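/*
 * The SLIB sits in the first half of the queue's page and the storage
 * list (SL) in the second half; SLIBs of queues in the same direction
 * are chained via the nsliba field.
 */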
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
                                void **sbals_array, int i)
{
        struct qdio_q *prev;
        int j;

        DBF_HEX(&q, sizeof(void *));
        q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

        /* fill in sbal */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                q->sbal[j] = *sbals_array++;

        /* fill in slib */
        if (i > 0) {
                prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
                        : irq_ptr->output_qs[i - 1];
                prev->slib->nsliba = (unsigned long)q->slib;
        }

        q->slib->sla = (unsigned long)q->sl;
        q->slib->slsba = (unsigned long)&q->slsb.val[0];

        /* fill in sl */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                q->sl->element[j].sbal = (unsigned long)q->sbal[j];
}
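/*
 * Set up all input and output queues: handlers, storage lists and the
 * inbound/outbound tasklets. Thin-interrupt devices use the tiqdio
 * variant for inbound processing.
 */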
static void setup_queues(struct qdio_irq *irq_ptr,
                         struct qdio_initialize *qdio_init)
{
        struct qdio_q *q;
        void **input_sbal_array = qdio_init->input_sbal_addr_array;
        void **output_sbal_array = qdio_init->output_sbal_addr_array;
        struct qdio_outbuf_state *output_sbal_state_array =
                                  qdio_init->output_sbal_state_array;
        int i;

        for_each_input_queue(irq_ptr, q, i) {
                DBF_EVENT("inq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

                q->is_input_q = 1;
                q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
                                qdio_init->queue_start_poll_array[i] : NULL;

                setup_storage_lists(q, irq_ptr, input_sbal_array, i);
                input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                if (is_thinint_irq(irq_ptr)) {
                        tasklet_init(&q->tasklet, tiqdio_inbound_processing,
                                     (unsigned long) q);
                } else {
                        tasklet_init(&q->tasklet, qdio_inbound_processing,
                                     (unsigned long) q);
                }
        }

        for_each_output_queue(irq_ptr, q, i) {
                DBF_EVENT("outq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

                q->u.out.sbal_state = output_sbal_state_array;
                output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;

                q->is_input_q = 0;
                q->u.out.scan_threshold = qdio_init->scan_threshold;
                setup_storage_lists(q, irq_ptr, output_sbal_array, i);
                output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                tasklet_init(&q->tasklet, qdio_outbound_processing,
                             (unsigned long) q);
                setup_timer(&q->u.out.timer, (void(*)(unsigned long))
                            &qdio_outbound_timer, (unsigned long)q);
        }
}
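/*
 * Translate the adapter characteristics into SIGA flags: which queue
 * directions require an explicit SIGA instruction and where software
 * must synchronize buffer states itself.
 */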
static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
        if (qdioac & AC1_SIGA_INPUT_NEEDED)
                irq_ptr->siga_flag.input = 1;
        if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
                irq_ptr->siga_flag.output = 1;
        if (qdioac & AC1_SIGA_SYNC_NEEDED)
                irq_ptr->siga_flag.sync = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
                irq_ptr->siga_flag.sync_after_ai = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
                irq_ptr->siga_flag.sync_out_after_pci = 1;
}
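/*
 * Keep QEBSM (hardware-assisted buffer state tracking) only if the
 * subchannel reports it as both available and enabled; otherwise clear
 * the feature flag and fall back to software buffer handling.
 */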
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
                                  unsigned char qdioac, unsigned long token)
{
        if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
                goto no_qebsm;
        if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
            (!(qdioac & AC1_SC_QEBSM_ENABLED)))
                goto no_qebsm;

        irq_ptr->sch_token = token;

        DBF_EVENT("V=V:1");
        DBF_EVENT("%8lx", irq_ptr->sch_token);
        return;

no_qebsm:
        irq_ptr->sch_token = 0;
        irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
        DBF_EVENT("noV=V");
}
/*
 * If there is a qdio_irq we use the chsc_page and store the information
 * in the qdio_irq, otherwise we copy it to the specified structure.
 */
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
                        struct subchannel_id *schid,
                        struct qdio_ssqd_desc *data)
{
        struct chsc_ssqd_area *ssqd;
        int rc;

        DBF_EVENT("getssqd:%4x", schid->sch_no);
        if (!irq_ptr) {
                ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
                if (!ssqd)
                        return -ENOMEM;
        } else {
                ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
        }

        rc = chsc_ssqd(*schid, ssqd);
        if (rc)
                goto out;

        if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
            !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
            (ssqd->qdio_ssqd.sch != schid->sch_no))
                rc = -EINVAL;

        if (!rc)
                memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));

out:
        if (!irq_ptr)
                free_page((unsigned long)ssqd);

        return rc;
}
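/*
 * Retrieve the SSQD and derive the QEBSM and SIGA settings from it.
 * If the query fails, assume the worst case and require SIGA for
 * input, output and synchronization.
 */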
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
        unsigned char qdioac;
        int rc;

        rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
        if (rc) {
                DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%x", rc);
                /* all flags set, worst case */
                qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
                         AC1_SIGA_SYNC_NEEDED;
        } else
                qdioac = irq_ptr->ssqd_desc.qdioac1;

        check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
        process_ac_flags(irq_ptr, qdioac);
        DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
        DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
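/*
 * Release everything attached to the qdio_irq: queue structures with
 * their SLIB pages, any AOBs of completion-queue enabled output
 * queues, the QDR, the CHSC page and the qdio_irq itself.
 */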
void qdio_release_memory(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        /*
         * Must check the queue arrays manually since irq_ptr->nr_input_qs /
         * irq_ptr->nr_output_qs may not yet be set.
         */
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->input_qs[i];
                if (q) {
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
        }
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->output_qs[i];
                if (q) {
                        if (q->u.out.use_cq) {
                                int n;

                                for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
                                        struct qaob *aob = q->u.out.aobs[n];
                                        if (aob) {
                                                qdio_release_aob(aob);
                                                q->u.out.aobs[n] = NULL;
                                        }
                                }

                                qdio_disable_async_operation(&q->u.out);
                        }
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
        }
        free_page((unsigned long) irq_ptr->qdr);
        free_page(irq_ptr->chsc_page);
        free_page((unsigned long) irq_ptr);
}
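/*
 * Fill one queue descriptor (qdesfmt0) in the QDR with the addresses
 * of the queue's SLIB, SL and SLSB plus the storage access keys.
 */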
static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
                                     struct qdio_q **irq_ptr_qs,
                                     int i, int nr)
{
        irq_ptr->qdr->qdf0[i + nr].sliba =
                (unsigned long)irq_ptr_qs[i]->slib;

        irq_ptr->qdr->qdf0[i + nr].sla =
                (unsigned long)irq_ptr_qs[i]->sl;

        irq_ptr->qdr->qdf0[i + nr].slsba =
                (unsigned long)&irq_ptr_qs[i]->slsb.val[0];

        irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}
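/*
 * Build the queue descriptor record (QDR) that describes all input
 * and output queues to the channel subsystem.
 */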
static void setup_qdr(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *qdio_init)
{
        int i;

        irq_ptr->qdr->qfmt = qdio_init->q_format;
        irq_ptr->qdr->ac = qdio_init->qdr_ac;
        irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
        irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
        irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
        irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
        irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
        irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;

        for (i = 0; i < qdio_init->no_input_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);

        for (i = 0; i < qdio_init->no_output_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
                                         qdio_init->no_input_qs);
}
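/*
 * Initialize the queue information block (QIB): requested flags, queue
 * format, the first SLIB address per direction and the adapter name.
 */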
static void setup_qib(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *init_data)
{
        if (qebsm_possible())
                irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

        irq_ptr->qib.rflags |= init_data->qib_rflags;

        irq_ptr->qib.qfmt = init_data->q_format;
        if (init_data->no_input_qs)
                irq_ptr->qib.isliba =
                        (unsigned long)(irq_ptr->input_qs[0]->slib);
        if (init_data->no_output_qs)
                irq_ptr->qib.osliba =
                        (unsigned long)(irq_ptr->output_qs[0]->slib);
        memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}
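/*
 * Central setup routine: wipe the relevant parts of the qdio_irq,
 * build the queues, QIB and QDR, fetch the establish/activate-queues
 * CIWs and install the qdio interrupt handler. On failure all memory
 * is released again.
 */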
int qdio_setup_irq(struct qdio_initialize *init_data)
{
        struct ciw *ciw;
        struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
        int rc;

        memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
        memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
        memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
        memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
        memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));

        irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
        irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;

        /* wipes qib.ac, required by ar7063 */
        memset(irq_ptr->qdr, 0, sizeof(struct qdr));

        irq_ptr->int_parm = init_data->int_parm;
        irq_ptr->nr_input_qs = init_data->no_input_qs;
        irq_ptr->nr_output_qs = init_data->no_output_qs;
        irq_ptr->cdev = init_data->cdev;
        ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
        setup_queues(irq_ptr, init_data);

        setup_qib(irq_ptr, init_data);
        qdio_setup_thinint(irq_ptr);
        set_impl_params(irq_ptr, init_data->qib_param_field_format,
                        init_data->qib_param_field,
                        init_data->input_slib_elements,
                        init_data->output_slib_elements);

        /* fill input and output descriptors */
        setup_qdr(irq_ptr, init_data);

        /* qdr, qib, sls, slsbs, slibs, sbales are filled now */

        /* get qdio commands */
        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->equeue = *ciw;

        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->aqueue = *ciw;

        /* set new interrupt handler */
        irq_ptr->orig_handler = init_data->cdev->handler;
        init_data->cdev->handler = qdio_int_handler;
        return 0;
out_err:
        qdio_release_memory(irq_ptr);
        return rc;
}
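/*
 * Log a one-line summary of the subchannel's QDIO capabilities and
 * the SIGA requirements in effect.
 */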
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
                                struct ccw_device *cdev)
{
        char s[80];

        snprintf(s, 80, "qdio: %s %s on SC %x using "
                 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
                 dev_name(&cdev->dev),
                 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
                        ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
                 irq_ptr->schid.sch_no,
                 is_thinint_irq(irq_ptr),
                 (irq_ptr->sch_token) ? 1 : 0,
                 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
                 css_general_characteristics.aif_tdd,
                 (irq_ptr->siga_flag.input) ? "R" : " ",
                 (irq_ptr->siga_flag.output) ? "W" : " ",
                 (irq_ptr->siga_flag.sync) ? "S" : " ",
                 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
                 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
        printk(KERN_INFO "%s", s);
}
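/*
 * Allocate the per-buffer AOB pointer array for asynchronous
 * (completion queue) operation; use_cq records whether it succeeded.
 */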
int qdio_enable_async_operation(struct qdio_output_q *outq)
{
        outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
                             GFP_ATOMIC);
        if (!outq->aobs) {
                outq->use_cq = 0;
                return -ENOMEM;
        }
        outq->use_cq = 1;
        return 0;
}
void qdio_disable_async_operation(struct qdio_output_q *q)
{
        kfree(q->aobs);
        q->aobs = NULL;
        q->use_cq = 0;
}
int __init qdio_setup_init(void)
{
        int rc = -ENOMEM;

        qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
                                         256, 0, NULL);
        if (!qdio_q_cache)
                return -ENOMEM;

        qdio_aob_cache = kmem_cache_create("qdio_aob",
                                           sizeof(struct qaob),
                                           sizeof(struct qaob),
                                           0, NULL);
        if (!qdio_aob_cache) {
                rc = -ENOMEM;
                goto free_qdio_q_cache;
        }

        /* Check for OSA/FCP thin interrupts (bit 67). */
        DBF_EVENT("thinint:%1d",
                  (css_general_characteristics.aif_osa) ? 1 : 0);

        /* Check for QEBSM support in general (bit 58). */
        DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
        rc = 0;
out:
        return rc;
free_qdio_q_cache:
        kmem_cache_destroy(qdio_q_cache);
        goto out;
}
void qdio_setup_exit(void)
{
        kmem_cache_destroy(qdio_aob_cache);
        kmem_cache_destroy(qdio_q_cache);
}