// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

#define ZFCP_QDIO_REQUEST_RESCAN_MSECS	(MSEC_PER_SEC * 10)
#define ZFCP_QDIO_REQUEST_SCAN_MSECS	MSEC_PER_SEC

static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}

static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	/* TOD-clock bit 51 ticks once per microsecond, so >> 12 yields usecs */
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	/* accumulate utilization: buffers in use times elapsed microseconds */
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}

static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
}
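
/*
 * Request Queue completion handling: zfcp_qdio_send() either schedules
 * request_tasklet directly (when the queue is running full) or arms
 * request_timer.  The tasklet below polls the output queue, zeroes the
 * SBALs that have become program-owned again, returns them to
 * req_q_free and, while buffers remain outstanding, re-arms the timer
 * as a rescan fallback (ZFCP_QDIO_REQUEST_RESCAN_MSECS).
 */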

static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	completed = qdio_inspect_output_queue(cdev, 0, &start, &error);
	if (completed > 0) {
		if (error) {
			zfcp_qdio_handler_error(qdio, "qdreqt1", error);
		} else {
			/* cleanup all SBALs being program-owned now */
			zfcp_qdio_zero_sbals(qdio->req_q, start, completed);

			spin_lock_irq(&qdio->stat_lock);
			zfcp_qdio_account(qdio);
			spin_unlock_irq(&qdio->stat_lock);
			atomic_add(completed, &qdio->req_q_free);
			wake_up(&qdio->req_q_wq);
		}
	}

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
}

static void zfcp_qdio_request_timer(struct timer_list *timer)
{
	struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);

	tasklet_schedule(&qdio->request_tasklet);
}

static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0,
			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
			sbale = qdio->res_q[idx]->element;
			req_id = dma64_to_u64(sbale->addr);
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
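
/*
 * Response Queue handling: qdio invokes zfcp_qdio_poll() when data
 * arrives while the device interrupt is disabled for polling.  The
 * scheduled irq_tasklet drains the input queue with
 * qdio_inspect_input_queue(), hands each returned SBAL to
 * zfcp_qdio_int_resp() above, and then calls qdio_start_irq() to
 * re-enable the interrupt, rescheduling itself if more work was
 * already pending.
 */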

static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		tasklet_schedule(&qdio->request_tasklet);

	/* Check the Response Queue: */
	completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
	if (completed < 0)
		return;
	if (completed > 0)
		zfcp_qdio_int_resp(cdev, error, 0, start, completed,
				   (unsigned long) qdio);

	if (qdio_start_irq(cdev))
		/* More work pending: */
		tasklet_schedule(&qdio->irq_tasklet);
}

static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;

	tasklet_schedule(&qdio->irq_tasklet);
}

static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}

static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = u64_to_dma64(sg_phys(sg));
		sbale->length = sg->length;
	}
	return 0;
}

static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}

/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	/*
	 * This should actually be a spin_lock_bh(stat_lock), to protect against
	 * Request Queue completion processing in tasklet context.
	 * But we can't do so (and are safe), as we always get called with IRQs
	 * disabled by spin_lock_irq[save](req_q_lock).
	 */
	lockdep_assert_irqs_disabled();
	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0,
					       q_req->sbal_first, sbal_number,
					       NULL);

	if (unlikely(retval)) {
		/* Failed to submit the IO, roll back our modifications. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
		tasklet_schedule(&qdio->request_tasklet);
	else
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));

	/* account for transferred buffers */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}
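
/*
 * Typical submission path (sketch; the q_req setup is abbreviated, the
 * real callers in the FSF layer initialize it through the inline
 * helpers in zfcp_qdio.h):
 *
 *	spin_lock_irq(&qdio->req_q_lock);
 *	if (zfcp_qdio_sbal_get(qdio))
 *		goto out;
 *	... set up q_req: first SBAL, SBAL limit, storage-block type ...
 *	if (zfcp_qdio_sbals_from_sg(qdio, &q_req, sg))
 *		goto out;
 *	retval = zfcp_qdio_send(qdio, &q_req);
 * out:
 *	spin_unlock_irq(&qdio->req_q_lock);
 *
 * zfcp_qdio_sbal_get() may sleep for up to five seconds while waiting
 * for a free SBAL, zfcp_qdio_sbals_from_sg() fails when the
 * scatter-gather list does not fit into the allowed SBAL chain, and
 * zfcp_qdio_send() hands the prepared SBALs to the FCP channel.
 */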

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/*
	 * Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called
	 * during qdio_shutdown().
	 */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);
	del_timer_sync(&qdio->request_timer);
	qdio_stop_irq(adapter->ccw_device);
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}

void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
			    const struct zfcp_qdio *const qdio)
{
	struct Scsi_Host *const shost = adapter->scsi_host;

	if (shost == NULL)
		return;

	shost->sg_tablesize = qdio->max_sbale_per_req;
	/* each SBALE covers at most one 4 KiB page, i.e. 8 sectors of 512 bytes */
	shost->max_sectors = qdio->max_sbale_per_req * 8;
}

/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.irq_poll = zfcp_qdio_poll;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	/* Enable processing for Request Queue completions: */
	tasklet_enable(&qdio->request_tasklet);
	/* Enable processing for QDIO interrupts: */
	tasklet_enable(&qdio->irq_tasklet);
	/* This results in a qdio_start_irq(): */
	tasklet_schedule(&qdio->irq_tasklet);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}

void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	tasklet_kill(&qdio->irq_tasklet);
	tasklet_kill(&qdio->request_tasklet);

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}

int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);
	timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
	tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
	tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);

	adapter->qdio = qdio;
	return 0;
}
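
/*
 * Note that zfcp_qdio_setup() leaves both tasklets disabled; they are
 * only enabled once zfcp_qdio_open() has established and activated the
 * queues, and zfcp_qdio_close() disables them again before calling
 * qdio_shutdown().
 */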

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging.  This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &adapter->status);
}