/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corporation 2002, 2008
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include "zfcp_ext.h"

#define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
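/*
 * A struct qdio_buffer occupies only a fraction of a page, so each
 * zeroed page allocated in zfcp_qdio_buffers_enqueue() below is shared
 * by QBUFF_PER_PAGE buffers; only every QBUFF_PER_PAGE-th sbal pointer
 * starts a fresh page (and is the one later passed to free_page()).
 */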
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
	int pos;

	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
		sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
		if (!sbal[pos])
			return -ENOMEM;
	}
	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
		if (pos % QBUFF_PER_PAGE)
			sbal[pos] = sbal[pos - 1] + 1;

	return 0;
}
static struct qdio_buffer_element *
zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
{
	return &q->sbal[sbal_idx]->element[sbale_idx];
}
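/*
 * Terminology used throughout this file: an SBAL (storage block
 * address list) is one struct qdio_buffer; each of its elements is an
 * SBALE (storage block address list entry) describing one data segment.
 */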
/**
 * zfcp_qdio_free - free memory used by request- and response queue
 * @adapter: pointer to the zfcp_adapter structure
 */
void zfcp_qdio_free(struct zfcp_adapter *adapter)
{
	struct qdio_buffer **sbal_req, **sbal_resp;
	int p;

	if (adapter->ccw_device)
		qdio_free(adapter->ccw_device);

	sbal_req = adapter->req_q.sbal;
	sbal_resp = adapter->resp_q.sbal;

	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
		free_page((unsigned long) sbal_req[p]);
		free_page((unsigned long) sbal_resp[p]);
	}
}
static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id)
{
	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
}
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first,
				 int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}
/* this needs to be called prior to updating the queue fill level */
static void zfcp_qdio_account(struct zfcp_adapter *adapter)
{
	ktime_t now;
	s64 span;
	int free, used;

	spin_lock(&adapter->qdio_stat_lock);
	now = ktime_get();
	span = ktime_us_delta(now, adapter->req_q_time);
	free = max(0, atomic_read(&adapter->req_q.count));
	used = QDIO_MAX_BUFFERS_PER_Q - free;
	adapter->req_q_util += used * span;
	adapter->req_q_time = now;
	spin_unlock(&adapter->qdio_stat_lock);
}
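/*
 * Accounting note: req_q_util accumulates "used buffers x elapsed
 * microseconds", so sampling (req_q_util, req_q_time) at two points in
 * time yields the average request queue usage over that interval as
 * delta(req_q_util) / delta(req_q_time).
 */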
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int first, int count,
			      unsigned long parm)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
	struct zfcp_qdio_queue *queue = &adapter->req_q;

	if (unlikely(qdio_err)) {
		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
		zfcp_qdio_handler_error(adapter, "qdireq1");
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(queue->sbal, first, count);

	zfcp_qdio_account(adapter);
	atomic_add(count, &queue->count);
	wake_up(&adapter->request_wq);
}
static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
				  unsigned long req_id, int sbal_idx)
{
	struct zfcp_fsf_req *fsf_req;
	unsigned long flags;

	spin_lock_irqsave(&adapter->req_list_lock, flags);
	fsf_req = zfcp_reqlist_find(adapter, req_id);

	if (!fsf_req)
		/*
		 * Unknown request means that we have potentially memory
		 * corruption and must stop the machine immediately.
		 */
		panic("error: unknown request id (%lx) on adapter %s.\n",
		      req_id, dev_name(&adapter->ccw_device->dev));

	zfcp_reqlist_remove(adapter, fsf_req);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	fsf_req->sbal_response = sbal_idx;
	fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count);
	zfcp_fsf_req_complete(fsf_req);
}
static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
{
	struct zfcp_qdio_queue *queue = &adapter->resp_q;
	struct ccw_device *cdev = adapter->ccw_device;
	u8 count, start = queue->first;
	unsigned int retval;

	count = atomic_read(&queue->count) + processed;

	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);

	if (unlikely(retval)) {
		atomic_set(&queue->count, count);
		/* FIXME: Recover this with an adapter reopen? */
	} else {
		queue->first += count;
		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
		atomic_set(&queue->count, 0);
	}
}
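/*
 * Note: on failure the unreturned buffer count is preserved in
 * queue->count so the buffers are handed back to the adapter together
 * with the next batch of processed responses.
 */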
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int first, int count,
			       unsigned long parm)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
	struct zfcp_qdio_queue *queue = &adapter->resp_q;
	struct qdio_buffer_element *sbale;
	int sbal_idx, sbale_idx, sbal_no;

	if (unlikely(qdio_err)) {
		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
		zfcp_qdio_handler_error(adapter, "qdires1");
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;

		/* go through all SBALEs of SBAL */
		for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
		     sbale_idx++) {
			sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
			zfcp_qdio_reqid_check(adapter,
					      (unsigned long) sbale->addr,
					      sbal_idx);
			if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
				break;
		}

		if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
			dev_warn(&adapter->ccw_device->dev,
				 "A QDIO protocol error occurred, "
				 "operations continue\n");
	}

	/*
	 * put range of SBALs back to response queue
	 * (including SBALs which have already been freed before)
	 */
	zfcp_qdio_resp_put_back(adapter, count);
}
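/*
 * As the loop above shows, the adapter returns the original request
 * identifier in each response SBALE's addr field; zfcp_qdio_reqid_check()
 * uses it to look up and complete the matching zfcp_fsf_req.
 */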
/**
 * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
 * @req: pointer to struct zfcp_fsf_req
 * Returns: pointer to qdio_buffer_element (SBALE) structure
 */
struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
{
	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
}
/**
 * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
 * @req: pointer to struct zfcp_fsf_req
 * Returns: pointer to qdio_buffer_element (SBALE) structure
 */
struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
{
	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
			       req->sbale_curr);
}
static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
{
	int count = atomic_read(&fsf_req->adapter->req_q.count);
	count = min(count, max_sbals);
	fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
					% QDIO_MAX_BUFFERS_PER_Q;
}
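/*
 * Example: with sbal_first == 126, at least 4 free buffers and
 * max_sbals == 4, sbal_limit becomes (126 + 4 - 1) % 128 == 1, i.e. the
 * request may occupy buffers 126, 127, 0 and 1 of the circular queue.
 */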
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (fsf_req->sbal_last == fsf_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(fsf_req);
	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	fsf_req->sbal_last++;
	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	fsf_req->sbal_number++;

	/* start at first SBALE of new SBAL */
	fsf_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= sbtype;

	return sbale;
}
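/*
 * Chaining summary: the old SBAL is closed with SBAL_FLAGS_LAST_ENTRY,
 * its first SBALE is marked SBAL_FLAGS0_MORE_SBALS so the adapter keeps
 * reading into the next buffer, and the request continues at SBALE 0 of
 * the following SBAL (NULL if sbal_limit would be exceeded).
 */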
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
		return zfcp_qdio_sbal_chain(fsf_req, sbtype);
	fsf_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(fsf_req);
}
static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
{
	struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
	int first = fsf_req->sbal_first;
	int last = fsf_req->sbal_last;
	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
		QDIO_MAX_BUFFERS_PER_Q + 1;
	zfcp_qdio_zero_sbals(sbal, first, count);
}
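/*
 * The count expression handles wrap-around: with first == 127 and
 * last == 1, (1 - 127 + 128) % 128 + 1 == 3, i.e. buffers 127, 0 and 1
 * are zeroed.
 */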
static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
				unsigned int sbtype, void *start_addr,
				unsigned int total_length)
{
	struct qdio_buffer_element *sbale;
	unsigned long remaining, length;
	void *addr;

	/* split segment up */
	for (addr = start_addr, remaining = total_length; remaining > 0;
	     addr += length, remaining -= length) {
		sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
		if (!sbale) {
			atomic_inc(&fsf_req->adapter->qdio_outb_full);
			zfcp_qdio_undo_sbals(fsf_req);
			return -ENOMEM;
		}

		/* new piece must not exceed next page boundary */
		length = min(remaining,
			     (PAGE_SIZE - ((unsigned long)addr &
					   (PAGE_SIZE - 1))));
		sbale->addr = addr;
		sbale->length = length;
	}
	return 0;
}
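/*
 * Example of the page split with 4 KiB pages: a 6000 byte segment
 * starting at offset 0xf00 within a page is carved into SBALEs of
 * 256 bytes (up to the page boundary), 4096 bytes and 1648 bytes.
 */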
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @sg: scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
			    struct scatterlist *sg, int max_sbals)
{
	struct qdio_buffer_element *sbale;
	int retval, bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(fsf_req, max_sbals);

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(fsf_req);
	sbale->flags |= sbtype;

	for (; sg; sg = sg_next(sg)) {
		retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
					      sg->length);
		if (retval < 0)
			return retval;
		bytes += sg->length;
	}

	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	return bytes;
}
/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @fsf_req: pointer to struct zfcp_fsf_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
{
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_qdio_queue *req_q = &adapter->req_q;
	int first = fsf_req->sbal_first;
	int count = fsf_req->sbal_number;
	int retval;
	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;

	zfcp_qdio_account(adapter);

	retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count);
	if (unlikely(retval)) {
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(count, &req_q->count);
	req_q->first += count;
	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
	return 0;
}
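/*
 * Typical request flow: zfcp_qdio_sbals_from_sg() maps the payload into
 * SBALs of the request queue, zfcp_qdio_send() hands the buffer range
 * to the hardware, and the buffers become available again once
 * zfcp_qdio_int_req() reports them completed.
 */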
/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @adapter: pointer to struct zfcp_adapter
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
{
	struct qdio_initialize *init_data;

	if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
	    zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
		return -ENOMEM;

	init_data = &adapter->qdio_init_data;

	init_data->cdev = adapter->ccw_device;
	init_data->q_format = QDIO_ZFCP_QFMT;
	memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8);
	ASCEBC(init_data->adapter_name, 8);
	init_data->qib_param_field_format = 0;
	init_data->qib_param_field = NULL;
	init_data->input_slib_elements = NULL;
	init_data->output_slib_elements = NULL;
	init_data->no_input_qs = 1;
	init_data->no_output_qs = 1;
	init_data->input_handler = zfcp_qdio_int_resp;
	init_data->output_handler = zfcp_qdio_int_req;
	init_data->int_parm = (unsigned long) adapter;
	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
			   QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
	init_data->input_sbal_addr_array = (void **) (adapter->resp_q.sbal);
	init_data->output_sbal_addr_array = (void **) (adapter->req_q.sbal);

	return qdio_allocate(init_data);
}
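/*
 * Note the naming crossover above: QDIO's input queue carries FCP
 * responses, so the input_handler is zfcp_qdio_int_resp, while the
 * output queue carries requests and completes via zfcp_qdio_int_req.
 */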
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @adapter: pointer to structure zfcp_adapter
 */
void zfcp_qdio_close(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio_queue *req_q;
	int first, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	req_q = &adapter->req_q;
	spin_lock_bh(&adapter->req_q_lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_bh(&adapter->req_q_lock);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&req_q->count);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
	}
	req_q->first = 0;
	atomic_set(&req_q->count, 0);
	adapter->resp_q.first = 0;
	atomic_set(&adapter->resp_q.count, 0);
}
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @adapter: pointer to struct zfcp_adapter
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_adapter *adapter)
{
	struct qdio_buffer_element *sbale;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	if (qdio_establish(&adapter->qdio_init_data))
		goto failed_establish;

	if (qdio_activate(adapter->ccw_device))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(adapter->resp_q.sbal[cc]->element[0]);
		sbale->length = 0;
		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
		sbale->addr = NULL;
	}

	if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
		    QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	adapter->req_q.first = 0;
	atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);

	return 0;

failed_qdio:
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&adapter->ccw_device->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}