/*
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corporation 2002, 2010
 */
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
#define QBUFF_PER_PAGE	(PAGE_SIZE / sizeof(struct qdio_buffer))
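/*
 * Allocate the SBAL array page-wise: each get_zeroed_page() provides
 * room for QBUFF_PER_PAGE queue buffers; the remaining array slots are
 * then pointed at consecutive buffers within those pages.
 */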
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
	int pos;

	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
		sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
		if (!sbal[pos])
			return -ENOMEM;
	}
	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
		if (pos % QBUFF_PER_PAGE)
			sbal[pos] = sbal[pos - 1] + 1;

	return 0;
}
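/* Report a QDIO problem and trigger adapter recovery through the ERP. */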
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
}
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int free, used;

	spin_lock(&qdio->stat_lock);
	now = get_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	free = atomic_read(&qdio->req_q.count);
	used = QDIO_MAX_BUFFERS_PER_Q - free;
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
	spin_unlock(&qdio->stat_lock);
}
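/*
 * Completion handler for the outbound (request) queue: log and recover
 * on error, otherwise return the transferred SBALs to the free pool and
 * wake up waiters in zfcp_qdio_sbal_get().
 */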
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int first, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_qdio_queue *queue = &qdio->req_q;

	if (unlikely(qdio_err)) {
		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
				  count);
		zfcp_qdio_handler_error(qdio, "qdireq1");
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(queue->sbal, first, count);

	zfcp_qdio_account(qdio);
	atomic_add(count, &queue->count);
	wake_up(&qdio->req_q_wq);
}
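/*
 * Hand processed response buffers back to the QDIO layer; on failure,
 * keep the count and trigger adapter recovery.
 */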
static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
{
	struct zfcp_qdio_queue *queue = &qdio->resp_q;
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	u8 count, start = queue->first;
	unsigned int retval;

	count = atomic_read(&queue->count) + processed;

	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);

	if (unlikely(retval)) {
		atomic_set(&queue->count, count);
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
	} else {
		queue->first += count;
		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
		atomic_set(&queue->count, 0);
	}
}
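/*
 * Completion handler for the inbound (response) queue: check each
 * returned SBAL for completed requests, then put the buffers back.
 */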
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int first, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	int sbal_idx, sbal_no;

	if (unlikely(qdio_err)) {
		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
				  count);
		zfcp_qdio_handler_error(qdio, "qdires1");
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put range of SBALs back to response queue
	 * (including SBALs which have already been free before)
	 */
	zfcp_qdio_resp_put_back(qdio, count);
}
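/*
 * Derive the last SBAL this request may use from the current fill
 * level of the request queue, capped at max_sbals.
 */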
static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
				 struct zfcp_qdio_req *q_req, int max_sbals)
{
	int count = atomic_read(&qdio->req_q.count);

	count = min(count, max_sbals);
	q_req->sbal_limit = (q_req->sbal_first + count - 1)
			    % QDIO_MAX_BUFFERS_PER_Q;
}
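/*
 * Close the current SBAL and chain the request over into the next one,
 * or return NULL when the request has reached its SBAL limit.
 */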
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= q_req->sbtype;

	return sbale;
}
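/* Advance to the next SBALE, chaining into a new SBAL when needed. */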
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL)
		return zfcp_qdio_sbal_chain(qdio, q_req);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}
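/* Give up and zero all SBALs a partially built request has touched. */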
static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
				 struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer **sbal = qdio->req_q.sbal;
	int first = q_req->sbal_first;
	int last = q_req->sbal_last;
	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
		    QDIO_MAX_BUFFERS_PER_Q + 1;

	zfcp_qdio_zero_sbals(sbal, first, count);
}
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg, int max_sbals)
{
	struct qdio_buffer_element *sbale;
	int bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->flags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_undo_sbals(qdio, q_req);
			return -EINVAL;
		}

		sbale->addr = sg_virt(sg);
		sbale->length = sg->length;

		bytes += sg->length;
	}

	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	return bytes;
}
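/*
 * Check for a free request-queue SBAL; returns with req_q_lock held
 * when one is available, so the caller may consume it directly.
 */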
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	struct zfcp_qdio_queue *req_q = &qdio->req_q;

	spin_lock_bh(&qdio->req_q_lock);
	if (atomic_read(&req_q->count))
		return 1;
	spin_unlock_bh(&qdio->req_q_lock);
	return 0;
}
/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	spin_unlock_bh(&qdio->req_q_lock);
	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
					       zfcp_qdio_sbal_check(qdio),
					       5 * HZ);
	if (ret > 0)
		return 0;
	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
	}

	spin_lock_bh(&qdio->req_q_lock);
	return -EIO;
}
/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct zfcp_qdio_queue *req_q = &qdio->req_q;
	int first = q_req->sbal_first;
	int count = q_req->sbal_number;
	int retval;
	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;

	zfcp_qdio_account(qdio);

	retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
			 count);
	if (unlikely(retval)) {
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(count, &req_q->count);
	req_q->first += count;
	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
	return 0;
}
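/* Fill the qdio_initialize descriptor shared by allocate and establish. */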
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
				      struct zfcp_qdio *qdio)
{
	id->cdev = qdio->adapter->ccw_device;
	id->q_format = QDIO_ZFCP_QFMT;
	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
	ASCEBC(id->adapter_name, 8);
	id->qib_param_field_format = 0;
	id->qib_param_field = NULL;
	id->input_slib_elements = NULL;
	id->output_slib_elements = NULL;
	id->no_input_qs = 1;
	id->no_output_qs = 1;
	id->input_handler = zfcp_qdio_int_resp;
	id->output_handler = zfcp_qdio_int_req;
	id->int_parm = (unsigned long) qdio;
	id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
	id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
}
/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	struct qdio_initialize init_data;

	if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
	    zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
		return -ENOMEM;

	zfcp_qdio_setup_init_data(&init_data, qdio);

	return qdio_allocate(&init_data);
}
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to struct zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_qdio_queue *req_q;
	int first, count;

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	req_q = &qdio->req_q;
	spin_lock_bh(&qdio->req_q_lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
	spin_unlock_bh(&qdio->req_q_lock);

	qdio_shutdown(qdio->adapter->ccw_device,
		      QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&req_q->count);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
	}
	req_q->first = 0;
	atomic_set(&req_q->count, 0);
	qdio->resp_q.first = 0;
	atomic_set(&qdio->resp_q.count, 0);
}
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data;
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	int cc;

	if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	zfcp_qdio_setup_init_data(&init_data, qdio);

	if (qdio_establish(&init_data))
		goto failed_establish;

	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->resp_q.sbal[cc]->element[0]);
		sbale->length = 0;
		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
		sbale->addr = NULL;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
		    QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBAL / number of available SBALs */
	qdio->req_q.first = 0;
	atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **sbal_req, **sbal_resp;
	int p;

	if (!qdio)
		return;

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	sbal_req = qdio->req_q.sbal;
	sbal_resp = qdio->resp_q.sbal;

	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
		free_page((unsigned long) sbal_req[p]);
		free_page((unsigned long) sbal_resp[p]);
	}

	kfree(qdio);
}
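/**
 * zfcp_qdio_setup - allocate the QDIO structures and attach them to an adapter
 * @adapter: pointer to struct zfcp_adapter
 * Returns: 0 on success, -ENOMEM otherwise
 */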
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		zfcp_qdio_destroy(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);

	adapter->qdio = qdio;
	return 0;
}