/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2017
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");
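/*
 * Usage note (an editorial example, not something this file defines): both
 * knobs are plain module parameters with 0400 permissions, so they are
 * read-only at runtime and must be set at load or boot time, e.g.
 * "zfcp.dbfsize=8 zfcp.dbflevel=6" on the kernel command line for a built-in
 * zfcp, or "modprobe zfcp dbfsize=8 dbflevel=6" when built as a module.
 */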
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
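/*
 * Worked example with hypothetical numbers: struct zfcp_dbf_pay ends in a
 * data array of ZFCP_DBF_PAY_MAX_REC bytes. If only "offset" of those bytes
 * are filled, zfcp_dbf_plen() returns the record header plus "offset", so
 * debug_event() stores a shortened record; assuming a 256-byte data array
 * and offset == 40, the 216 unused trailing bytes are not traced.
 */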
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}
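/*
 * Illustrative call (buffer names are placeholders, not driver symbols):
 * a caller hands the complete payload to zfcp_dbf_pl_write(), which splits
 * it into ZFCP_DBF_PAY_MAX_REC-sized trace records sharing the same
 * fsf_req_id and area tag, distinguished only by pl->counter:
 *
 *	zfcp_dbf_pl_write(dbf, resp_buf, resp_len, "fsf_res", req->req_id);
 *
 * The per-iteration counter increment is what allows the chunks to be
 * reassembled in order when the trace is read back.
 */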
/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating the kind of FSF response that has been received
 * @level: trace level of event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	rec->fsf_seq_no = req->seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
		rec->pl_len = q_head->log_length;
		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
				  rec->pl_len, "fsf_res", req->req_id);
	}

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, 2, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
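/*
 * Example of the payload length computation above (numbers hypothetical):
 * srb->length covers the whole status read buffer including its header, so
 * a buffer with srb->length == 512 and the payload member starting at byte
 * offset 64 would yield a 448-byte "fsf_uss" payload dump, while
 * srb->length == 0 suppresses the payload record entirely.
 */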
/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	u16 length;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}
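/*
 * Illustrative call (the sbal array name is a placeholder): each SBAL is
 * dumped as one "def_err" payload record, capped to
 * min(sizeof(struct qdio_buffer), ZFCP_DBF_PAY_MAX_REC) bytes, so a deferred
 * error spanning three SBALs produces three consecutive records:
 *
 *	zfcp_dbf_hba_def_err(adapter, req_id, 3, (void **)sbals);
 */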
/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BASIC;

	debug_event(dbf->hba, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}
/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	struct list_head *entry;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
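/*
 * Usage sketch (tag string chosen only for illustration): the caller must
 * already hold adapter->erp_lock so that the ready/running counts recorded
 * above are consistent with the list walk, e.g. from the recovery code:
 *
 *	zfcp_dbf_rec_trig("errt_1", adapter, port, sdev, want, need);
 */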
/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->action;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}
/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

	rec->u.run.fsf_req_id = req_id;
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len; /* full length even if we cap pay below */
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
	if (len <= rec_len)
		goto out; /* skip pay record if full content in rec->payload */

	/* if (len > rec_len):
	 * dump data up to cap_len ignoring small duplicate in rec->payload
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, 1, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}
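/*
 * Worked example of the capping scheme above (sizes hypothetical): a short
 * response with len <= ZFCP_DBF_SAN_MAX_PAYLOAD fits entirely into
 * rec->payload and no "pay" record is written. A longer response keeps only
 * the first chunk inline as a preview and additionally dumps up to cap_len
 * bytes split into ZFCP_DBF_PAY_MAX_REC-sized "pay" records, while
 * rec->pl_len still reports the full, uncapped length.
 */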
/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT data
 * @d_id: destination ID
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_ct_hdr *resph;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == FC_NS_GPN_FT
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT response so do not cap */

	acc = sg_virt(resp_entry);

	/* cap all but accept CT responses to at least the CT header */
	resph = (struct fc_ct_hdr *)acc;
	if ((ct_els->status) ||
	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

	max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
		     * to account for header as 1st pseudo "entry" */;

	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
	 * response, allowing us to skip special handling for it - just skip it
	 */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len; /* cap after last entry */
}
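/*
 * Arithmetic behind the cap (entry size stated as an assumption from the
 * libfc definitions): the GPN_FT request asked for ct_mr_size 4-byte words,
 * i.e. ct_mr_size * 4 bytes of response. Dividing by
 * sizeof(struct fc_gpn_ft_resp), 16 bytes per entry, and adding 1 for the
 * CT header, which happens to be the same size as one entry, yields
 * max_entries. The loop stops at the entry flagged FC_NS_FID_LAST, so the
 * trace is capped at x entries instead of the whole, mostly empty, buffer.
 */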
/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing the received CT data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}
/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing the incoming, unsolicited ELS
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}
/**
 * zfcp_dbf_scsi - trace event for scsi commands
 * @tag: identifier for event
 * @level: trace level of event
 * @sc: pointer to struct scsi_cmnd
 * @fsf: pointer to struct zfcp_fsf_req
 */
void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
		   struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sc->device->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = sc->result;
	rec->scsi_retries = sc->retries;
	rec->scsi_allowed = sc->allowed;
	rec->scsi_id = sc->device->id;
	rec->scsi_lun = (u32)sc->device->lun;
	rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
	rec->host_scribble = (unsigned long)sc->host_scribble;

	memcpy(rec->scsi_opcode, sc->cmnd,
	       min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));

	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		rec->pl_len = FCP_RESP_WITH_EXT;
		fcp_rsp = (struct fcp_resp_with_ext *)
				&(fsf->qtcb->bottom.io.fcp_rsp);
		/* mandatory parts of FCP_RSP IU in this SCSI record */
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL)
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
		/* complete FCP_RSP IU in associated PAYload record
		 * but only if there are optional parts
		 */
		if (fcp_rsp->resp.fr_flags != 0)
			zfcp_dbf_pl_write(
				dbf, fcp_rsp,
				/* at least one full PAY record
				 * but not beyond hardware response field
				 */
				min_t(u16, max_t(u16, rec->pl_len,
						 ZFCP_DBF_PAY_MAX_REC),
				      FSF_FCP_RSP_SIZE),
				"fcp_riu", fsf->req_id);
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
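/*
 * Worked example of the pl_len bookkeeping above (lengths hypothetical):
 * pl_len starts at the mandatory FCP_RESP_WITH_EXT part; a response with
 * FCP_RSP_LEN_VAL set and fr_rsp_len == 8 plus FCP_SNS_LEN_VAL set and
 * fr_sns_len == 96 gives pl_len = FCP_RESP_WITH_EXT + 104. The payload dump
 * is then clamped to at least one full PAY record but never beyond the
 * hardware response field in the QTCB.
 */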
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}
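/*
 * Usage note (the device bus-ID below is a placeholder; the path layout is
 * the usual s390 debug feature convention rather than something defined in
 * this file): each area registered here appears under debugfs, e.g.
 * /sys/kernel/debug/s390dbf/zfcp_0.0.1900_hba/hex_ascii, and the level set
 * via debug_set_level()/dbflevel decides which debug_event() calls are kept.
 */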
static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}
/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}
/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}