/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2016
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"
static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
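
/*
 * zfcp_dbf_pl_write() stores an arbitrarily long payload in the "pay" debug
 * feature area by splitting it into chunks of at most ZFCP_DBF_PAY_MAX_REC
 * bytes. All chunks carry the same fsf_req_id and area tag so they can be
 * re-assembled when the trace is evaluated; pl->counter numbers the chunks.
 */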
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}
/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating the kind of FSF response that has been received
 * @level: trace level to be used for the event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	rec->fsf_seq_no = req->seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
		/* for commands other than FCP_CMND also dump the QTCB log
		 * area carried with the response as payload record(s) */
		rec->pl_len = q_head->log_length;
		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
				  rec->pl_len, "fsf_res", req->req_id);
	}

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, 2, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	u16 length;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	/* each involved sbal becomes one payload record, capped to
	 * ZFCP_DBF_PAY_MAX_REC bytes */
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}
/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BASIC;

	debug_event(dbf->hba, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
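
/*
 * zfcp_dbf_set_common() fills the fields shared by all recovery ("rec")
 * trace records: the adapter status and, where a remote port or SCSI device
 * is involved, its status, WWPN, d_id and LUN. Callers may pass NULL for
 * port and/or sdev; the LUN is then marked as ZFCP_DBF_INVALID_LUN.
 */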
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}
/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	struct list_head *entry;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

	/* snapshot of how many erp_actions are currently queued resp. running */
	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->action;

	/* pick the erp_counter of the most specific object being recovered */
	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}
/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

	/* WKA ports are not recovered via erp_actions, so mark the
	 * erp-specific fields of the run record as invalid */
	rec->u.run.fsf_req_id = req_id;
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
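
/*
 * zfcp_dbf_san() is the common helper behind the SAN trace events below.
 * It stores the beginning of the payload (up to ZFCP_DBF_SAN_MAX_PAYLOAD
 * bytes of the first scatterlist entry) inline in the "san" record; only if
 * the full payload is longer does it additionally spill up to cap_len bytes
 * into the "pay" debug feature area, taking pay_lock nested under san_lock.
 */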
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len; /* full length even if we cap pay below */
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
	if (len <= rec_len)
		goto out; /* skip pay record if full content in rec->payload */

	/* if (len > rec_len):
	 * dump data up to cap_len ignoring small duplicate in rec->payload
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, 1, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}
/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT or ELS data
 * @d_id: destination ID
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}
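
/*
 * GPN_FT responses can be large (one entry per discovered port), which would
 * quickly flood the "pay" trace area. zfcp_dbf_san_res_cap_len_if_gpn_ft()
 * therefore inspects the request: if, and only if, it was a well-formed
 * GPN_FT sent to the directory server, it walks the response and returns a
 * capped length that covers the entries up to and including the one flagged
 * FC_NS_FID_LAST; any other response is returned uncapped.
 */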
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == FC_NS_GPN_FT
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT response so do not cap */

	acc = sg_virt(resp_entry);
	max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
		     * to account for header as 1st pseudo "entry" */;

	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
	 * response, allowing us to skip special handling for it - just skip it
	 */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len; /* cap after last entry */
}
/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request for which CT or ELS response data was received
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}
/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request providing the unsolicited status buffer with the incoming ELS
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}
/**
 * zfcp_dbf_scsi - trace event for scsi commands
 * @tag: identifier for event
 * @level: trace level to be used for event
 * @sc: pointer to struct scsi_cmnd
 * @fsf: pointer to struct zfcp_fsf_req
 */
void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
		   struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sc->device->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = sc->result;
	rec->scsi_retries = sc->retries;
	rec->scsi_allowed = sc->allowed;
	rec->scsi_id = sc->device->id;
	/* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
	rec->scsi_lun = (u32)sc->device->lun;
	rec->host_scribble = (unsigned long)sc->host_scribble;

	memcpy(rec->scsi_opcode, sc->cmnd,
	       min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));

	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		fcp_rsp = (struct fcp_resp_with_ext *)
				&(fsf->qtcb->bottom.io.fcp_rsp);
		/* mandatory parts of the FCP_RSP IU are copied into rec */
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
			rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
					  (u16)ZFCP_DBF_PAY_MAX_REC);
			zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
					  "fcp_sns", fsf->req_id);
		}
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
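
/*
 * zfcp_dbf_reg() registers one s390 debug feature area of "size" pages with
 * a single area and records of "rec_size" bytes, attaches the hex/ASCII view
 * used to dump the binary records, and applies the "dbflevel" module
 * parameter as the initial trace level. It returns NULL on failure.
 */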
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}
static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}
/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}
/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}