// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2017
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
                 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
                 "log level for each debug feature area "
                 "(default 3, range 0..6)");

static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
        return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
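
/*
 * zfcp_dbf_pl_write() copies a payload of arbitrary length into the "pay"
 * debug feature: the data is split into chunks of at most
 * ZFCP_DBF_PAY_MAX_REC bytes and each chunk is emitted as one debug_event()
 * record while dbf->pay_lock is held.
 */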
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
                       u64 req_id)
{
        struct zfcp_dbf_pay *pl = &dbf->pay_buf;
        u16 offset = 0, rec_length;

        spin_lock(&dbf->pay_lock);
        memset(pl, 0, sizeof(*pl));
        pl->fsf_req_id = req_id;
        memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

        while (offset < length) {
                rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
                                 (u16) (length - offset));
                memcpy(pl->data, data + offset, rec_length);
                debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

                offset += rec_length;
                pl->counter++;
        }

        spin_unlock(&dbf->pay_lock);
}

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: identifier for event
 * @level: trace level to be used for event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
        struct fsf_qtcb_header *q_head = &req->qtcb->header;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        unsigned long flags;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_RES;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = req->fsf_command;
        rec->fsf_seq_no = req->seq_no;
        rec->u.res.req_issued = req->issued;
        rec->u.res.prot_status = q_pref->prot_status;
        rec->u.res.fsf_status = q_head->fsf_status;
        rec->u.res.port_handle = q_head->port_handle;
        rec->u.res.lun_handle = q_head->lun_handle;

        memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
               FSF_PROT_STATUS_QUAL_SIZE);
        memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
               FSF_STATUS_QUALIFIER_SIZE);

        if (req->fsf_command != FSF_QTCB_FCP_CMND) {
                rec->pl_len = q_head->log_length;
                zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
                                  rec->pl_len, "fsf_res", req->req_id);
        }

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_status_read_buffer *srb = req->data;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        static int const level = 2;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->hba, level)))
                return;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_USS;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = req->fsf_command;

        if (!srb)
                goto log;

        rec->u.uss.status_type = srb->status_type;
        rec->u.uss.status_subtype = srb->status_subtype;
        rec->u.uss.d_id = ntoh24(srb->d_id);
        rec->u.uss.lun = srb->fcp_lun;
        memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
               sizeof(rec->u.uss.queue_designator));

        /* status read buffer payload length */
        rec->pl_len = (!srb->length) ? 0 : srb->length -
                        offsetof(struct fsf_status_read_buffer, payload);

        if (rec->pl_len)
                zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
                                  "fsf_uss", req->req_id);
log:
        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        struct fsf_status_read_buffer *sr_buf = req->data;
        static int const level = 1;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->hba, level)))
                return;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_BIT;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = req->fsf_command;
        memcpy(&rec->u.be, &sr_buf->payload.bit_error,
               sizeof(struct fsf_bit_error_payload));

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
                          void **pl)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_pay *payload = &dbf->pay_buf;
        unsigned long flags;
        static int const level = 1;
        u16 length;

        if (unlikely(!debug_level_enabled(dbf->pay, level)))
                return;

        if (!pl)
                return;

        spin_lock_irqsave(&dbf->pay_lock, flags);
        memset(payload, 0, sizeof(*payload));

        memcpy(payload->area, "def_err", 7);
        payload->fsf_req_id = req_id;
        payload->counter = 0;
        length = min((u16)sizeof(struct qdio_buffer),
                     (u16)ZFCP_DBF_PAY_MAX_REC);

        while (payload->counter < scount && (char *)pl[payload->counter]) {
                memcpy(payload->data, (char *)pl[payload->counter], length);
                debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
                payload->counter++;
        }

        spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        static int const level = 1;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->hba, level)))
                return;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_BASIC;

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
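
/*
 * zfcp_dbf_set_common() fills the fields shared by all recovery trace
 * records; @port and @sdev may be NULL, in which case the port fields stay
 * zeroed and the LUN is marked as ZFCP_DBF_INVALID_LUN.
 */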
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
                                struct zfcp_adapter *adapter,
                                struct zfcp_port *port,
                                struct scsi_device *sdev)
{
        rec->adapter_status = atomic_read(&adapter->status);
        if (port) {
                rec->port_status = atomic_read(&port->status);
                rec->wwpn = port->wwpn;
                rec->d_id = port->d_id;
        }
        if (sdev) {
                rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
                rec->lun = zfcp_scsi_dev_lun(sdev);
        } else
                rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
                       struct zfcp_port *port, struct scsi_device *sdev,
                       u8 want, u8 need)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        static int const level = 1;
        struct list_head *entry;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->rec, level)))
                return;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_TRIG;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        zfcp_dbf_set_common(rec, adapter, port, sdev);

        list_for_each(entry, &adapter->erp_ready_head)
                rec->u.trig.ready++;

        list_for_each(entry, &adapter->erp_running_head)
                rec->u.trig.running++;

        rec->u.trig.want = want;
        rec->u.trig.need = need;

        debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
        struct zfcp_dbf *dbf = erp->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        unsigned long flags;

        if (!debug_level_enabled(dbf->rec, level))
                return;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_RUN;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

        rec->u.run.fsf_req_id = erp->fsf_req_id;
        rec->u.run.rec_status = erp->status;
        rec->u.run.rec_step = erp->step;
        rec->u.run.rec_action = erp->action;

        if (erp->sdev)
                rec->u.run.rec_count =
                        atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
        else if (erp->port)
                rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
        else
                rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

        debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
        zfcp_dbf_rec_run_lvl(1, tag, erp);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
                          u64 req_id)
{
        struct zfcp_dbf *dbf = wka_port->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        static int const level = 1;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->rec, level)))
                return;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_RUN;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->port_status = wka_port->status;
        rec->d_id = wka_port->d_id;
        rec->lun = ZFCP_DBF_INVALID_LUN;

        rec->u.run.fsf_req_id = req_id;
        rec->u.run.rec_status = ~0;
        rec->u.run.rec_step = ~0;
        rec->u.run.rec_action = ~0;
        rec->u.run.rec_count = ~0;

        debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

#define ZFCP_DBF_SAN_LEVEL 1
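
/*
 * zfcp_dbf_san() records a SAN trace entry: up to ZFCP_DBF_SAN_MAX_PAYLOAD
 * bytes of the first scatterlist entry go into the SAN record itself; if the
 * full payload is longer, up to cap_len bytes are additionally dumped into
 * the "pay" debug feature as separate records.
 */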
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
                  char *paytag, struct scatterlist *sg, u8 id, u16 len,
                  u64 req_id, u32 d_id, u16 cap_len)
{
        struct zfcp_dbf_san *rec = &dbf->san_buf;
        u16 rec_len;
        unsigned long flags;
        struct zfcp_dbf_pay *payload = &dbf->pay_buf;
        u16 pay_sum = 0;

        spin_lock_irqsave(&dbf->san_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = id;
        rec->fsf_req_id = req_id;
        rec->d_id = d_id;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->pl_len = len; /* full length even if we cap pay below */
        if (!sg)
                goto out;
        rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
        memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
        if (len <= rec_len)
                goto out; /* skip pay record if full content in rec->payload */

        /* if (len > rec_len):
         * dump data up to cap_len ignoring small duplicate in rec->payload
         */
        spin_lock(&dbf->pay_lock);
        memset(payload, 0, sizeof(*payload));
        memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
        payload->fsf_req_id = req_id;
        payload->counter = 0;
        for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
                u16 pay_len, offset = 0;

                while (offset < sg->length && pay_sum < cap_len) {
                        pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
                                      (u16)(sg->length - offset));
                        /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
                        memcpy(payload->data, sg_virt(sg) + offset, pay_len);
                        debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
                                    zfcp_dbf_plen(pay_len));
                        payload->counter++;
                        offset += pay_len;
                        pay_sum += pay_len;
                }
        }
        spin_unlock(&dbf->pay_lock);

out:
        debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->san_lock, flags);
}

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT data
 * @d_id: destination ID
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;

        if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
                return;

        length = (u16)zfcp_qdio_real_bytes(ct_els->req);
        zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
                     length, fsf->req_id, d_id, length);
}
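
/*
 * GPN_FT responses can be huge (one entry per discovered port). If the
 * issued CT request was a GPN_FT to the directory server, the helper below
 * caps the traced response length right after the entry flagged
 * FC_NS_FID_LAST; any other response is traced with its full length.
 */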
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
                                              struct zfcp_fsf_req *fsf,
                                              u16 len)
{
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
        struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
        struct scatterlist *resp_entry = ct_els->resp;
        struct fc_ct_hdr *resph;
        struct fc_gpn_ft_resp *acc;
        int max_entries, x, last = 0;

        if (!(memcmp(tag, "fsscth2", 7) == 0
              && ct_els->d_id == FC_FID_DIR_SERV
              && reqh->ct_rev == FC_CT_REV
              && reqh->ct_in_id[0] == 0
              && reqh->ct_in_id[1] == 0
              && reqh->ct_in_id[2] == 0
              && reqh->ct_fs_type == FC_FST_DIR
              && reqh->ct_fs_subtype == FC_NS_SUBTYPE
              && reqh->ct_options == 0
              && reqh->_ct_resvd1 == 0
              && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
              /* reqh->ct_mr_size can vary so do not match but read below */
              && reqh->_ct_resvd2 == 0
              && reqh->ct_reason == 0
              && reqh->ct_explan == 0
              && reqh->ct_vendor == 0
              && reqn->fn_resvd == 0
              && reqn->fn_domain_id_scope == 0
              && reqn->fn_area_id_scope == 0
              && reqn->fn_fc4_type == FC_TYPE_FCP))
                return len; /* not GPN_FT response so do not cap */

        acc = sg_virt(resp_entry);

        /* cap all but accept CT responses to at least the CT header */
        resph = (struct fc_ct_hdr *)acc;
        if ((ct_els->status) ||
            (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
                return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

        max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
                       sizeof(struct fc_gpn_ft_resp))
                + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
                     * to account for header as 1st pseudo "entry" */;

        /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
         * response, allowing us to skip special handling for it - just skip it
         */
        for (x = 1; x < max_entries && !last; x++) {
                if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
                        acc++;
                else
                        acc = sg_virt(++resp_entry);

                last = acc->fp_flags & FC_NS_FID_LAST;
        }
        len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
        return len; /* cap after last entry */
}

/**
 * zfcp_dbf_san_res - trace event for received SAN request
 * @tag: identifier for event
 * @fsf: request containing received CT data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;

        if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
                return;

        length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
        zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
                     length, fsf->req_id, ct_els->d_id,
                     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing the incoming ELS payload
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct fsf_status_read_buffer *srb =
                (struct fsf_status_read_buffer *) fsf->data;
        u16 length;
        struct scatterlist sg;

        if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
                return;

        length = (u16)(srb->length -
                        offsetof(struct fsf_status_read_buffer, payload));
        sg_init_one(&sg, srb->payload.data, length);
        zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
                     fsf->req_id, ntoh24(srb->d_id), length);
}
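
/*
 * zfcp_dbf_scsi() records the mandatory part of the FCP_RSP IU in the SCSI
 * trace record itself; if the IU carries optional response or sense data,
 * the complete IU is additionally dumped into the "pay" debug feature under
 * the "fcp_riu" area tag so it can be correlated via the request ID.
 */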

/**
 * zfcp_dbf_scsi - trace event for scsi commands
 * @tag: identifier for event
 * @level: trace level to be used for event
 * @sc: pointer to struct scsi_cmnd
 * @fsf: pointer to struct zfcp_fsf_req
 */
void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
                   struct zfcp_fsf_req *fsf)
{
        struct zfcp_adapter *adapter =
                (struct zfcp_adapter *) sc->device->host->hostdata[0];
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
        struct fcp_resp_with_ext *fcp_rsp;
        struct fcp_resp_rsp_info *fcp_rsp_info;
        unsigned long flags;

        spin_lock_irqsave(&dbf->scsi_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_SCSI_CMND;
        rec->scsi_result = sc->result;
        rec->scsi_retries = sc->retries;
        rec->scsi_allowed = sc->allowed;
        rec->scsi_id = sc->device->id;
        rec->scsi_lun = (u32)sc->device->lun;
        rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
        rec->host_scribble = (unsigned long)sc->host_scribble;

        memcpy(rec->scsi_opcode, sc->cmnd,
               min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));

        if (fsf) {
                rec->fsf_req_id = fsf->req_id;
                rec->pl_len = FCP_RESP_WITH_EXT;
                fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
                /* mandatory parts of FCP_RSP IU in this SCSI record */
                memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
                if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
                        fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
                        rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
                        rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
                }
                if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
                        rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
                }
                /* complete FCP_RSP IU in associated PAYload record
                 * but only if there are optional parts
                 */
                if (fcp_rsp->resp.fr_flags != 0)
                        zfcp_dbf_pl_write(
                                dbf, fcp_rsp,
                                /* at least one full PAY record
                                 * but not beyond hardware response field
                                 */
                                min_t(u16, max_t(u16, rec->pl_len,
                                                 ZFCP_DBF_PAY_MAX_REC),
                                      FSF_FCP_RSP_SIZE),
                                "fcp_riu", fsf->req_id);
        }

        debug_event(dbf->scsi, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
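
/*
 * zfcp_dbf_reg() wraps the s390 debug feature registration: it allocates a
 * debug feature with @size pages in a single area and a record size of
 * @rec_size bytes, attaches the hex/ASCII view and applies the module-wide
 * dbflevel.
 */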
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
        struct debug_info *d;

        d = debug_register(name, size, 1, rec_size);
        if (!d)
                return NULL;

        debug_register_view(d, &debug_hex_ascii_view);
        debug_set_level(d, dbflevel);

        return d;
}

static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
        if (!dbf)
                return;

        debug_unregister(dbf->scsi);
        debug_unregister(dbf->san);
        debug_unregister(dbf->hba);
        debug_unregister(dbf->pay);
        debug_unregister(dbf->rec);
        kfree(dbf);
}

/**
 * zfcp_adapter_debug_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
        char name[DEBUG_MAX_NAME_LEN];
        struct zfcp_dbf *dbf;

        dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
        if (!dbf)
                return -ENOMEM;

        spin_lock_init(&dbf->pay_lock);
        spin_lock_init(&dbf->hba_lock);
        spin_lock_init(&dbf->san_lock);
        spin_lock_init(&dbf->scsi_lock);
        spin_lock_init(&dbf->rec_lock);

        /* debug feature area which records recovery activity */
        sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
        dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
        if (!dbf->rec)
                goto err_out;

        /* debug feature area which records HBA (FSF and QDIO) conditions */
        sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
        dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
        if (!dbf->hba)
                goto err_out;

        /* debug feature area which records payload info */
        sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
        dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
        if (!dbf->pay)
                goto err_out;

        /* debug feature area which records SAN command failures and recovery */
        sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
        dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
        if (!dbf->san)
                goto err_out;

        /* debug feature area which records SCSI command failures and recovery */
        sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
        dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
        if (!dbf->scsi)
                goto err_out;

        adapter->dbf = dbf;

        return 0;
err_out:
        zfcp_dbf_unregister(dbf);
        return -ENOMEM;
}

/**
 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
        struct zfcp_dbf *dbf = adapter->dbf;

        adapter->dbf = NULL;
        zfcp_dbf_unregister(dbf);
}