1 // SPDX-License-Identifier: GPL-2.0
5 * Debug traces for zfcp.
7 * Copyright IBM Corp. 2002, 2018
10 #define KMSG_COMPONENT "zfcp"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 #include <linux/module.h>
14 #include <linux/ctype.h>
15 #include <linux/slab.h>
16 #include <asm/debug.h>
21 static u32 dbfsize
= 4;
23 module_param(dbfsize
, uint
, 0400);
24 MODULE_PARM_DESC(dbfsize
,
25 "number of pages for each debug feature area (default 4)");
27 static u32 dbflevel
= 3;
29 module_param(dbflevel
, uint
, 0400);
30 MODULE_PARM_DESC(dbflevel
,
31 "log level for each debug feature area "
32 "(default 3, range 0..6)");
34 static inline unsigned int zfcp_dbf_plen(unsigned int offset
)
36 return sizeof(struct zfcp_dbf_pay
) + offset
- ZFCP_DBF_PAY_MAX_REC
;
40 void zfcp_dbf_pl_write(struct zfcp_dbf
*dbf
, void *data
, u16 length
, char *area
,
43 struct zfcp_dbf_pay
*pl
= &dbf
->pay_buf
;
44 u16 offset
= 0, rec_length
;
46 spin_lock(&dbf
->pay_lock
);
47 memset(pl
, 0, sizeof(*pl
));
48 pl
->fsf_req_id
= req_id
;
49 memcpy(pl
->area
, area
, ZFCP_DBF_TAG_LEN
);
51 while (offset
< length
) {
52 rec_length
= min((u16
) ZFCP_DBF_PAY_MAX_REC
,
53 (u16
) (length
- offset
));
54 memcpy(pl
->data
, data
+ offset
, rec_length
);
55 debug_event(dbf
->pay
, 1, pl
, zfcp_dbf_plen(rec_length
));
61 spin_unlock(&dbf
->pay_lock
);
65 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
66 * @tag: tag indicating which kind of unsolicited status has been received
67 * @req: request for which a response was received
69 void zfcp_dbf_hba_fsf_res(char *tag
, int level
, struct zfcp_fsf_req
*req
)
71 struct zfcp_dbf
*dbf
= req
->adapter
->dbf
;
72 struct fsf_qtcb_prefix
*q_pref
= &req
->qtcb
->prefix
;
73 struct fsf_qtcb_header
*q_head
= &req
->qtcb
->header
;
74 struct zfcp_dbf_hba
*rec
= &dbf
->hba_buf
;
77 spin_lock_irqsave(&dbf
->hba_lock
, flags
);
78 memset(rec
, 0, sizeof(*rec
));
80 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
81 rec
->id
= ZFCP_DBF_HBA_RES
;
82 rec
->fsf_req_id
= req
->req_id
;
83 rec
->fsf_req_status
= req
->status
;
84 rec
->fsf_cmd
= req
->fsf_command
;
85 rec
->fsf_seq_no
= req
->seq_no
;
86 rec
->u
.res
.req_issued
= req
->issued
;
87 rec
->u
.res
.prot_status
= q_pref
->prot_status
;
88 rec
->u
.res
.fsf_status
= q_head
->fsf_status
;
89 rec
->u
.res
.port_handle
= q_head
->port_handle
;
90 rec
->u
.res
.lun_handle
= q_head
->lun_handle
;
92 memcpy(rec
->u
.res
.prot_status_qual
, &q_pref
->prot_status_qual
,
93 FSF_PROT_STATUS_QUAL_SIZE
);
94 memcpy(rec
->u
.res
.fsf_status_qual
, &q_head
->fsf_status_qual
,
95 FSF_STATUS_QUALIFIER_SIZE
);
97 if (req
->fsf_command
!= FSF_QTCB_FCP_CMND
) {
98 rec
->pl_len
= q_head
->log_length
;
99 zfcp_dbf_pl_write(dbf
, (char *)q_pref
+ q_head
->log_start
,
100 rec
->pl_len
, "fsf_res", req
->req_id
);
103 debug_event(dbf
->hba
, level
, rec
, sizeof(*rec
));
104 spin_unlock_irqrestore(&dbf
->hba_lock
, flags
);
108 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
109 * @tag: tag indicating which kind of unsolicited status has been received
110 * @req: request providing the unsolicited status
112 void zfcp_dbf_hba_fsf_uss(char *tag
, struct zfcp_fsf_req
*req
)
114 struct zfcp_dbf
*dbf
= req
->adapter
->dbf
;
115 struct fsf_status_read_buffer
*srb
= req
->data
;
116 struct zfcp_dbf_hba
*rec
= &dbf
->hba_buf
;
117 static int const level
= 2;
120 if (unlikely(!debug_level_enabled(dbf
->hba
, level
)))
123 spin_lock_irqsave(&dbf
->hba_lock
, flags
);
124 memset(rec
, 0, sizeof(*rec
));
126 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
127 rec
->id
= ZFCP_DBF_HBA_USS
;
128 rec
->fsf_req_id
= req
->req_id
;
129 rec
->fsf_req_status
= req
->status
;
130 rec
->fsf_cmd
= req
->fsf_command
;
135 rec
->u
.uss
.status_type
= srb
->status_type
;
136 rec
->u
.uss
.status_subtype
= srb
->status_subtype
;
137 rec
->u
.uss
.d_id
= ntoh24(srb
->d_id
);
138 rec
->u
.uss
.lun
= srb
->fcp_lun
;
139 memcpy(&rec
->u
.uss
.queue_designator
, &srb
->queue_designator
,
140 sizeof(rec
->u
.uss
.queue_designator
));
142 /* status read buffer payload length */
143 rec
->pl_len
= (!srb
->length
) ? 0 : srb
->length
-
144 offsetof(struct fsf_status_read_buffer
, payload
);
147 zfcp_dbf_pl_write(dbf
, srb
->payload
.data
, rec
->pl_len
,
148 "fsf_uss", req
->req_id
);
150 debug_event(dbf
->hba
, level
, rec
, sizeof(*rec
));
151 spin_unlock_irqrestore(&dbf
->hba_lock
, flags
);
155 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
156 * @tag: tag indicating which kind of unsolicited status has been received
157 * @req: request which caused the bit_error condition
159 void zfcp_dbf_hba_bit_err(char *tag
, struct zfcp_fsf_req
*req
)
161 struct zfcp_dbf
*dbf
= req
->adapter
->dbf
;
162 struct zfcp_dbf_hba
*rec
= &dbf
->hba_buf
;
163 struct fsf_status_read_buffer
*sr_buf
= req
->data
;
164 static int const level
= 1;
167 if (unlikely(!debug_level_enabled(dbf
->hba
, level
)))
170 spin_lock_irqsave(&dbf
->hba_lock
, flags
);
171 memset(rec
, 0, sizeof(*rec
));
173 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
174 rec
->id
= ZFCP_DBF_HBA_BIT
;
175 rec
->fsf_req_id
= req
->req_id
;
176 rec
->fsf_req_status
= req
->status
;
177 rec
->fsf_cmd
= req
->fsf_command
;
178 memcpy(&rec
->u
.be
, &sr_buf
->payload
.bit_error
,
179 sizeof(struct fsf_bit_error_payload
));
181 debug_event(dbf
->hba
, level
, rec
, sizeof(*rec
));
182 spin_unlock_irqrestore(&dbf
->hba_lock
, flags
);
186 * zfcp_dbf_hba_def_err - trace event for deferred error messages
187 * @adapter: pointer to struct zfcp_adapter
188 * @req_id: request id which caused the deferred error message
189 * @scount: number of sbals incl. the signaling sbal
190 * @pl: array of all involved sbals
192 void zfcp_dbf_hba_def_err(struct zfcp_adapter
*adapter
, u64 req_id
, u16 scount
,
195 struct zfcp_dbf
*dbf
= adapter
->dbf
;
196 struct zfcp_dbf_pay
*payload
= &dbf
->pay_buf
;
198 static int const level
= 1;
201 if (unlikely(!debug_level_enabled(dbf
->pay
, level
)))
207 spin_lock_irqsave(&dbf
->pay_lock
, flags
);
208 memset(payload
, 0, sizeof(*payload
));
210 memcpy(payload
->area
, "def_err", 7);
211 payload
->fsf_req_id
= req_id
;
212 payload
->counter
= 0;
213 length
= min((u16
)sizeof(struct qdio_buffer
),
214 (u16
)ZFCP_DBF_PAY_MAX_REC
);
216 while (payload
->counter
< scount
&& (char *)pl
[payload
->counter
]) {
217 memcpy(payload
->data
, (char *)pl
[payload
->counter
], length
);
218 debug_event(dbf
->pay
, level
, payload
, zfcp_dbf_plen(length
));
222 spin_unlock_irqrestore(&dbf
->pay_lock
, flags
);
226 * zfcp_dbf_hba_basic - trace event for basic adapter events
227 * @adapter: pointer to struct zfcp_adapter
229 void zfcp_dbf_hba_basic(char *tag
, struct zfcp_adapter
*adapter
)
231 struct zfcp_dbf
*dbf
= adapter
->dbf
;
232 struct zfcp_dbf_hba
*rec
= &dbf
->hba_buf
;
233 static int const level
= 1;
236 if (unlikely(!debug_level_enabled(dbf
->hba
, level
)))
239 spin_lock_irqsave(&dbf
->hba_lock
, flags
);
240 memset(rec
, 0, sizeof(*rec
));
242 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
243 rec
->id
= ZFCP_DBF_HBA_BASIC
;
245 debug_event(dbf
->hba
, level
, rec
, sizeof(*rec
));
246 spin_unlock_irqrestore(&dbf
->hba_lock
, flags
);
249 static void zfcp_dbf_set_common(struct zfcp_dbf_rec
*rec
,
250 struct zfcp_adapter
*adapter
,
251 struct zfcp_port
*port
,
252 struct scsi_device
*sdev
)
254 rec
->adapter_status
= atomic_read(&adapter
->status
);
256 rec
->port_status
= atomic_read(&port
->status
);
257 rec
->wwpn
= port
->wwpn
;
258 rec
->d_id
= port
->d_id
;
261 rec
->lun_status
= atomic_read(&sdev_to_zfcp(sdev
)->status
);
262 rec
->lun
= zfcp_scsi_dev_lun(sdev
);
264 rec
->lun
= ZFCP_DBF_INVALID_LUN
;
268 * zfcp_dbf_rec_trig - trace event related to triggered recovery
269 * @tag: identifier for event
270 * @adapter: adapter on which the erp_action should run
271 * @port: remote port involved in the erp_action
272 * @sdev: scsi device involved in the erp_action
273 * @want: wanted erp_action
274 * @need: required erp_action
276 * The adapter->erp_lock has to be held.
278 void zfcp_dbf_rec_trig(char *tag
, struct zfcp_adapter
*adapter
,
279 struct zfcp_port
*port
, struct scsi_device
*sdev
,
282 struct zfcp_dbf
*dbf
= adapter
->dbf
;
283 struct zfcp_dbf_rec
*rec
= &dbf
->rec_buf
;
284 static int const level
= 1;
285 struct list_head
*entry
;
288 lockdep_assert_held(&adapter
->erp_lock
);
290 if (unlikely(!debug_level_enabled(dbf
->rec
, level
)))
293 spin_lock_irqsave(&dbf
->rec_lock
, flags
);
294 memset(rec
, 0, sizeof(*rec
));
296 rec
->id
= ZFCP_DBF_REC_TRIG
;
297 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
298 zfcp_dbf_set_common(rec
, adapter
, port
, sdev
);
300 list_for_each(entry
, &adapter
->erp_ready_head
)
303 list_for_each(entry
, &adapter
->erp_running_head
)
304 rec
->u
.trig
.running
++;
306 rec
->u
.trig
.want
= want
;
307 rec
->u
.trig
.need
= need
;
309 debug_event(dbf
->rec
, level
, rec
, sizeof(*rec
));
310 spin_unlock_irqrestore(&dbf
->rec_lock
, flags
);
314 * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
315 * @tag: identifier for event
316 * @adapter: adapter on which the erp_action should run
317 * @port: remote port involved in the erp_action
318 * @sdev: scsi device involved in the erp_action
319 * @want: wanted erp_action
320 * @need: required erp_action
322 * The adapter->erp_lock must not be held.
324 void zfcp_dbf_rec_trig_lock(char *tag
, struct zfcp_adapter
*adapter
,
325 struct zfcp_port
*port
, struct scsi_device
*sdev
,
330 read_lock_irqsave(&adapter
->erp_lock
, flags
);
331 zfcp_dbf_rec_trig(tag
, adapter
, port
, sdev
, want
, need
);
332 read_unlock_irqrestore(&adapter
->erp_lock
, flags
);
336 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
337 * @level: trace level to be used for event
338 * @tag: identifier for event
339 * @erp: erp_action running
341 void zfcp_dbf_rec_run_lvl(int level
, char *tag
, struct zfcp_erp_action
*erp
)
343 struct zfcp_dbf
*dbf
= erp
->adapter
->dbf
;
344 struct zfcp_dbf_rec
*rec
= &dbf
->rec_buf
;
347 if (!debug_level_enabled(dbf
->rec
, level
))
350 spin_lock_irqsave(&dbf
->rec_lock
, flags
);
351 memset(rec
, 0, sizeof(*rec
));
353 rec
->id
= ZFCP_DBF_REC_RUN
;
354 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
355 zfcp_dbf_set_common(rec
, erp
->adapter
, erp
->port
, erp
->sdev
);
357 rec
->u
.run
.fsf_req_id
= erp
->fsf_req_id
;
358 rec
->u
.run
.rec_status
= erp
->status
;
359 rec
->u
.run
.rec_step
= erp
->step
;
360 rec
->u
.run
.rec_action
= erp
->action
;
363 rec
->u
.run
.rec_count
=
364 atomic_read(&sdev_to_zfcp(erp
->sdev
)->erp_counter
);
366 rec
->u
.run
.rec_count
= atomic_read(&erp
->port
->erp_counter
);
368 rec
->u
.run
.rec_count
= atomic_read(&erp
->adapter
->erp_counter
);
370 debug_event(dbf
->rec
, level
, rec
, sizeof(*rec
));
371 spin_unlock_irqrestore(&dbf
->rec_lock
, flags
);
/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	/* convenience wrapper at the default trace level */
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}
385 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
386 * @tag: identifier for event
387 * @wka_port: well known address port
388 * @req_id: request ID to correlate with potential HBA trace record
390 void zfcp_dbf_rec_run_wka(char *tag
, struct zfcp_fc_wka_port
*wka_port
,
393 struct zfcp_dbf
*dbf
= wka_port
->adapter
->dbf
;
394 struct zfcp_dbf_rec
*rec
= &dbf
->rec_buf
;
395 static int const level
= 1;
398 if (unlikely(!debug_level_enabled(dbf
->rec
, level
)))
401 spin_lock_irqsave(&dbf
->rec_lock
, flags
);
402 memset(rec
, 0, sizeof(*rec
));
404 rec
->id
= ZFCP_DBF_REC_RUN
;
405 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
406 rec
->port_status
= wka_port
->status
;
407 rec
->d_id
= wka_port
->d_id
;
408 rec
->lun
= ZFCP_DBF_INVALID_LUN
;
410 rec
->u
.run
.fsf_req_id
= req_id
;
411 rec
->u
.run
.rec_status
= ~0;
412 rec
->u
.run
.rec_step
= ~0;
413 rec
->u
.run
.rec_action
= ~0;
414 rec
->u
.run
.rec_count
= ~0;
416 debug_event(dbf
->rec
, level
, rec
, sizeof(*rec
));
417 spin_unlock_irqrestore(&dbf
->rec_lock
, flags
);
420 #define ZFCP_DBF_SAN_LEVEL 1
423 void zfcp_dbf_san(char *tag
, struct zfcp_dbf
*dbf
,
424 char *paytag
, struct scatterlist
*sg
, u8 id
, u16 len
,
425 u64 req_id
, u32 d_id
, u16 cap_len
)
427 struct zfcp_dbf_san
*rec
= &dbf
->san_buf
;
430 struct zfcp_dbf_pay
*payload
= &dbf
->pay_buf
;
433 spin_lock_irqsave(&dbf
->san_lock
, flags
);
434 memset(rec
, 0, sizeof(*rec
));
437 rec
->fsf_req_id
= req_id
;
439 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
440 rec
->pl_len
= len
; /* full length even if we cap pay below */
443 rec_len
= min_t(unsigned int, sg
->length
, ZFCP_DBF_SAN_MAX_PAYLOAD
);
444 memcpy(rec
->payload
, sg_virt(sg
), rec_len
); /* part of 1st sg entry */
446 goto out
; /* skip pay record if full content in rec->payload */
448 /* if (len > rec_len):
449 * dump data up to cap_len ignoring small duplicate in rec->payload
451 spin_lock(&dbf
->pay_lock
);
452 memset(payload
, 0, sizeof(*payload
));
453 memcpy(payload
->area
, paytag
, ZFCP_DBF_TAG_LEN
);
454 payload
->fsf_req_id
= req_id
;
455 payload
->counter
= 0;
456 for (; sg
&& pay_sum
< cap_len
; sg
= sg_next(sg
)) {
457 u16 pay_len
, offset
= 0;
459 while (offset
< sg
->length
&& pay_sum
< cap_len
) {
460 pay_len
= min((u16
)ZFCP_DBF_PAY_MAX_REC
,
461 (u16
)(sg
->length
- offset
));
462 /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
463 memcpy(payload
->data
, sg_virt(sg
) + offset
, pay_len
);
464 debug_event(dbf
->pay
, ZFCP_DBF_SAN_LEVEL
, payload
,
465 zfcp_dbf_plen(pay_len
));
471 spin_unlock(&dbf
->pay_lock
);
474 debug_event(dbf
->san
, ZFCP_DBF_SAN_LEVEL
, rec
, sizeof(*rec
));
475 spin_unlock_irqrestore(&dbf
->san_lock
, flags
);
479 * zfcp_dbf_san_req - trace event for issued SAN request
480 * @tag: identifier for event
481 * @fsf_req: request containing issued CT data
482 * d_id: destination ID
484 void zfcp_dbf_san_req(char *tag
, struct zfcp_fsf_req
*fsf
, u32 d_id
)
486 struct zfcp_dbf
*dbf
= fsf
->adapter
->dbf
;
487 struct zfcp_fsf_ct_els
*ct_els
= fsf
->data
;
490 if (unlikely(!debug_level_enabled(dbf
->san
, ZFCP_DBF_SAN_LEVEL
)))
493 length
= (u16
)zfcp_qdio_real_bytes(ct_els
->req
);
494 zfcp_dbf_san(tag
, dbf
, "san_req", ct_els
->req
, ZFCP_DBF_SAN_REQ
,
495 length
, fsf
->req_id
, d_id
, length
);
498 static u16
zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag
,
499 struct zfcp_fsf_req
*fsf
,
502 struct zfcp_fsf_ct_els
*ct_els
= fsf
->data
;
503 struct fc_ct_hdr
*reqh
= sg_virt(ct_els
->req
);
504 struct fc_ns_gid_ft
*reqn
= (struct fc_ns_gid_ft
*)(reqh
+ 1);
505 struct scatterlist
*resp_entry
= ct_els
->resp
;
506 struct fc_ct_hdr
*resph
;
507 struct fc_gpn_ft_resp
*acc
;
508 int max_entries
, x
, last
= 0;
510 if (!(memcmp(tag
, "fsscth2", 7) == 0
511 && ct_els
->d_id
== FC_FID_DIR_SERV
512 && reqh
->ct_rev
== FC_CT_REV
513 && reqh
->ct_in_id
[0] == 0
514 && reqh
->ct_in_id
[1] == 0
515 && reqh
->ct_in_id
[2] == 0
516 && reqh
->ct_fs_type
== FC_FST_DIR
517 && reqh
->ct_fs_subtype
== FC_NS_SUBTYPE
518 && reqh
->ct_options
== 0
519 && reqh
->_ct_resvd1
== 0
520 && reqh
->ct_cmd
== cpu_to_be16(FC_NS_GPN_FT
)
521 /* reqh->ct_mr_size can vary so do not match but read below */
522 && reqh
->_ct_resvd2
== 0
523 && reqh
->ct_reason
== 0
524 && reqh
->ct_explan
== 0
525 && reqh
->ct_vendor
== 0
526 && reqn
->fn_resvd
== 0
527 && reqn
->fn_domain_id_scope
== 0
528 && reqn
->fn_area_id_scope
== 0
529 && reqn
->fn_fc4_type
== FC_TYPE_FCP
))
530 return len
; /* not GPN_FT response so do not cap */
532 acc
= sg_virt(resp_entry
);
534 /* cap all but accept CT responses to at least the CT header */
535 resph
= (struct fc_ct_hdr
*)acc
;
536 if ((ct_els
->status
) ||
537 (resph
->ct_cmd
!= cpu_to_be16(FC_FS_ACC
)))
538 return max(FC_CT_HDR_LEN
, ZFCP_DBF_SAN_MAX_PAYLOAD
);
540 max_entries
= (be16_to_cpu(reqh
->ct_mr_size
) * 4 /
541 sizeof(struct fc_gpn_ft_resp
))
542 + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
543 * to account for header as 1st pseudo "entry" */;
545 /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
546 * response, allowing us to skip special handling for it - just skip it
548 for (x
= 1; x
< max_entries
&& !last
; x
++) {
549 if (x
% (ZFCP_FC_GPN_FT_ENT_PAGE
+ 1))
552 acc
= sg_virt(++resp_entry
);
554 last
= acc
->fp_flags
& FC_NS_FID_LAST
;
556 len
= min(len
, (u16
)(x
* sizeof(struct fc_gpn_ft_resp
)));
557 return len
; /* cap after last entry */
561 * zfcp_dbf_san_res - trace event for received SAN request
562 * @tag: identifier for event
563 * @fsf_req: request containing issued CT data
565 void zfcp_dbf_san_res(char *tag
, struct zfcp_fsf_req
*fsf
)
567 struct zfcp_dbf
*dbf
= fsf
->adapter
->dbf
;
568 struct zfcp_fsf_ct_els
*ct_els
= fsf
->data
;
571 if (unlikely(!debug_level_enabled(dbf
->san
, ZFCP_DBF_SAN_LEVEL
)))
574 length
= (u16
)zfcp_qdio_real_bytes(ct_els
->resp
);
575 zfcp_dbf_san(tag
, dbf
, "san_res", ct_els
->resp
, ZFCP_DBF_SAN_RES
,
576 length
, fsf
->req_id
, ct_els
->d_id
,
577 zfcp_dbf_san_res_cap_len_if_gpn_ft(tag
, fsf
, length
));
581 * zfcp_dbf_san_in_els - trace event for incoming ELS
582 * @tag: identifier for event
583 * @fsf_req: request containing issued CT data
585 void zfcp_dbf_san_in_els(char *tag
, struct zfcp_fsf_req
*fsf
)
587 struct zfcp_dbf
*dbf
= fsf
->adapter
->dbf
;
588 struct fsf_status_read_buffer
*srb
=
589 (struct fsf_status_read_buffer
*) fsf
->data
;
591 struct scatterlist sg
;
593 if (unlikely(!debug_level_enabled(dbf
->san
, ZFCP_DBF_SAN_LEVEL
)))
596 length
= (u16
)(srb
->length
-
597 offsetof(struct fsf_status_read_buffer
, payload
));
598 sg_init_one(&sg
, srb
->payload
.data
, length
);
599 zfcp_dbf_san(tag
, dbf
, "san_els", &sg
, ZFCP_DBF_SAN_ELS
, length
,
600 fsf
->req_id
, ntoh24(srb
->d_id
), length
);
604 * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
605 * @tag: Identifier for event.
606 * @level: trace level of event.
607 * @sdev: Pointer to SCSI device as context for this event.
608 * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
609 * @fsf: Pointer to FSF request, or NULL.
611 void zfcp_dbf_scsi_common(char *tag
, int level
, struct scsi_device
*sdev
,
612 struct scsi_cmnd
*sc
, struct zfcp_fsf_req
*fsf
)
614 struct zfcp_adapter
*adapter
=
615 (struct zfcp_adapter
*) sdev
->host
->hostdata
[0];
616 struct zfcp_dbf
*dbf
= adapter
->dbf
;
617 struct zfcp_dbf_scsi
*rec
= &dbf
->scsi_buf
;
618 struct fcp_resp_with_ext
*fcp_rsp
;
619 struct fcp_resp_rsp_info
*fcp_rsp_info
;
622 spin_lock_irqsave(&dbf
->scsi_lock
, flags
);
623 memset(rec
, 0, sizeof(*rec
));
625 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
626 rec
->id
= ZFCP_DBF_SCSI_CMND
;
628 rec
->scsi_result
= sc
->result
;
629 rec
->scsi_retries
= sc
->retries
;
630 rec
->scsi_allowed
= sc
->allowed
;
631 rec
->scsi_id
= sc
->device
->id
;
632 rec
->scsi_lun
= (u32
)sc
->device
->lun
;
633 rec
->scsi_lun_64_hi
= (u32
)(sc
->device
->lun
>> 32);
634 rec
->host_scribble
= (unsigned long)sc
->host_scribble
;
636 memcpy(rec
->scsi_opcode
, sc
->cmnd
,
637 min_t(int, sc
->cmd_len
, ZFCP_DBF_SCSI_OPCODE
));
639 rec
->scsi_result
= ~0;
640 rec
->scsi_retries
= ~0;
641 rec
->scsi_allowed
= ~0;
642 rec
->scsi_id
= sdev
->id
;
643 rec
->scsi_lun
= (u32
)sdev
->lun
;
644 rec
->scsi_lun_64_hi
= (u32
)(sdev
->lun
>> 32);
645 rec
->host_scribble
= ~0;
647 memset(rec
->scsi_opcode
, 0xff, ZFCP_DBF_SCSI_OPCODE
);
651 rec
->fsf_req_id
= fsf
->req_id
;
652 rec
->pl_len
= FCP_RESP_WITH_EXT
;
653 fcp_rsp
= &(fsf
->qtcb
->bottom
.io
.fcp_rsp
.iu
);
654 /* mandatory parts of FCP_RSP IU in this SCSI record */
655 memcpy(&rec
->fcp_rsp
, fcp_rsp
, FCP_RESP_WITH_EXT
);
656 if (fcp_rsp
->resp
.fr_flags
& FCP_RSP_LEN_VAL
) {
657 fcp_rsp_info
= (struct fcp_resp_rsp_info
*) &fcp_rsp
[1];
658 rec
->fcp_rsp_info
= fcp_rsp_info
->rsp_code
;
659 rec
->pl_len
+= be32_to_cpu(fcp_rsp
->ext
.fr_rsp_len
);
661 if (fcp_rsp
->resp
.fr_flags
& FCP_SNS_LEN_VAL
) {
662 rec
->pl_len
+= be32_to_cpu(fcp_rsp
->ext
.fr_sns_len
);
664 /* complete FCP_RSP IU in associated PAYload record
665 * but only if there are optional parts
667 if (fcp_rsp
->resp
.fr_flags
!= 0)
670 /* at least one full PAY record
671 * but not beyond hardware response field
673 min_t(u16
, max_t(u16
, rec
->pl_len
,
674 ZFCP_DBF_PAY_MAX_REC
),
676 "fcp_riu", fsf
->req_id
);
679 debug_event(dbf
->scsi
, level
, rec
, sizeof(*rec
));
680 spin_unlock_irqrestore(&dbf
->scsi_lock
, flags
);
684 * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
685 * @tag: Identifier for event.
686 * @adapter: Pointer to zfcp adapter as context for this event.
687 * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
688 * @ret: Return value of calling function.
690 * This SCSI trace variant does not depend on any of:
691 * scsi_cmnd, zfcp_fsf_req, scsi_device.
693 void zfcp_dbf_scsi_eh(char *tag
, struct zfcp_adapter
*adapter
,
694 unsigned int scsi_id
, int ret
)
696 struct zfcp_dbf
*dbf
= adapter
->dbf
;
697 struct zfcp_dbf_scsi
*rec
= &dbf
->scsi_buf
;
699 static int const level
= 1;
701 if (unlikely(!debug_level_enabled(adapter
->dbf
->scsi
, level
)))
704 spin_lock_irqsave(&dbf
->scsi_lock
, flags
);
705 memset(rec
, 0, sizeof(*rec
));
707 memcpy(rec
->tag
, tag
, ZFCP_DBF_TAG_LEN
);
708 rec
->id
= ZFCP_DBF_SCSI_CMND
;
709 rec
->scsi_result
= ret
; /* re-use field, int is 4 bytes and fits */
710 rec
->scsi_retries
= ~0;
711 rec
->scsi_allowed
= ~0;
712 rec
->fcp_rsp_info
= ~0;
713 rec
->scsi_id
= scsi_id
;
714 rec
->scsi_lun
= (u32
)ZFCP_DBF_INVALID_LUN
;
715 rec
->scsi_lun_64_hi
= (u32
)(ZFCP_DBF_INVALID_LUN
>> 32);
716 rec
->host_scribble
= ~0;
717 memset(rec
->scsi_opcode
, 0xff, ZFCP_DBF_SCSI_OPCODE
);
719 debug_event(dbf
->scsi
, level
, rec
, sizeof(*rec
));
720 spin_unlock_irqrestore(&dbf
->scsi_lock
, flags
);
723 static debug_info_t
*zfcp_dbf_reg(const char *name
, int size
, int rec_size
)
725 struct debug_info
*d
;
727 d
= debug_register(name
, size
, 1, rec_size
);
731 debug_register_view(d
, &debug_hex_ascii_view
);
732 debug_set_level(d
, dbflevel
);
737 static void zfcp_dbf_unregister(struct zfcp_dbf
*dbf
)
742 debug_unregister(dbf
->scsi
);
743 debug_unregister(dbf
->san
);
744 debug_unregister(dbf
->hba
);
745 debug_unregister(dbf
->pay
);
746 debug_unregister(dbf
->rec
);
751 * zfcp_adapter_debug_register - registers debug feature for an adapter
752 * @adapter: pointer to adapter for which debug features should be registered
753 * return: -ENOMEM on error, 0 otherwise
755 int zfcp_dbf_adapter_register(struct zfcp_adapter
*adapter
)
757 char name
[DEBUG_MAX_NAME_LEN
];
758 struct zfcp_dbf
*dbf
;
760 dbf
= kzalloc(sizeof(struct zfcp_dbf
), GFP_KERNEL
);
764 spin_lock_init(&dbf
->pay_lock
);
765 spin_lock_init(&dbf
->hba_lock
);
766 spin_lock_init(&dbf
->san_lock
);
767 spin_lock_init(&dbf
->scsi_lock
);
768 spin_lock_init(&dbf
->rec_lock
);
770 /* debug feature area which records recovery activity */
771 sprintf(name
, "zfcp_%s_rec", dev_name(&adapter
->ccw_device
->dev
));
772 dbf
->rec
= zfcp_dbf_reg(name
, dbfsize
, sizeof(struct zfcp_dbf_rec
));
776 /* debug feature area which records HBA (FSF and QDIO) conditions */
777 sprintf(name
, "zfcp_%s_hba", dev_name(&adapter
->ccw_device
->dev
));
778 dbf
->hba
= zfcp_dbf_reg(name
, dbfsize
, sizeof(struct zfcp_dbf_hba
));
782 /* debug feature area which records payload info */
783 sprintf(name
, "zfcp_%s_pay", dev_name(&adapter
->ccw_device
->dev
));
784 dbf
->pay
= zfcp_dbf_reg(name
, dbfsize
* 2, sizeof(struct zfcp_dbf_pay
));
788 /* debug feature area which records SAN command failures and recovery */
789 sprintf(name
, "zfcp_%s_san", dev_name(&adapter
->ccw_device
->dev
));
790 dbf
->san
= zfcp_dbf_reg(name
, dbfsize
, sizeof(struct zfcp_dbf_san
));
794 /* debug feature area which records SCSI command failures and recovery */
795 sprintf(name
, "zfcp_%s_scsi", dev_name(&adapter
->ccw_device
->dev
));
796 dbf
->scsi
= zfcp_dbf_reg(name
, dbfsize
, sizeof(struct zfcp_dbf_scsi
));
804 zfcp_dbf_unregister(dbf
);
809 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
810 * @adapter: pointer to adapter for which debug features should be unregistered
812 void zfcp_dbf_adapter_unregister(struct zfcp_adapter
*adapter
)
814 struct zfcp_dbf
*dbf
= adapter
->dbf
;
817 zfcp_dbf_unregister(dbf
);