// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2018
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"
static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");
static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
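/*
 * zfcp_dbf_plen() returns the size of a payload trace record whose variable
 * data part holds only "offset" bytes instead of the full ZFCP_DBF_PAY_MAX_REC
 * bytes; zfcp_dbf_pl_write() below uses it so that partially filled chunks do
 * not waste space in the "pay" debug feature area.
 */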
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}
/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating which kind of FSF response has been received
 * @level: trace level to be used for event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = q_head->fsf_command;
	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

	rec->pl_len = q_head->log_length;
	zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
			  rec->pl_len, "fsf_res", req->req_id);

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 2;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of bit error unsolicited status was received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	static int const level = 1;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->pay, level)))
		return;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}
/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BASIC;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
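/*
 * zfcp_dbf_set_common() fills the fields shared by all recovery trace records
 * from whatever context is available: the adapter status is always recorded,
 * while port and LUN details are only filled in when a port or SCSI device is
 * passed; without an sdev the LUN field is set to ZFCP_DBF_INVALID_LUN.
 */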
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}
/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	struct list_head *entry;
	unsigned long flags;

	lockdep_assert_held(&adapter->erp_lock);

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
 * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock must not be held.
 */
void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
			    struct zfcp_port *port, struct scsi_device *sdev,
			    u8 want, u8 need)
{
	unsigned long flags;

	read_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
	read_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	if (!debug_level_enabled(dbf->rec, level))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->type;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}
/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

	rec->u.run.fsf_req_id = req_id;
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
#define ZFCP_DBF_SAN_LEVEL 1
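/*
 * zfcp_dbf_san() below records one SAN trace record and, when the payload does
 * not fit into the inline rec->payload buffer, additionally dumps the data to
 * the "pay" debug feature area in ZFCP_DBF_PAY_MAX_REC sized chunks, capped at
 * cap_len bytes.  The full (uncapped) length is still kept in rec->pl_len so
 * that any truncation is visible when reading the trace.
 */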
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len; /* full length even if we cap pay below */
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
	if (len <= rec_len)
		goto out; /* skip pay record if full content in rec->payload */

	/* if (len > rec_len):
	 * dump data up to cap_len ignoring small duplicate in rec->payload
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}
/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT or ELS data
 * @d_id: N_Port_ID where SAN request is sent to
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}
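/*
 * GPN_FT responses from the fabric name server can be far larger than what is
 * worth tracing.  The helper below inspects a CT response and, if it is an
 * accepted GPN_FT reply, returns a length capped right after the last port
 * entry; any other response is traced with its original length (or at least
 * the CT header for rejected or failed requests).
 */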
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_ct_hdr *resph;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT response so do not cap */

	acc = sg_virt(resp_entry);

	/* cap all but accept CT responses to at least the CT header */
	resph = (struct fc_ct_hdr *)acc;
	if ((ct_els->status) ||
	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

	max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
		       sizeof(struct fc_gpn_ft_resp))
		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
		     * to account for header as 1st pseudo "entry" */;

	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
	 * response, allowing us to skip special handling for it - just skip it
	 */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len; /* cap after last entry */
}
/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing received CT or ELS data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}
/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing received ELS data
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}
/**
 * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
 * @tag: Identifier for event.
 * @level: trace level of event.
 * @sdev: Pointer to SCSI device as context for this event.
 * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
 * @fsf: Pointer to FSF request, or NULL.
 */
void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
			  struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sdev->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	if (sc) {
		rec->scsi_result = sc->result;
		rec->scsi_retries = sc->retries;
		rec->scsi_allowed = sc->allowed;
		rec->scsi_id = sc->device->id;
		rec->scsi_lun = (u32)sc->device->lun;
		rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
		rec->host_scribble = (unsigned long)sc->host_scribble;

		memcpy(rec->scsi_opcode, sc->cmnd,
		       min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
	} else {
		rec->scsi_result = ~0;
		rec->scsi_retries = ~0;
		rec->scsi_allowed = ~0;
		rec->scsi_id = sdev->id;
		rec->scsi_lun = (u32)sdev->lun;
		rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
		rec->host_scribble = ~0;

		memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
	}

	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		rec->pl_len = FCP_RESP_WITH_EXT;
		fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
		/* mandatory parts of FCP_RSP IU in this SCSI record */
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL)
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);

		/* complete FCP_RSP IU in associated PAYload record
		 * but only if there are optional parts
		 */
		if (fcp_rsp->resp.fr_flags != 0)
			zfcp_dbf_pl_write(
				dbf, fcp_rsp,
				/* at least one full PAY record
				 * but not beyond hardware response field
				 */
				min_t(u16, max_t(u16, rec->pl_len,
						 ZFCP_DBF_PAY_MAX_REC),
				      FSF_FCP_RSP_SIZE),
				"fcp_riu", fsf->req_id);
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
/**
 * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
 * @tag: Identifier for event.
 * @adapter: Pointer to zfcp adapter as context for this event.
 * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
 * @ret: Return value of calling function.
 *
 * This SCSI trace variant does not depend on any of:
 * scsi_cmnd, zfcp_fsf_req, scsi_device.
 */
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
		      unsigned int scsi_id, int ret)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	unsigned long flags;
	static int const level = 1;

	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
		return;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
	rec->scsi_retries = ~0;
	rec->scsi_allowed = ~0;
	rec->fcp_rsp_info = ~0;
	rec->scsi_id = scsi_id;
	rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
	rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
	rec->host_scribble = ~0;
	memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
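/*
 * zfcp_dbf_reg() wraps the s390 debug feature registration: it creates one
 * trace area with the given number of pages and record size, attaches the
 * hex/ASCII view and applies the module-wide dbflevel.  The resulting areas
 * are typically browsable under /sys/kernel/debug/s390dbf/<name>/hex_ascii
 * (exact path depends on where debugfs is mounted).
 */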
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}
static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}
/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * Return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}
/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
*adapter
)
815 struct zfcp_dbf
*dbf
= adapter
->dbf
;
818 zfcp_dbf_unregister(dbf
);