// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2018
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");
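
/*
 * Both module parameters are read-only at runtime (0400) and are normally
 * set at load time, for example (illustrative values):
 *   modprobe zfcp dbfsize=8 dbflevel=6
 * or, with zfcp built in, on the kernel command line:
 *   zfcp.dbfsize=8 zfcp.dbflevel=6
 */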

static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
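
/*
 * zfcp_dbf_pl_write() copies a payload of arbitrary length into the "pay"
 * trace area, splitting it into chunks of at most ZFCP_DBF_PAY_MAX_REC data
 * bytes; pl->counter numbers the chunks and pl->fsf_req_id ties them to the
 * originating request so the payload can be reassembled later.
 */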

static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}
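
/*
 * The zfcp_dbf_hba_* helpers below record FSF- and QDIO-related conditions
 * in the "hba" trace area; each one takes dbf->hba_lock, fills the
 * preallocated dbf->hba_buf record and emits it via debug_event().
 */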

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: identifier for event
 * @level: trace level of event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	rec->fsf_seq_no = req->seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

	rec->pl_len = q_head->log_length;
	zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
			  rec->pl_len, "fsf_res", req->req_id);

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 2;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	static int const level = 1;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->pay, level)))
		return;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BASIC;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}
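
/*
 * Note: zfcp_dbf_set_common() tolerates a NULL port and/or sdev; recovery
 * records without a LUN context carry ZFCP_DBF_INVALID_LUN instead.
 */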

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	struct list_head *entry;
	unsigned long flags;

	lockdep_assert_held(&adapter->erp_lock);

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock must not be held.
 */
void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
			    struct zfcp_port *port, struct scsi_device *sdev,
			    u8 want, u8 need)
{
	unsigned long flags;

	read_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
	read_unlock_irqrestore(&adapter->erp_lock, flags);
}

/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	if (!debug_level_enabled(dbf->rec, level))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->action;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

	rec->u.run.fsf_req_id = req_id;
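	/* no erp_action exists for a WKA port; mark its recovery fields */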
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

#define ZFCP_DBF_SAN_LEVEL 1
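
/*
 * zfcp_dbf_san() stores the beginning of the payload directly in the SAN
 * trace record; payloads longer than ZFCP_DBF_SAN_MAX_PAYLOAD are in
 * addition dumped (up to cap_len bytes) into the "pay" area under the
 * given paytag.
 */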

static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len; /* full length even if we cap pay below */
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
	if (len <= rec_len)
		goto out; /* skip pay record if full content in rec->payload */

	/* if (len > rec_len):
	 * dump data up to cap_len ignoring small duplicate in rec->payload
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT data
 * @d_id: destination ID
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}
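
/*
 * GPN_FT responses can be large (one entry per port in the fabric), so the
 * helper below limits how much of such a response goes into the pay trace:
 * it caps the length right after the entry flagged FC_NS_FID_LAST.
 */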

static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_ct_hdr *resph;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT response so do not cap */

	acc = sg_virt(resp_entry);

	/* cap all but accept CT responses to at least the CT header */
	resph = (struct fc_ct_hdr *)acc;
	if ((ct_els->status) ||
	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

	max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
		       sizeof(struct fc_gpn_ft_resp))
		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
		     * to account for header as 1st pseudo "entry" */;

	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
	 * response, allowing us to skip special handling for it - just skip it
	 */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len; /* cap after last entry */
}

/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing received CT data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing the received ELS
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
 * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
 * @tag: Identifier for event.
 * @level: trace level of event.
 * @sdev: Pointer to SCSI device as context for this event.
 * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
 * @fsf: Pointer to FSF request, or NULL.
 */
void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
			  struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sdev->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	if (sc) {
		rec->scsi_result = sc->result;
		rec->scsi_retries = sc->retries;
		rec->scsi_allowed = sc->allowed;
		rec->scsi_id = sc->device->id;
		rec->scsi_lun = (u32)sc->device->lun;
		rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
		rec->host_scribble = (unsigned long)sc->host_scribble;

		memcpy(rec->scsi_opcode, sc->cmnd,
		       min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
	} else {
		rec->scsi_result = ~0;
		rec->scsi_retries = ~0;
		rec->scsi_allowed = ~0;
		rec->scsi_id = sdev->id;
		rec->scsi_lun = (u32)sdev->lun;
		rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
		rec->host_scribble = ~0;

		memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
	}

	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		rec->pl_len = FCP_RESP_WITH_EXT;
		fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
		/* mandatory parts of FCP_RSP IU in this SCSI record */
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
		}
		/* complete FCP_RSP IU in associated PAYload record
		 * but only if there are optional parts
		 */
		if (fcp_rsp->resp.fr_flags != 0)
			zfcp_dbf_pl_write(
				dbf, fcp_rsp,
				/* at least one full PAY record
				 * but not beyond hardware response field
				 */
				min_t(u16, max_t(u16, rec->pl_len,
						 ZFCP_DBF_PAY_MAX_REC),
				      FSF_FCP_RSP_SIZE),
				"fcp_riu", fsf->req_id);
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

/**
 * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
 * @tag: Identifier for event.
 * @adapter: Pointer to zfcp adapter as context for this event.
 * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
 * @ret: Return value of calling function.
 *
 * This SCSI trace variant does not depend on any of:
 * scsi_cmnd, zfcp_fsf_req, scsi_device.
 */
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
		      unsigned int scsi_id, int ret)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	unsigned long flags;
	static int const level = 1;

	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
		return;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
	rec->scsi_retries = ~0;
	rec->scsi_allowed = ~0;
	rec->fcp_rsp_info = ~0;
	rec->scsi_id = scsi_id;
	rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
	rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
	rec->host_scribble = ~0;
	memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
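
/*
 * Registration helpers: each trace area is an s390 debug feature. Once
 * registered, the areas are typically reachable via debugfs, e.g. under
 * /sys/kernel/debug/s390dbf/zfcp_<busid>_<area>/hex_ascii.
 */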

static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}

static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * Return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}

/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}