// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express target device driver tracepoints
 * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
 */

#include <linux/unaligned.h>
#include "trace.h"
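
/*
 * Each helper below snapshots the current trace_seq position with
 * trace_seq_buffer_ptr(), appends a NUL-terminated description of the
 * command-specific dwords, and returns the saved pointer so the
 * tracepoint format string can print it via "%s".
 */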

static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 cns = cdw10[0];
	u16 ctrlid = get_unaligned_le16(cdw10 + 2);

	trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
	trace_seq_putc(p, 0);

	return ret;
}
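
/*
 * Get Features CDW10 layout (NVMe base spec): FID in bits 7:0, SEL in
 * bits 10:8; CDW11 carries the feature-specific value.
 */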
static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
						  u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 fid = cdw10[0];
	u8 sel = cdw10[1] & 0x7;
	u32 cdw11 = get_unaligned_le32(cdw10 + 4);

	trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
	trace_seq_putc(p, 0);

	return ret;
}
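
/*
 * Get LBA Status dwords: SLBA spans CDW10-11, MNDW is CDW12, RL is
 * CDW13 bits 15:0 and ATYPE is CDW13 bits 31:24.
 */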
static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
					      u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u64 slba = get_unaligned_le64(cdw10);
	u32 mndw = get_unaligned_le32(cdw10 + 8);
	u16 rl = get_unaligned_le16(cdw10 + 12);
	u8 atype = cdw10[15];

	trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
			slba, mndw, rl, atype);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_admin_set_features(struct trace_seq *p,
						  u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 fid = cdw10[0];
	u8 sv = cdw10[3] & 0x8;
	u32 cdw11 = get_unaligned_le32(cdw10 + 4);

	trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u64 slba = get_unaligned_le64(cdw10);
	u16 length = get_unaligned_le16(cdw10 + 8);
	u16 control = get_unaligned_le16(cdw10 + 10);
	u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
	u32 reftag = get_unaligned_le32(cdw10 + 16);

	trace_seq_printf(p,
			 "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
			 slba, length, control, dsmgmt, reftag);
	trace_seq_putc(p, 0);

	return ret;
}
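
/*
 * Dataset Management: CDW10 bits 7:0 hold NR, the 0's-based number of
 * LBA ranges, and CDW11 holds the attributes (AD/IDW/IDR bits).
 */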
static const char *nvmet_trace_dsm(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, "nr=%u, attributes=%u",
			 get_unaligned_le32(cdw10),
			 get_unaligned_le32(cdw10 + 4));
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_common(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
	trace_seq_putc(p, 0);

	return ret;
}

const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
		u8 opcode, u8 *cdw10)
{
	switch (opcode) {
	case nvme_admin_identify:
		return nvmet_trace_admin_identify(p, cdw10);
	case nvme_admin_set_features:
		return nvmet_trace_admin_set_features(p, cdw10);
	case nvme_admin_get_features:
		return nvmet_trace_admin_get_features(p, cdw10);
	case nvme_admin_get_lba_status:
		return nvmet_trace_get_lba_status(p, cdw10);
	default:
		return nvmet_trace_common(p, cdw10);
	}
}
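
/*
 * Zone Management Send (ZNS spec): the Zone Send Action lives in CDW13
 * bits 7:0, with the Select All flag in the following byte.
 */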
static const char *nvmet_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
{
	static const char * const zsa_strs[] = {
		[0x01] = "close zone",
		[0x02] = "finish zone",
		[0x03] = "open zone",
		[0x04] = "reset zone",
		[0x05] = "offline zone",
		[0x10] = "set zone descriptor extension"
	};
	const char *ret = trace_seq_buffer_ptr(p);
	u64 slba = get_unaligned_le64(cdw10);
	const char *zsa_str;
	u8 zsa = cdw10[12];
	u8 all = cdw10[13];

	if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
		zsa_str = zsa_strs[zsa];
	else
		zsa_str = "reserved";

	trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
		slba, zsa, zsa_str, all);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
{
	static const char * const zrasf_strs[] = {
		[0x00] = "list all zones",
		[0x01] = "list the zones in the ZSE: Empty state",
		[0x02] = "list the zones in the ZSIO: Implicitly Opened state",
		[0x03] = "list the zones in the ZSEO: Explicitly Opened state",
		[0x04] = "list the zones in the ZSC: Closed state",
		[0x05] = "list the zones in the ZSF: Full state",
		[0x06] = "list the zones in the ZSRO: Read Only state",
		[0x07] = "list the zones in the ZSO: Offline state",
		[0x09] = "list the zones that have the zone attribute"
	};
	const char *ret = trace_seq_buffer_ptr(p);
	u64 slba = get_unaligned_le64(cdw10);
	u32 numd = get_unaligned_le32(&cdw10[8]);
	u8 zra = cdw10[12];
	u8 zrasf = cdw10[13];
	const char *zrasf_str;
	u8 pr = cdw10[14];

	if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
		zrasf_str = zrasf_strs[zrasf];
	else
		zrasf_str = "reserved";

	trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
		slba, numd, zra, zrasf, zrasf_str, pr);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
{
	static const char * const rrega_strs[] = {
		[0x00] = "register",
		[0x01] = "unregister",
		[0x02] = "replace",
	};
	const char *ret = trace_seq_buffer_ptr(p);
	u8 rrega = cdw10[0] & 0x7;
	u8 iekey = (cdw10[0] >> 3) & 0x1;
	u8 ptpl = (cdw10[3] >> 6) & 0x3;
	const char *rrega_str;

	if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega])
		rrega_str = rrega_strs[rrega];
	else
		rrega_str = "reserved";

	trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u",
			 rrega, rrega_str, iekey, ptpl);
	trace_seq_putc(p, 0);

	return ret;
}
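
/*
 * Reservation type names, shared by the acquire and release decoders
 * below, hence the file-scope definition.
 */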
static const char * const rtype_strs[] = {
	[0x00] = "",
	[0x01] = "write exclusive",
	[0x02] = "exclusive access",
	[0x03] = "write exclusive registrants only",
	[0x04] = "exclusive access registrants only",
	[0x05] = "write exclusive all registrants",
	[0x06] = "exclusive access all registrants",
};

static const char *nvmet_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
{
	static const char * const racqa_strs[] = {
		[0x00] = "acquire",
		[0x01] = "preempt",
		[0x02] = "preempt and abort",
	};
	const char *ret = trace_seq_buffer_ptr(p);
	u8 racqa = cdw10[0] & 0x7;
	u8 iekey = (cdw10[0] >> 3) & 0x1;
	u8 rtype = cdw10[1];
	const char *racqa_str = "reserved";
	const char *rtype_str = "reserved";

	if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa])
		racqa_str = racqa_strs[racqa];

	if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
		rtype_str = rtype_strs[rtype];

	trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s",
			 racqa, racqa_str, iekey, rtype, rtype_str);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
{
	static const char * const rrela_strs[] = {
		[0x00] = "release",
		[0x01] = "clear",
	};
	const char *ret = trace_seq_buffer_ptr(p);
	u8 rrela = cdw10[0] & 0x7;
	u8 iekey = (cdw10[0] >> 3) & 0x1;
	u8 rtype = cdw10[1];
	const char *rrela_str = "reserved";
	const char *rtype_str = "reserved";

	if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela])
		rrela_str = rrela_strs[rrela];

	if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
		rtype_str = rtype_strs[rtype];

	trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s",
			 rrela, rrela_str, iekey, rtype, rtype_str);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_resv_report(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u32 numd = get_unaligned_le32(cdw10);
	u8 eds = cdw10[4] & 0x1;

	trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
	trace_seq_putc(p, 0);

	return ret;
}

const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
		u8 opcode, u8 *cdw10)
{
	switch (opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
	case nvme_cmd_write_zeroes:
	case nvme_cmd_zone_append:
		return nvmet_trace_read_write(p, cdw10);
	case nvme_cmd_dsm:
		return nvmet_trace_dsm(p, cdw10);
	case nvme_cmd_zone_mgmt_send:
		return nvmet_trace_zone_mgmt_send(p, cdw10);
	case nvme_cmd_zone_mgmt_recv:
		return nvmet_trace_zone_mgmt_recv(p, cdw10);
	case nvme_cmd_resv_register:
		return nvmet_trace_resv_reg(p, cdw10);
	case nvme_cmd_resv_acquire:
		return nvmet_trace_resv_acq(p, cdw10);
	case nvme_cmd_resv_release:
		return nvmet_trace_resv_rel(p, cdw10);
	case nvme_cmd_resv_report:
		return nvmet_trace_resv_report(p, cdw10);
	default:
		return nvmet_trace_common(p, cdw10);
	}
}
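
/*
 * Fabrics command decoders: spc points at the command-type specific
 * bytes of the capsule, so the fixed offsets below follow the
 * nvmf_*_command layouts in include/linux/nvme.h.
 */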
static const char *nvmet_trace_fabrics_property_set(struct trace_seq *p,
		u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 attrib = spc[0];
	u32 ofst = get_unaligned_le32(spc + 4);
	u64 value = get_unaligned_le64(spc + 8);

	trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
			 attrib, ofst, value);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_connect(struct trace_seq *p,
		u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u16 recfmt = get_unaligned_le16(spc);
	u16 qid = get_unaligned_le16(spc + 2);
	u16 sqsize = get_unaligned_le16(spc + 4);
	u8 cattr = spc[6];
	u32 kato = get_unaligned_le32(spc + 8);

	trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
			 recfmt, qid, sqsize, cattr, kato);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
		u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 attrib = spc[0];
	u32 ofst = get_unaligned_le32(spc + 4);

	trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
	trace_seq_putc(p, 0);
	return ret;
}
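
/*
 * In-band authentication transfers: SPSP0/SPSP1 and SECP identify the
 * security protocol; tl and al give the transfer and allocation
 * lengths, respectively.
 */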
static const char *nvmet_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 spsp0 = spc[1];
	u8 spsp1 = spc[2];
	u8 secp = spc[3];
	u32 tl = get_unaligned_le32(spc + 4);

	trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
			 spsp0, spsp1, secp, tl);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 spsp0 = spc[1];
	u8 spsp1 = spc[2];
	u8 secp = spc[3];
	u32 al = get_unaligned_le32(spc + 4);

	trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
			 spsp0, spsp1, secp, al);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, "specific=%*ph", 24, spc);
	trace_seq_putc(p, 0);
	return ret;
}

const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
		u8 fctype, u8 *spc)
{
	switch (fctype) {
	case nvme_fabrics_type_property_set:
		return nvmet_trace_fabrics_property_set(p, spc);
	case nvme_fabrics_type_connect:
		return nvmet_trace_fabrics_connect(p, spc);
	case nvme_fabrics_type_property_get:
		return nvmet_trace_fabrics_property_get(p, spc);
	case nvme_fabrics_type_auth_send:
		return nvmet_trace_fabrics_auth_send(p, spc);
	case nvme_fabrics_type_auth_receive:
		return nvmet_trace_fabrics_auth_receive(p, spc);
	default:
		return nvmet_trace_fabrics_common(p, spc);
	}
}
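
/*
 * Events without an associated namespace record an empty name, in which
 * case nothing is printed; the trailing ", " keeps the surrounding
 * format string readable when a disk name is present.
 */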
const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
{
	const char *ret = trace_seq_buffer_ptr(p);

	if (*name)
		trace_seq_printf(p, "disk=%s, ", name);
	trace_seq_putc(p, 0);

	return ret;
}

const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
{
	const char *ret = trace_seq_buffer_ptr(p);

	/*
	 * XXX: We don't know the controller instance before executing the
	 * connect command itself because the connect command for the admin
	 * queue will not provide the cntlid which will be allocated in this
	 * command.  In case of io queues, the controller instance will be
	 * mapped by the extra data of the connect command.
	 * If we can know the extra data of the connect command in this stage,
	 * we can update this print statement later.
	 */
	if (ctrl_id)
		trace_seq_printf(p, "%d", ctrl_id);
	else
		trace_seq_printf(p, "_");
	trace_seq_putc(p, 0);

	return ret;
}