/*
 * Routines for NVM Express over Fabrics (TCP) dissection
 * Code by Solganik Alexander <solganik@gmail.com>
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

/*
 * Copyright (C) 2019 Lightbits Labs Ltd. - All Rights Reserved
 */
/*
NVM Express is a high-speed interface for accessing solid state drives.
NVM Express specifications are maintained by the NVM Express industry
association at http://www.nvmexpress.org.

This file adds support for dissecting NVM Express over Fabrics packets
carried over TCP. It provides basic support for dissecting commands.

Current dissection supports dissection of
(a) NVMe command and CQE
(b) NVMe Fabric command and CQE
As part of this, it also calculates command completion latencies.

The NVM Express TCP port assigned by IANA that maps to the NVMe-oF service
can be found at
http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=NVM+Express
*/
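
/*
 * Every NVMe/TCP PDU begins with an 8-byte common header, which is what
 * this dissector parses first (see dissect_nvme_tcp_pdu() below). As an
 * illustrative sketch only (this struct is not used by the code):
 *
 *     struct nvme_tcp_common_hdr {
 *         uint8_t  type;   // enum nvme_tcp_pdu_type
 *         uint8_t  flags;  // enum nvme_tcp_pdu_flags
 *         uint8_t  hlen;   // PDU header length
 *         uint8_t  pdo;    // PDU data offset
 *         uint32_t plen;   // total PDU length (header + data), little-endian
 *     };
 */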
#include "config.h"

#include <epan/packet.h>
#include <epan/prefs.h>
#include <epan/conversation.h>
#include <epan/crc32-tvb.h>
#include <epan/tfs.h>

#include <wsutil/array.h>
#include "packet-tcp.h"
#include "packet-nvme.h"

#include "packet-tls.h"
static int proto_nvme_tcp;
static dissector_handle_t nvmet_tcp_handle;
static dissector_handle_t nvmet_tls_handle;
#define NVME_TCP_PORT_RANGE "4420,8009" /* IANA registered */

#define NVME_FABRICS_TCP "NVMe/TCP"
#define NVME_TCP_HEADER_SIZE 8
#define PDU_LEN_OFFSET_FROM_HEADER 4
static range_t *gPORT_RANGE;
static bool nvme_tcp_check_hdgst;
static bool nvme_tcp_check_ddgst;
#define NVME_TCP_DATA_PDU_SIZE 24
enum nvme_tcp_pdu_type {
    nvme_tcp_icreq = 0x0,
    nvme_tcp_icresp = 0x1,
    nvme_tcp_h2c_term = 0x2,
    nvme_tcp_c2h_term = 0x3,
    nvme_tcp_cmd = 0x4,
    nvme_tcp_rsp = 0x5,
    nvme_tcp_h2c_data = 0x6,
    nvme_tcp_c2h_data = 0x7,
    nvme_tcp_r2t = 0x9,
    nvme_tcp_kdreq = 0xa,
    nvme_tcp_kdresp = 0xb,
    NVMET_MAX_PDU_TYPE = nvme_tcp_kdresp
};
static const value_string nvme_tcp_pdu_type_vals[] = {
    { nvme_tcp_icreq, "ICReq" },
    { nvme_tcp_icresp, "ICResp" },
    { nvme_tcp_h2c_term, "H2CTerm" },
    { nvme_tcp_c2h_term, "C2HTerm" },
    { nvme_tcp_cmd, "CapsuleCommand" },
    { nvme_tcp_rsp, "CapsuleResponse" },
    { nvme_tcp_h2c_data, "H2CData" },
    { nvme_tcp_c2h_data, "C2HData" },
    { nvme_tcp_r2t, "Ready To Transfer" },
    { nvme_tcp_kdreq, "Kickstart Discovery Request" },
    { nvme_tcp_kdresp, "Kickstart Discovery Response" },
    { 0, NULL }
};
static const value_string nvme_tcp_termreq_fes[] = {
    { 0x1, "Invalid PDU Header Field" },
    { 0x2, "PDU Sequence Error" },
    { 0x3, "Header Digest Error" },
    { 0x4, "Data Transfer Out of Range" },
    { 0x5, "R2T Limit Exceeded" },
    { 0x6, "Unsupported Parameter" },
    { 0, NULL }
};
enum nvme_tcp_fatal_error_status {
    NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
    NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
    NVME_TCP_FES_HDR_DIGEST_ERR = 0x03,
    NVME_TCP_FES_DATA_OUT_OF_RANGE = 0x04,
    NVME_TCP_FES_R2T_LIMIT_EXCEEDED = 0x05,
    NVME_TCP_FES_DATA_LIMIT_EXCEEDED = 0x05, /* alias: same status code as the R2T limit */
    NVME_TCP_FES_UNSUPPORTED_PARAM = 0x06,
};
enum nvme_tcp_pdu_flags {
    NVME_TCP_F_HDGST = (1 << 0),
    NVME_TCP_F_DDGST = (1 << 1),
    NVME_TCP_F_DATA_LAST = (1 << 2),
    NVME_TCP_F_DATA_SUCCESS = (1 << 3),
};
enum nvme_tcp_digest_option {
    NVME_TCP_HDR_DIGEST_ENABLE = (1 << 0),
    NVME_TCP_DATA_DIGEST_ENABLE = (1 << 1),
};
#define NVME_FABRIC_CMD_SIZE NVME_CMD_SIZE
#define NVME_FABRIC_CQE_SIZE NVME_CQE_SIZE
#define NVME_TCP_DIGEST_LENGTH 4
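
/* Size notes (assumed from the constants above and the NVMe/TCP transport
 * spec): every PDU starts with an 8-byte common header
 * (NVME_TCP_HEADER_SIZE); C2HData/H2CData PDUs carry a further 16 bytes of
 * PDU-specific header, giving NVME_TCP_DATA_PDU_SIZE == 24; header and data
 * digests, when negotiated, are 4-byte CRC32C values
 * (NVME_TCP_DIGEST_LENGTH). */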
struct nvme_tcp_q_ctx {
    struct nvme_q_ctx n_q_ctx;
};

struct nvme_tcp_cmd_ctx {
    struct nvme_cmd_ctx n_cmd_ctx;
};
void proto_reg_handoff_nvme_tcp(void);
void proto_register_nvme_tcp(void);
static int hf_nvme_tcp_type;
static int hf_nvme_tcp_flags;
static int hf_pdu_flags_hdgst;
static int hf_pdu_flags_ddgst;
static int hf_pdu_flags_data_last;
static int hf_pdu_flags_data_success;
static int * const nvme_tcp_pdu_flags[] = {
    &hf_pdu_flags_hdgst,
    &hf_pdu_flags_ddgst,
    &hf_pdu_flags_data_last,
    &hf_pdu_flags_data_success,
    NULL
};
static int hf_nvme_tcp_hdgst;
static int hf_nvme_tcp_ddgst;
static int hf_nvme_tcp_hlen;
static int hf_nvme_tcp_pdo;
static int hf_nvme_tcp_plen;
static int hf_nvme_tcp_hdgst_status;
static int hf_nvme_tcp_ddgst_status;
/* NVMe tcp icreq/icresp fields */
static int hf_nvme_tcp_icreq;
static int hf_nvme_tcp_icreq_pfv;
static int hf_nvme_tcp_icreq_maxr2t;
static int hf_nvme_tcp_icreq_hpda;
static int hf_nvme_tcp_icreq_digest;
static int hf_nvme_tcp_icresp;
static int hf_nvme_tcp_icresp_pfv;
static int hf_nvme_tcp_icresp_cpda;
static int hf_nvme_tcp_icresp_digest;
static int hf_nvme_tcp_icresp_maxdata;
/* NVMe tcp c2h/h2c termreq fields */
static int hf_nvme_tcp_c2htermreq;
static int hf_nvme_tcp_c2htermreq_fes;
static int hf_nvme_tcp_c2htermreq_phfo;
static int hf_nvme_tcp_c2htermreq_phd;
static int hf_nvme_tcp_c2htermreq_upfo;
static int hf_nvme_tcp_c2htermreq_reserved;
static int hf_nvme_tcp_c2htermreq_data;
static int hf_nvme_tcp_h2ctermreq;
static int hf_nvme_tcp_h2ctermreq_fes;
static int hf_nvme_tcp_h2ctermreq_phfo;
static int hf_nvme_tcp_h2ctermreq_phd;
static int hf_nvme_tcp_h2ctermreq_upfo;
static int hf_nvme_tcp_h2ctermreq_reserved;
static int hf_nvme_tcp_h2ctermreq_data;
/* NVMe fabrics command */
static int hf_nvme_fabrics_cmd_cid;

/* NVMe fabrics command data */
static int hf_nvme_fabrics_cmd_data;
static int hf_nvme_tcp_unknown_data;

static int hf_nvme_tcp_r2t_pdu;
static int hf_nvme_tcp_r2t_offset;
static int hf_nvme_tcp_r2t_length;
static int hf_nvme_tcp_r2t_resvd;

/* tracking Cmd and its respective CQE */
static int hf_nvme_tcp_cmd_pkt;
static int hf_nvme_fabrics_cmd_qid;

/* Data response fields */
static int hf_nvme_tcp_data_pdu;
static int hf_nvme_tcp_pdu_ttag;
static int hf_nvme_tcp_data_pdu_data_offset;
static int hf_nvme_tcp_data_pdu_data_length;
static int hf_nvme_tcp_data_pdu_data_resvd;
static int ett_nvme_tcp;
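
/* PDU length callback for tcp_dissect_pdus(): the total PDU length (plen)
 * sits at bytes 4-7 of the 8-byte common header, little-endian, so once the
 * header is available the TCP layer can reassemble PDUs that span multiple
 * TCP segments. */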
static unsigned
get_nvme_tcp_pdu_len(packet_info *pinfo _U_,
                     tvbuff_t *tvb,
                     int offset,
                     void *data _U_)
{
    return tvb_get_letohl(tvb, offset + PDU_LEN_OFFSET_FROM_HEADER);
}
static void
dissect_nvme_tcp_icreq(tvbuff_t *tvb,
                       packet_info *pinfo,
                       int offset,
                       proto_tree *tree)
{
    proto_item *tf;
    proto_item *icreq_tree;

    col_set_str(pinfo->cinfo, COL_INFO, "Initialize Connection Request");
    tf = proto_tree_add_item(tree, hf_nvme_tcp_icreq, tvb, offset, 8, ENC_NA);
    icreq_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_pfv, tvb, offset, 2,
            ENC_LITTLE_ENDIAN);
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_hpda, tvb, offset + 2, 1,
            ENC_NA);
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_digest, tvb, offset + 3,
            1, ENC_NA);
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_maxr2t, tvb, offset + 4,
            4, ENC_LITTLE_ENDIAN);
}
static void
dissect_nvme_tcp_icresp(tvbuff_t *tvb,
                        packet_info *pinfo,
                        int offset,
                        proto_tree *tree)
{
    proto_item *tf;
    proto_item *icresp_tree;

    col_set_str(pinfo->cinfo, COL_INFO, "Initialize Connection Response");
    tf = proto_tree_add_item(tree, hf_nvme_tcp_icresp, tvb, offset, 8, ENC_NA);
    icresp_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_pfv, tvb, offset, 2,
            ENC_LITTLE_ENDIAN);
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_cpda, tvb, offset + 2,
            1, ENC_NA);
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_digest, tvb, offset + 3,
            1, ENC_NA);
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_maxdata, tvb,
            offset + 4, 4, ENC_LITTLE_ENDIAN);
}
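
/* Command/completion tracking: on the first dissection pass a command is
 * added to the queue's pending list keyed by its command ID, and when the
 * matching CQE arrives it is moved to the done list (via the helpers from
 * packet-nvme.h). Linking the command and CQE frames is what allows the
 * dissector to report per-command completion latency. */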
static struct nvme_tcp_cmd_ctx*
bind_cmd_to_qctx(packet_info *pinfo,
                 struct nvme_q_ctx *q_ctx,
                 uint16_t cmd_id)
{
    struct nvme_tcp_cmd_ctx *ctx;

    /* wireshark will dissect the same packet multiple times
     * when the display is refreshed */
    if (!PINFO_FD_VISITED(pinfo)) {
        ctx = wmem_new0(wmem_file_scope(), struct nvme_tcp_cmd_ctx);
        nvme_add_cmd_to_pending_list(pinfo, q_ctx, &ctx->n_cmd_ctx, (void*) ctx,
                cmd_id);
    } else {
        /* Already visited this frame */
        ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_done_list(pinfo,
                q_ctx, cmd_id);
        /* if we have already visited the frame but haven't found the
         * completion yet, we won't find the cmd in the done queue, so
         * allocate a dummy ctx for the rest of the processing. */
        if (!ctx)
            ctx = wmem_new0(wmem_file_scope(), struct nvme_tcp_cmd_ctx);
    }

    return ctx;
}
static void
dissect_nvme_tcp_command(tvbuff_t *tvb,
                         packet_info *pinfo,
                         proto_tree *root_tree,
                         proto_tree *nvme_tcp_tree,
                         proto_item *nvme_tcp_ti,
                         struct nvme_tcp_q_ctx *queue, int offset,
                         uint32_t incapsuled_data_size,
                         uint32_t data_offset)
{
    struct nvme_tcp_cmd_ctx *cmd_ctx;
    uint8_t opcode;
    uint16_t cmd_id;
    const char *cmd_string;

    opcode = tvb_get_uint8(tvb, offset);
    cmd_id = tvb_get_uint16(tvb, offset + 2, ENC_LITTLE_ENDIAN);
    cmd_ctx = bind_cmd_to_qctx(pinfo, &queue->n_q_ctx, cmd_id);

    /* if the record did not contain a connect command we won't know the qid,
     * so let's guess if this is an admin queue */
    if ((queue->n_q_ctx.qid == UINT16_MAX) && !nvme_is_io_queue_opcode(opcode))
        queue->n_q_ctx.qid = 0;

    if (opcode == NVME_FABRIC_OPC) {
        cmd_ctx->n_cmd_ctx.fabric = true;
        dissect_nvmeof_fabric_cmd(tvb, pinfo, nvme_tcp_tree, &queue->n_q_ctx,
                &cmd_ctx->n_cmd_ctx, offset, false);
        if (cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype == NVME_FCTYPE_CONNECT)
            queue->n_q_ctx.qid = cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.cnct.qid;
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
        proto_item_append_text(nvme_tcp_ti,
                ", Fabrics Type: %s (0x%02x) Cmd ID: 0x%04x", cmd_string,
                cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype, cmd_id);
        if (incapsuled_data_size > 0) {
            proto_tree *data_tree;
            proto_item *ti;

            ti = proto_tree_add_item(nvme_tcp_tree, hf_nvme_fabrics_cmd_data,
                    tvb, offset, incapsuled_data_size, ENC_NA);
            data_tree = proto_item_add_subtree(ti, ett_nvme_tcp);
            dissect_nvmeof_cmd_data(tvb, pinfo, data_tree,
                    offset + NVME_FABRIC_CMD_SIZE + data_offset,
                    &queue->n_q_ctx, &cmd_ctx->n_cmd_ctx, incapsuled_data_size);
        }
        return;
    }

    /* In case of an encapsulated nvme command the tcp item length covers
     * only the header */
    proto_item_set_len(nvme_tcp_ti, NVME_TCP_HEADER_SIZE);
    tvbuff_t *nvme_tvbuff;
    cmd_ctx->n_cmd_ctx.fabric = false;
    nvme_tvbuff = tvb_new_subset_remaining(tvb, NVME_TCP_HEADER_SIZE);
    cmd_string = nvme_get_opcode_string(opcode, queue->n_q_ctx.qid);
    dissect_nvme_cmd(nvme_tvbuff, pinfo, root_tree, &queue->n_q_ctx,
            &cmd_ctx->n_cmd_ctx);
    proto_item_append_text(nvme_tcp_ti,
            ", NVMe Opcode: %s (0x%02x) Cmd ID: 0x%04x", cmd_string, opcode,
            cmd_id);

    /* This is an inline write */
    if (incapsuled_data_size > 0) {
        tvbuff_t *nvme_data;

        nvme_data = tvb_new_subset_remaining(tvb, offset +
                NVME_CMD_SIZE + data_offset);
        dissect_nvme_data_response(nvme_data, pinfo, root_tree, &queue->n_q_ctx,
                &cmd_ctx->n_cmd_ctx, incapsuled_data_size, true);
    }
}
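
/* Dissects the 16-byte PDU-specific header shared by C2HData and H2CData
 * PDUs (command ID, transfer tag, data offset, data length, reserved) and
 * returns the data length so callers know how much payload follows. */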
static uint32_t
dissect_nvme_tcp_data_pdu(tvbuff_t *tvb,
                          packet_info *pinfo,
                          int offset,
                          proto_tree *tree)
{
    uint32_t data_length;
    proto_item *tf;
    proto_item *data_tree;

    col_set_str(pinfo->cinfo, COL_PROTOCOL, "NVMe");

    tf = proto_tree_add_item(tree, hf_nvme_tcp_data_pdu, tvb, offset,
            NVME_TCP_DATA_PDU_SIZE - NVME_TCP_HEADER_SIZE, ENC_NA);
    data_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(data_tree, hf_nvme_fabrics_cmd_cid, tvb, offset, 2,
            ENC_LITTLE_ENDIAN);
    proto_tree_add_item(data_tree, hf_nvme_tcp_pdu_ttag, tvb, offset + 2, 2,
            ENC_LITTLE_ENDIAN);
    proto_tree_add_item(data_tree, hf_nvme_tcp_data_pdu_data_offset, tvb,
            offset + 4, 4, ENC_LITTLE_ENDIAN);

    data_length = tvb_get_uint32(tvb, offset + 8, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(data_tree, hf_nvme_tcp_data_pdu_data_length, tvb,
            offset + 8, 4, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(data_tree, hf_nvme_tcp_data_pdu_data_resvd, tvb,
            offset + 12, 4, ENC_NA);

    return data_length;
}
static void
dissect_nvme_tcp_c2h_data(tvbuff_t *tvb,
                          packet_info *pinfo,
                          proto_tree *root_tree,
                          proto_tree *nvme_tcp_tree,
                          proto_item *nvme_tcp_ti,
                          struct nvme_tcp_q_ctx *queue,
                          int offset,
                          uint32_t data_offset)
{
    struct nvme_tcp_cmd_ctx *cmd_ctx;
    uint16_t cmd_id;
    uint32_t data_length;
    tvbuff_t *nvme_data;
    const char *cmd_string;

    cmd_id = tvb_get_uint16(tvb, offset, ENC_LITTLE_ENDIAN);
    data_length = dissect_nvme_tcp_data_pdu(tvb, pinfo, offset, nvme_tcp_tree);

    /* This can identify our packet uniquely */
    if (!PINFO_FD_VISITED(pinfo)) {
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_pending_list(
                &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx) {
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb,
                    offset + 16, data_length, ENC_NA);
            return;
        }

        /* In order to later look up the command context, add this command
         * to the data responses */
        cmd_ctx->n_cmd_ctx.data_tr_pkt_num[0] = pinfo->num;
        nvme_add_data_tr_pkt(&queue->n_q_ctx, &cmd_ctx->n_cmd_ctx, cmd_id,
                pinfo->num);
    } else {
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_data_tr_pkt(
                &queue->n_q_ctx, cmd_id, pinfo->num);
        if (!cmd_ctx) {
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb,
                    offset + 16, data_length, ENC_NA);
            return;
        }
    }

    nvme_publish_to_cmd_link(nvme_tcp_tree, tvb,
            hf_nvme_tcp_cmd_pkt, &cmd_ctx->n_cmd_ctx);

    if (cmd_ctx->n_cmd_ctx.fabric) {
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
        proto_item_append_text(nvme_tcp_ti,
                ", C2HData Fabrics Type: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
                cmd_string, cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype,
                cmd_id, data_length);
    } else {
        cmd_string = nvme_get_opcode_string(cmd_ctx->n_cmd_ctx.opcode,
                queue->n_q_ctx.qid);
        proto_item_append_text(nvme_tcp_ti,
                ", C2HData Opcode: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
                cmd_string, cmd_ctx->n_cmd_ctx.opcode, cmd_id, data_length);
    }

    nvme_data = tvb_new_subset_remaining(tvb,
            NVME_TCP_DATA_PDU_SIZE + data_offset);
    dissect_nvme_data_response(nvme_data, pinfo, root_tree, &queue->n_q_ctx,
            &cmd_ctx->n_cmd_ctx, data_length, false);
}
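
/* wmem_tree_*32_array() keys here are {frame number, command ID} pairs: an
 * NVMe command ID is only unique until the command completes and may then
 * be reused on the same queue, so the frame number is needed to make
 * H2CData lookups unambiguous across a long capture. */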
static void nvme_tcp_build_cmd_key(uint32_t *frame_num, uint32_t *cmd_id,
        wmem_tree_key_t *key)
{
    key[0].key = frame_num;
    key[0].length = 1;
    key[1].key = cmd_id;
    key[1].length = 1;
    key[2].key = NULL;
    key[2].length = 0;
}
static void nvme_tcp_add_data_request(packet_info *pinfo, struct nvme_q_ctx *q_ctx,
        struct nvme_tcp_cmd_ctx *cmd_ctx, uint16_t cmd_id)
{
    wmem_tree_key_t cmd_key[3];
    uint32_t cmd_id_key = cmd_id;

    nvme_tcp_build_cmd_key(&pinfo->num, &cmd_id_key, cmd_key);
    cmd_ctx->n_cmd_ctx.data_req_pkt_num = pinfo->num;
    cmd_ctx->n_cmd_ctx.data_tr_pkt_num[0] = 0;
    wmem_tree_insert32_array(q_ctx->data_requests, cmd_key, (void *)cmd_ctx);
}
static struct nvme_tcp_cmd_ctx* nvme_tcp_lookup_data_request(packet_info *pinfo,
        struct nvme_q_ctx *q_ctx,
        uint16_t cmd_id)
{
    wmem_tree_key_t cmd_key[3];
    uint32_t cmd_id_key = cmd_id;

    nvme_tcp_build_cmd_key(&pinfo->num, &cmd_id_key, cmd_key);
    return (struct nvme_tcp_cmd_ctx*)wmem_tree_lookup32_array(q_ctx->data_requests,
            cmd_key);
}
static void
dissect_nvme_tcp_h2c_data(tvbuff_t *tvb,
                          packet_info *pinfo,
                          proto_tree *root_tree,
                          proto_tree *nvme_tcp_tree,
                          proto_item *nvme_tcp_ti,
                          struct nvme_tcp_q_ctx *queue,
                          int offset,
                          uint32_t data_offset)
{
    struct nvme_tcp_cmd_ctx *cmd_ctx;
    uint16_t cmd_id;
    uint32_t data_length;
    tvbuff_t *nvme_data;
    const char *cmd_string;

    cmd_id = tvb_get_uint16(tvb, offset, ENC_LITTLE_ENDIAN);
    data_length = dissect_nvme_tcp_data_pdu(tvb, pinfo, offset, nvme_tcp_tree);

    if (!PINFO_FD_VISITED(pinfo)) {
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_pending_list(
                &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx) {
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb,
                    offset + 16, data_length, ENC_NA);
            return;
        }

        /* Fill this in for the "add data request" call;
         * it will be the key used to fetch the data request later */
        nvme_tcp_add_data_request(pinfo, &queue->n_q_ctx, cmd_ctx, cmd_id);
    } else {
        cmd_ctx = nvme_tcp_lookup_data_request(pinfo, &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx) {
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb,
                    offset + 16, data_length, ENC_NA);
            return;
        }
    }

    nvme_publish_to_cmd_link(nvme_tcp_tree, tvb,
            hf_nvme_tcp_cmd_pkt, &cmd_ctx->n_cmd_ctx);

    /* fabrics commands should not have h2cdata */
    if (cmd_ctx->n_cmd_ctx.fabric) {
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
        proto_item_append_text(nvme_tcp_ti,
                ", H2CData Fabrics Type: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
                cmd_string, cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype,
                cmd_id, data_length);
        proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb,
                offset + 16, data_length, ENC_NA);
        return;
    }

    cmd_string = nvme_get_opcode_string(cmd_ctx->n_cmd_ctx.opcode,
            queue->n_q_ctx.qid);
    proto_item_append_text(nvme_tcp_ti,
            ", H2CData Opcode: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
            cmd_string, cmd_ctx->n_cmd_ctx.opcode, cmd_id, data_length);

    nvme_data = tvb_new_subset_remaining(tvb,
            NVME_TCP_DATA_PDU_SIZE + data_offset);
    dissect_nvme_data_response(nvme_data, pinfo, root_tree, &queue->n_q_ctx,
            &cmd_ctx->n_cmd_ctx, data_length, false);
}
static void
dissect_nvme_tcp_h2ctermreq(tvbuff_t *tvb, packet_info *pinfo,
                            proto_tree *tree, uint32_t packet_len, int offset)
{
    proto_item *tf;
    proto_item *h2ctermreq_tree;
    uint16_t fes;

    col_set_str(pinfo->cinfo, COL_INFO,
                "Host to Controller Termination Request");
    tf = proto_tree_add_item(tree, hf_nvme_tcp_h2ctermreq,
                             tvb, offset, 8, ENC_NA);
    h2ctermreq_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_fes,
                        tvb, offset + 8, 2, ENC_LITTLE_ENDIAN);
    fes = tvb_get_uint16(tvb, offset + 8, ENC_LITTLE_ENDIAN);
    switch (fes) {
    case NVME_TCP_FES_INVALID_PDU_HDR:
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_phfo,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    case NVME_TCP_FES_HDR_DIGEST_ERR:
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_phd,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    case NVME_TCP_FES_UNSUPPORTED_PARAM:
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_upfo,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    default:
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_reserved,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    }
    proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_data,
                        tvb, offset + 24, packet_len - 24, ENC_NA);
}
static void
dissect_nvme_tcp_c2htermreq(tvbuff_t *tvb, packet_info *pinfo,
                            proto_tree *tree, uint32_t packet_len, int offset)
{
    proto_item *tf;
    proto_item *c2htermreq_tree;
    uint16_t fes;

    col_set_str(pinfo->cinfo, COL_INFO,
                "Controller to Host Termination Request");
    tf = proto_tree_add_item(tree, hf_nvme_tcp_c2htermreq,
                             tvb, offset, 8, ENC_NA);
    c2htermreq_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(tree, hf_nvme_tcp_c2htermreq_fes, tvb, offset + 8, 2,
                        ENC_LITTLE_ENDIAN);
    fes = tvb_get_uint16(tvb, offset + 8, ENC_LITTLE_ENDIAN);
    switch (fes) {
    case NVME_TCP_FES_INVALID_PDU_HDR:
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_phfo,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    case NVME_TCP_FES_HDR_DIGEST_ERR:
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_phd,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    case NVME_TCP_FES_UNSUPPORTED_PARAM:
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_upfo,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    default:
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_reserved,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    }
    proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_data,
                        tvb, offset + 24, packet_len - 24, ENC_NA);
}
static void
dissect_nvme_tcp_cqe(tvbuff_t *tvb,
                     packet_info *pinfo,
                     proto_tree *root_tree,
                     proto_tree *nvme_tree,
                     proto_item *ti,
                     struct nvme_tcp_q_ctx *queue,
                     int offset)
{
    struct nvme_tcp_cmd_ctx *cmd_ctx;
    uint16_t cmd_id;
    const char *cmd_string;
    tvbuff_t *nvme_tvb;

    cmd_id = tvb_get_uint16(tvb, offset + 12, ENC_LITTLE_ENDIAN);

    /* wireshark will dissect a packet several times when the display is
     * refreshed; we need to track state changes only once */
    if (!PINFO_FD_VISITED(pinfo)) {
        cmd_ctx = (struct nvme_tcp_cmd_ctx *) nvme_lookup_cmd_in_pending_list(
                &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx || cmd_ctx->n_cmd_ctx.cqe_pkt_num) {
            proto_tree_add_item(nvme_tree, hf_nvme_tcp_unknown_data, tvb, offset,
                    NVME_FABRIC_CQE_SIZE, ENC_NA);
            return;
        }

        cmd_ctx->n_cmd_ctx.cqe_pkt_num = pinfo->num;
        nvme_add_cmd_cqe_to_done_list(&queue->n_q_ctx, &cmd_ctx->n_cmd_ctx,
                cmd_id);
    } else {
        cmd_ctx = (struct nvme_tcp_cmd_ctx *) nvme_lookup_cmd_in_done_list(pinfo,
                &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx) {
            proto_tree_add_item(nvme_tree, hf_nvme_tcp_unknown_data, tvb, offset,
                    NVME_FABRIC_CQE_SIZE, ENC_NA);
            return;
        }
    }

    nvme_update_cmd_end_info(pinfo, &cmd_ctx->n_cmd_ctx);

    if (cmd_ctx->n_cmd_ctx.fabric) {
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
        proto_item_append_text(ti,
                ", Cqe Fabrics Cmd: %s (0x%02x) Cmd ID: 0x%04x", cmd_string,
                cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype, cmd_id);

        dissect_nvmeof_fabric_cqe(tvb, pinfo, nvme_tree, &cmd_ctx->n_cmd_ctx, offset);
    } else {
        proto_item_set_len(ti, NVME_TCP_HEADER_SIZE);
        cmd_string = nvme_get_opcode_string(cmd_ctx->n_cmd_ctx.opcode,
                queue->n_q_ctx.qid);
        proto_item_append_text(ti, ", Cqe NVMe Cmd: %s (0x%02x) Cmd ID: 0x%04x",
                cmd_string, cmd_ctx->n_cmd_ctx.opcode, cmd_id);
        /* get the encapsulated nvme command */
        nvme_tvb = tvb_new_subset_remaining(tvb, NVME_TCP_HEADER_SIZE);
        dissect_nvme_cqe(nvme_tvb, pinfo, root_tree, &queue->n_q_ctx, &cmd_ctx->n_cmd_ctx);
    }
}
static void
dissect_nvme_tcp_r2t(tvbuff_t *tvb,
                     packet_info *pinfo,
                     int offset,
                     proto_tree *tree)
{
    proto_item *tf;
    proto_item *r2t_tree;

    tf = proto_tree_add_item(tree, hf_nvme_tcp_r2t_pdu, tvb, offset, -1,
            ENC_NA);
    r2t_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    col_append_sep_fstr(pinfo->cinfo, COL_INFO, " | ", "Ready To Transfer");

    proto_tree_add_item(r2t_tree, hf_nvme_fabrics_cmd_cid, tvb, offset, 2,
            ENC_LITTLE_ENDIAN);
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_pdu_ttag, tvb, offset + 2, 2,
            ENC_LITTLE_ENDIAN);
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_r2t_offset, tvb, offset + 4, 4,
            ENC_LITTLE_ENDIAN);
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_r2t_length, tvb, offset + 8, 4,
            ENC_LITTLE_ENDIAN);
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_r2t_resvd, tvb, offset + 12, 4,
            ENC_NA);
}
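
/* Per-PDU dissection entry point invoked by tcp_dissect_pdus(): parses the
 * 8-byte common header, optionally verifies the header/data digests, then
 * dispatches on the PDU type. */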
static int
dissect_nvme_tcp_pdu(tvbuff_t *tvb,
                     packet_info *pinfo,
                     proto_tree *tree,
                     void *data _U_)
{
    conversation_t *conversation;
    struct nvme_tcp_q_ctx *q_ctx;
    proto_item *ti;
    int offset = 0;
    int nvme_tcp_pdu_offset;
    proto_tree *nvme_tcp_tree;
    unsigned packet_type;
    uint8_t hlen, pdo;
    uint8_t pdu_flags;
    uint32_t plen;
    uint32_t incapsuled_data_size;
    uint32_t pdu_data_offset = 0;

    conversation = find_or_create_conversation(pinfo);
    q_ctx = (struct nvme_tcp_q_ctx *)
            conversation_get_proto_data(conversation, proto_nvme_tcp);

    if (!q_ctx) {
        q_ctx = wmem_new0(wmem_file_scope(), struct nvme_tcp_q_ctx);
        q_ctx->n_q_ctx.pending_cmds = wmem_tree_new(wmem_file_scope());
        q_ctx->n_q_ctx.done_cmds = wmem_tree_new(wmem_file_scope());
        q_ctx->n_q_ctx.data_requests = wmem_tree_new(wmem_file_scope());
        q_ctx->n_q_ctx.data_responses = wmem_tree_new(wmem_file_scope());
        /* Initially set to non-0 so that by default queues are io queues;
         * this is required to be able to dissect correctly even
         * if we miss the connect command */
        q_ctx->n_q_ctx.qid = UINT16_MAX;
        conversation_add_proto_data(conversation, proto_nvme_tcp, q_ctx);
    }

    ti = proto_tree_add_item(tree, proto_nvme_tcp, tvb, 0, -1, ENC_NA);
    nvme_tcp_tree = proto_item_add_subtree(ti, ett_nvme_tcp);

    if (q_ctx->n_q_ctx.qid != UINT16_MAX)
        nvme_publish_qid(nvme_tcp_tree, hf_nvme_fabrics_cmd_qid,
                q_ctx->n_q_ctx.qid);

    packet_type = tvb_get_uint8(tvb, offset);
    proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_type, tvb, offset, 1,
            ENC_NA);

    pdu_flags = tvb_get_uint8(tvb, offset + 1);
    proto_tree_add_bitmask_value(nvme_tcp_tree, tvb, offset + 1, hf_nvme_tcp_flags,
            ett_nvme_tcp, nvme_tcp_pdu_flags, (uint64_t)pdu_flags);

    hlen = tvb_get_int8(tvb, offset + 2);
    proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_hlen, tvb, offset + 2, 1,
            ENC_NA);

    pdo = tvb_get_int8(tvb, offset + 3);
    proto_tree_add_uint(nvme_tcp_tree, hf_nvme_tcp_pdo, tvb, offset + 3, 1,
            pdo);
    plen = tvb_get_letohl(tvb, offset + 4);
    proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_plen, tvb, offset + 4, 4,
            ENC_LITTLE_ENDIAN);
    col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_TCP);
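
    /* Digests are CRC32C over the covered bytes; the helper is seeded with
     * ~0 and the result inverted, matching the digest as it appears on the
     * wire, so verification compares the computed value against the
     * captured one. */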
    if (pdu_flags & NVME_TCP_F_HDGST) {
        unsigned hdgst_flags = PROTO_CHECKSUM_NO_FLAGS;
        uint32_t crc = 0;

        if (nvme_tcp_check_hdgst) {
            hdgst_flags = PROTO_CHECKSUM_VERIFY;
            crc = ~crc32c_tvb_offset_calculate(tvb, 0, hlen, ~0);
        }
        proto_tree_add_checksum(nvme_tcp_tree, tvb, hlen, hf_nvme_tcp_hdgst,
                hf_nvme_tcp_hdgst_status, NULL, pinfo,
                crc, ENC_NA, hdgst_flags);
        pdu_data_offset = NVME_TCP_DIGEST_LENGTH;
    }

    nvme_tcp_pdu_offset = offset + NVME_TCP_HEADER_SIZE;
    incapsuled_data_size = plen - hlen - pdu_data_offset;

    /* check for overflow (invalid packet) */
    if (incapsuled_data_size > tvb_reported_length(tvb)) {
        proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_unknown_data,
                tvb, NVME_TCP_HEADER_SIZE, -1, ENC_NA);
        return tvb_reported_length(tvb);
    }

    if (pdu_flags & NVME_TCP_F_DDGST) {
        unsigned ddgst_flags = PROTO_CHECKSUM_NO_FLAGS;
        uint32_t crc = 0;

        /* Check that the data has enough space (invalid packet) */
        if (incapsuled_data_size <= NVME_TCP_DIGEST_LENGTH) {
            proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_unknown_data,
                    tvb, NVME_TCP_HEADER_SIZE, -1, ENC_NA);
            return tvb_reported_length(tvb);
        }

        incapsuled_data_size -= NVME_TCP_DIGEST_LENGTH;
        if (nvme_tcp_check_ddgst) {
            ddgst_flags = PROTO_CHECKSUM_VERIFY;
            crc = ~crc32c_tvb_offset_calculate(tvb, pdo,
                    incapsuled_data_size, ~0);
        }
        proto_tree_add_checksum(nvme_tcp_tree, tvb,
                plen - NVME_TCP_DIGEST_LENGTH, hf_nvme_tcp_ddgst,
                hf_nvme_tcp_ddgst_status, NULL, pinfo,
                crc, ENC_NA, ddgst_flags);
    }

    switch (packet_type) {
    case nvme_tcp_icreq:
        dissect_nvme_tcp_icreq(tvb, pinfo, nvme_tcp_pdu_offset, nvme_tcp_tree);
        proto_item_set_len(ti, hlen);
        break;
    case nvme_tcp_icresp:
        dissect_nvme_tcp_icresp(tvb, pinfo, nvme_tcp_pdu_offset, nvme_tcp_tree);
        proto_item_set_len(ti, hlen);
        break;
    case nvme_tcp_cmd:
        dissect_nvme_tcp_command(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                nvme_tcp_pdu_offset, incapsuled_data_size, pdu_data_offset);
        break;
    case nvme_tcp_rsp:
        dissect_nvme_tcp_cqe(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                nvme_tcp_pdu_offset);
        proto_item_set_len(ti, NVME_TCP_HEADER_SIZE);
        break;
    case nvme_tcp_c2h_data:
        dissect_nvme_tcp_c2h_data(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                nvme_tcp_pdu_offset, pdu_data_offset);
        proto_item_set_len(ti, NVME_TCP_DATA_PDU_SIZE);
        break;
    case nvme_tcp_h2c_data:
        dissect_nvme_tcp_h2c_data(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                nvme_tcp_pdu_offset, pdu_data_offset);
        proto_item_set_len(ti, NVME_TCP_DATA_PDU_SIZE);
        break;
    case nvme_tcp_r2t:
        dissect_nvme_tcp_r2t(tvb, pinfo, nvme_tcp_pdu_offset, nvme_tcp_tree);
        break;
    case nvme_tcp_h2c_term:
        dissect_nvme_tcp_h2ctermreq(tvb, pinfo, tree, plen, offset);
        break;
    case nvme_tcp_c2h_term:
        dissect_nvme_tcp_c2htermreq(tvb, pinfo, tree, plen, offset);
        break;
    default:
        // TODO: nvme_tcp_kdreq, nvme_tcp_kdresp
        proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_unknown_data, tvb,
                offset, plen, ENC_NA);
        break;
    }

    return tvb_reported_length(tvb);
}
static int
dissect_nvme_tcp(tvbuff_t *tvb,
                 packet_info *pinfo,
                 proto_tree *tree,
                 void *data)
{
    col_clear(pinfo->cinfo, COL_INFO);
    col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_TCP);
    tcp_dissect_pdus(tvb, pinfo, tree, true, NVME_TCP_HEADER_SIZE,
            get_nvme_tcp_pdu_len, dissect_nvme_tcp_pdu, data);

    return tvb_reported_length(tvb);
}
static bool
test_nvme(packet_info *pinfo _U_, tvbuff_t *tvb, int offset, void *data _U_)
{
    /* This is not the strongest heuristic, but the port is IANA assigned,
     * so this is not a normal heuristic dissector but simply distinguishes
     * between NVMe/TCP and NVMe/TLS/TCP, and also detects PDU starts.
     */
    if (tvb_captured_length_remaining(tvb, offset) < NVME_TCP_HEADER_SIZE) {
        return false;
    }

    if (tvb_get_uint8(tvb, offset) > NVMET_MAX_PDU_TYPE) {
        // Unknown PDU type. Reject.
        return false;
    }

    if (tvb_get_uint8(tvb, offset + 2) < NVME_TCP_HEADER_SIZE) {
        // Header length - we could strengthen this by using the PDU type.
        return false;
    }

    // The next byte is the PDU Data Offset. Reserved in most types. (Does
    // that mean zero? That would strengthen the heuristic.)

    if (tvb_get_uint32(tvb, offset + 4, ENC_LITTLE_ENDIAN) < NVME_TCP_HEADER_SIZE) {
        // PDU Length (inc. header) - could strengthen by using the PDU type.
        return false;
    }

    return true;
}
static int
dissect_nvme_tcp_heur(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data)
{
    /* NVMe/TCP allows PDUs to span TCP segments (see Figure 5 of the NVMe/TCP
     * Transport Specification.) Also, some connections are over TLS.
     * Luckily, the PDU types for NVMe/TCP occupy the first byte, same as
     * the Content Type for TLS Records, and while these PDU types go to 11,
     * TLS Content Types start at 20 (and won't change, to enable
     * multiplexing).
     *
     * So if this doesn't look like the start of a NVMe/TCP PDU, reject it.
     * It might be TLS, or it might be the middle of a PDU.
     */
    if (!test_nvme(pinfo, tvb, 0, data)) {
        /* The TLS heuristic dissector should catch the TLS version. */
        return 0;
    }

    /* The start of a PDU. Set the other handle for this connection.
     * We can call tcp_dissect_pdus safely starting from here.
     */
    conversation_t *conversation = find_or_create_conversation(pinfo);
    conversation_set_dissector_from_frame_number(conversation, pinfo->num,
            nvmet_tls_handle);

    return dissect_nvme_tcp(tvb, pinfo, tree, data);
}
void proto_register_nvme_tcp(void) {

    static hf_register_info hf[] = {
        { &hf_nvme_tcp_type,
          { "Pdu Type", "nvme-tcp.type",
            FT_UINT8, BASE_DEC, VALS(nvme_tcp_pdu_type_vals),
            0x0, NULL, HFILL } },
        { &hf_nvme_tcp_flags,
          { "Pdu Specific Flags", "nvme-tcp.flags",
            FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_pdu_flags_hdgst,
          { "PDU Header Digest", "nvme-tcp.flags.pdu.hdgst",
            FT_BOOLEAN, 8, TFS(&tfs_set_notset),
            NVME_TCP_F_HDGST, NULL, HFILL } },
        { &hf_pdu_flags_ddgst,
          { "PDU Data Digest", "nvme-tcp.flags.pdu.ddgst",
            FT_BOOLEAN, 8, TFS(&tfs_set_notset),
            NVME_TCP_F_DDGST, NULL, HFILL } },
        { &hf_pdu_flags_data_last,
          { "PDU Data Last", "nvme-tcp.flags.pdu.data_last",
            FT_BOOLEAN, 8, TFS(&tfs_set_notset),
            NVME_TCP_F_DATA_LAST, NULL, HFILL } },
        { &hf_pdu_flags_data_success,
          { "PDU Data Success", "nvme-tcp.flags.pdu.data_success",
            FT_BOOLEAN, 8, TFS(&tfs_set_notset),
            NVME_TCP_F_DATA_SUCCESS, NULL, HFILL } },
        { &hf_nvme_tcp_hdgst,
          { "PDU Header Digest", "nvme-tcp.hdgst",
            FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_ddgst,
          { "PDU Data Digest", "nvme-tcp.ddgst",
            FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_hdgst_status,
          { "Header Digest Status", "nvme-tcp.hdgst.status",
            FT_UINT8, BASE_NONE, VALS(proto_checksum_vals),
            0x0, NULL, HFILL } },
        { &hf_nvme_tcp_ddgst_status,
          { "Data Digest Status", "nvme-tcp.ddgst.status",
            FT_UINT8, BASE_NONE, VALS(proto_checksum_vals),
            0x0, NULL, HFILL } },
        { &hf_nvme_tcp_hlen,
          { "Pdu Header Length", "nvme-tcp.hlen",
            FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_pdo,
          { "Pdu Data Offset", "nvme-tcp.pdo",
            FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_plen,
          { "Packet Length", "nvme-tcp.plen",
            FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
,
1017 { "ICReq", "nvme-tcp.icreq",
1018 FT_NONE
, BASE_NONE
, NULL
, 0x0, NULL
, HFILL
} },
1019 { &hf_nvme_tcp_icreq_pfv
,
1020 { "Pdu Version Format", "nvme-tcp.icreq.pfv",
1021 FT_UINT16
, BASE_DEC
, NULL
, 0x0, NULL
, HFILL
} },
1022 { &hf_nvme_tcp_icreq_maxr2t
,
1023 { "Maximum r2ts per request", "nvme-tcp.icreq.maxr2t",
1024 FT_UINT32
, BASE_DEC
, NULL
, 0x0, NULL
, HFILL
} },
1025 { &hf_nvme_tcp_icreq_hpda
,
1026 { "Host Pdu data alignment", "nvme-tcp.icreq.hpda",
1027 FT_UINT8
, BASE_DEC
, NULL
, 0x0, NULL
, HFILL
} },
1028 { &hf_nvme_tcp_icreq_digest
,
1029 { "Digest Types Enabled", "nvme-tcp.icreq.digest",
1030 FT_UINT8
, BASE_DEC
, NULL
, 0x0, NULL
, HFILL
} },
1031 { &hf_nvme_tcp_icresp
,
1032 { "ICResp", "nvme-tcp.icresp",
1033 FT_NONE
, BASE_NONE
, NULL
, 0x0, NULL
, HFILL
} },
1034 { &hf_nvme_tcp_icresp_pfv
,
1035 { "Pdu Version Format", "nvme-tcp.icresp.pfv",
1036 FT_UINT16
, BASE_DEC
, NULL
, 0x0,
        { &hf_nvme_tcp_icresp_cpda,
          { "Controller Pdu data alignment", "nvme-tcp.icresp.cpda",
            FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icresp_digest,
          { "Digest types enabled", "nvme-tcp.icresp.digest",
            FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icresp_maxdata,
          { "Maximum data capsules per r2t supported", "nvme-tcp.icresp.maxdata",
            FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        /* NVMe tcp c2h/h2c termreq fields */
        { &hf_nvme_tcp_c2htermreq,
          { "C2HTermReq", "nvme-tcp.c2htermreq",
            FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_fes,
          { "Fatal error status", "nvme-tcp.c2htermreq.fes",
            FT_UINT16, BASE_HEX, VALS(nvme_tcp_termreq_fes),
            0x0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_phfo,
          { "PDU header field offset", "nvme-tcp.c2htermreq.phfo",
            FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_phd,
          { "PDU header digest", "nvme-tcp.c2htermreq.phd",
            FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_upfo,
          { "Unsupported parameter field offset", "nvme-tcp.c2htermreq.upfo",
            FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_reserved,
          { "Reserved", "nvme-tcp.c2htermreq.reserved",
            FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_data,
          { "Terminated PDU header", "nvme-tcp.c2htermreq.data",
            FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq,
          { "H2CTermReq", "nvme-tcp.h2ctermreq",
            FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_fes,
          { "Fatal error status", "nvme-tcp.h2ctermreq.fes",
            FT_UINT16, BASE_HEX, VALS(nvme_tcp_termreq_fes),
            0x0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_phfo,
          { "PDU header field offset", "nvme-tcp.h2ctermreq.phfo",
            FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_phd,
          { "PDU header digest", "nvme-tcp.h2ctermreq.phd",
            FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_upfo,
          { "Unsupported parameter field offset", "nvme-tcp.h2ctermreq.upfo",
            FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_reserved,
          { "Reserved", "nvme-tcp.h2ctermreq.reserved",
            FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_data,
          { "Terminated PDU header", "nvme-tcp.h2ctermreq.data",
            FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL } },
        { &hf_nvme_fabrics_cmd_cid,
          { "Command ID", "nvme-tcp.cmd.cid",
            FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_unknown_data,
          { "Unknown Data", "nvme-tcp.unknown_data",
            FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        /* NVMe command data */
        { &hf_nvme_fabrics_cmd_data,
          { "Data", "nvme-tcp.cmd.data",
            FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_cmd_pkt,
          { "Cmd in", "nvme-tcp.cmd_pkt",
            FT_FRAMENUM, BASE_NONE, NULL, 0,
            "The Cmd for this transaction is in this frame", HFILL } },
        { &hf_nvme_fabrics_cmd_qid,
          { "Cmd Qid", "nvme-tcp.cmd.qid",
            FT_UINT16, BASE_HEX, NULL, 0x0,
            "Qid on which command is issued", HFILL } },
        /* NVMe TCP data response */
        { &hf_nvme_tcp_data_pdu,
          { "NVMe/TCP Data PDU", "nvme-tcp.data",
            FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_pdu_ttag,
          { "Transfer Tag", "nvme-tcp.ttag",
            FT_UINT16, BASE_HEX, NULL, 0x0,
            "Transfer tag (controller generated)", HFILL } },
        { &hf_nvme_tcp_data_pdu_data_offset,
          { "Data Offset", "nvme-tcp.data.offset",
            FT_UINT32, BASE_DEC, NULL, 0x0,
            "Offset from the start of the command data", HFILL } },
        { &hf_nvme_tcp_data_pdu_data_length,
          { "Data Length", "nvme-tcp.data.length",
            FT_UINT32, BASE_DEC, NULL, 0x0,
            "Length of the data stream", HFILL } },
        { &hf_nvme_tcp_data_pdu_data_resvd,
          { "Reserved", "nvme-tcp.data.rsvd",
            FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        /* NVMe TCP R2T pdu */
        { &hf_nvme_tcp_r2t_pdu,
          { "R2T", "nvme-tcp.r2t",
            FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_r2t_offset,
          { "R2T Offset", "nvme-tcp.r2t.offset",
            FT_UINT32, BASE_DEC, NULL, 0x0,
            "Offset from the start of the command data", HFILL } },
        { &hf_nvme_tcp_r2t_length,
          { "R2T Length", "nvme-tcp.r2t.length",
            FT_UINT32, BASE_DEC, NULL, 0x0,
            "Length of the data stream", HFILL } },
        { &hf_nvme_tcp_r2t_resvd,
          { "Reserved", "nvme-tcp.r2t.rsvd",
            FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } }
    };

    static int *ett[] = {
        &ett_nvme_tcp
    };

    proto_nvme_tcp = proto_register_protocol("NVM Express Fabrics TCP",
            NVME_FABRICS_TCP, "nvme-tcp");

    proto_register_field_array(proto_nvme_tcp, hf, array_length(hf));
    proto_register_subtree_array(ett, array_length(ett));

    /* These names actually work for their purpose. Note that if we're
     * already over TLS we don't need to do heuristics (it can't be more TLS
     * inside, and since we managed to decrypt the TLS we shouldn't have
     * missing frames and thus aren't in the middle of a PDU.)
     */
    nvmet_tcp_handle = register_dissector("nvme-tcp", dissect_nvme_tcp_heur,
            proto_nvme_tcp);
    nvmet_tls_handle = register_dissector_with_description("nvme-tls",
            "NVMe-over-TCP with TLS", dissect_nvme_tcp, proto_nvme_tcp);
}
void proto_reg_handoff_nvme_tcp(void) {
    module_t *nvme_tcp_module;
    nvme_tcp_module = prefs_register_protocol(proto_nvme_tcp, NULL);
    range_convert_str(wmem_epan_scope(), &gPORT_RANGE, NVME_TCP_PORT_RANGE,
            MAX_TCP_PORT);
    prefs_register_range_preference(nvme_tcp_module,
            "subsystem_ports",
            "Subsystem Ports Range",
            "Range of NVMe Subsystem ports "
            "(default " NVME_TCP_PORT_RANGE ")",
            &gPORT_RANGE, MAX_TCP_PORT);
    prefs_register_bool_preference(nvme_tcp_module, "check_hdgst",
            "Validate PDU header digest",
            "Whether to validate the PDU header digest or not.",
            &nvme_tcp_check_hdgst);
    prefs_register_bool_preference(nvme_tcp_module, "check_ddgst",
            "Validate PDU data digest",
            "Whether to validate the PDU data digest or not.",
            &nvme_tcp_check_ddgst);
    ssl_dissector_add(0, nvmet_tls_handle);
    dissector_add_uint_range("tcp.port", gPORT_RANGE, nvmet_tcp_handle);
    dissector_add_uint_range("tls.port", gPORT_RANGE, nvmet_tls_handle);
}

/*
 * Editor modelines - https://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * vi: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */