/* packet-nvme-tcp.c
 * Routines for NVM Express over Fabrics (TCP) dissection
 * Code by Solganik Alexander <solganik@gmail.com>
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

/*
 * Copyright (C) 2019 Lightbits Labs Ltd. - All Rights Reserved
 */

/*
 * NVM Express is a high-speed interface for accessing solid-state drives.
 * NVM Express specifications are maintained by the NVM Express industry
 * association at http://www.nvmexpress.org.
 *
 * This file adds support for dissecting NVM Express over Fabrics packets
 * carried over TCP. It provides basic dissection of commands and
 * completions.
 *
 * Current dissection supports dissection of
 * (a) NVMe cmd and cqe
 * (b) NVMe Fabric command and cqe
 * As part of this, it also calculates cmd completion latencies.
 *
 * The NVMe/TCP port assigned by IANA that maps to the NVMe-oF service
 * can be found at
 * http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=NVM+Express
 */
#include "config.h"

#include <epan/packet.h>
#include <epan/prefs.h>
#include <epan/conversation.h>
#include <epan/crc32-tvb.h>
#include <epan/tfs.h>
#include <wsutil/array.h>
#include "packet-tcp.h"
#include "packet-nvme.h"

#include "packet-tls.h"
static int proto_nvme_tcp;
static dissector_handle_t nvmet_tcp_handle;
static dissector_handle_t nvmet_tls_handle;

#define NVME_TCP_PORT_RANGE "4420,8009" /* IANA registered */

#define NVME_FABRICS_TCP "NVMe/TCP"
#define NVME_TCP_HEADER_SIZE 8
#define PDU_LEN_OFFSET_FROM_HEADER 4
static range_t *gPORT_RANGE;
static bool nvme_tcp_check_hdgst;
static bool nvme_tcp_check_ddgst;
#define NVME_TCP_DATA_PDU_SIZE 24
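
/*
 * Every NVMe/TCP PDU starts with an 8-byte common header (the offsets
 * below match how dissect_nvme_tcp_pdu() decodes it):
 *   byte 0     PDU type (see nvme_tcp_pdu_type below)
 *   byte 1     PDU specific flags
 *   byte 2     header length (HLEN)
 *   byte 3     PDU data offset (PDO)
 *   bytes 4-7  total PDU length (PLEN), little-endian
 */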
enum nvme_tcp_pdu_type {
    nvme_tcp_icreq = 0x0,
    nvme_tcp_icresp = 0x1,
    nvme_tcp_h2c_term = 0x2,
    nvme_tcp_c2h_term = 0x3,
    nvme_tcp_cmd = 0x4,
    nvme_tcp_rsp = 0x5,
    nvme_tcp_h2c_data = 0x6,
    nvme_tcp_c2h_data = 0x7,
    nvme_tcp_r2t = 0x9,
    nvme_tcp_kdreq = 0xa,
    nvme_tcp_kdresp = 0xb,
    NVMET_MAX_PDU_TYPE = nvme_tcp_kdresp
};
static const value_string nvme_tcp_pdu_type_vals[] = {
    { nvme_tcp_icreq, "ICReq" },
    { nvme_tcp_icresp, "ICResp" },
    { nvme_tcp_h2c_term, "H2CTerm" },
    { nvme_tcp_c2h_term, "C2HTerm" },
    { nvme_tcp_cmd, "CapsuleCommand" },
    { nvme_tcp_rsp, "CapsuleResponse" },
    { nvme_tcp_h2c_data, "H2CData" },
    { nvme_tcp_c2h_data, "C2HData" },
    { nvme_tcp_r2t, "Ready To Transfer" },
    { nvme_tcp_kdreq, "Kickstart Discovery Request" },
    { nvme_tcp_kdresp, "Kickstart Discovery Response" },
    { 0, NULL }
};
static const value_string nvme_tcp_termreq_fes[] = {
    { 0x0, "Reserved" },
    { 0x1, "Invalid PDU Header Field" },
    { 0x2, "PDU Sequence Error" },
    { 0x3, "Header Digest Error" },
    { 0x4, "Data Transfer Out of Range" },
    { 0x5, "R2T Limit Exceeded" },
    { 0x6, "Unsupported Parameter" },
    { 0, NULL },
};
enum nvme_tcp_fatal_error_status {
    NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
    NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
    NVME_TCP_FES_HDR_DIGEST_ERR = 0x03,
    NVME_TCP_FES_DATA_OUT_OF_RANGE = 0x04,
    NVME_TCP_FES_R2T_LIMIT_EXCEEDED = 0x05,
    NVME_TCP_FES_DATA_LIMIT_EXCEEDED = 0x05,
    NVME_TCP_FES_UNSUPPORTED_PARAM = 0x06,
};
enum nvme_tcp_pdu_flags {
    NVME_TCP_F_HDGST = (1 << 0),
    NVME_TCP_F_DDGST = (1 << 1),
    NVME_TCP_F_DATA_LAST = (1 << 2),
    NVME_TCP_F_DATA_SUCCESS = (1 << 3),
};
enum nvme_tcp_digest_option {
    NVME_TCP_HDR_DIGEST_ENABLE = (1 << 0),
    NVME_TCP_DATA_DIGEST_ENABLE = (1 << 1),
};
#define NVME_FABRIC_CMD_SIZE NVME_CMD_SIZE
#define NVME_FABRIC_CQE_SIZE NVME_CQE_SIZE
#define NVME_TCP_DIGEST_LENGTH 4
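
/*
 * When header/data digests are negotiated via the ICReq/ICResp digest
 * fields, a 4-byte CRC32C digest follows the PDU header and/or trails
 * the PDU data. Verification is optional and controlled by the
 * check_hdgst/check_ddgst preferences registered in the handoff below.
 */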
struct nvme_tcp_q_ctx {
    struct nvme_q_ctx n_q_ctx;
};

struct nvme_tcp_cmd_ctx {
    struct nvme_cmd_ctx n_cmd_ctx;
};

void proto_reg_handoff_nvme_tcp(void);
void proto_register_nvme_tcp(void);
static int hf_nvme_tcp_type;
static int hf_nvme_tcp_flags;
static int hf_pdu_flags_hdgst;
static int hf_pdu_flags_ddgst;
static int hf_pdu_flags_data_last;
static int hf_pdu_flags_data_success;

static int * const nvme_tcp_pdu_flags[] = {
    &hf_pdu_flags_hdgst,
    &hf_pdu_flags_ddgst,
    &hf_pdu_flags_data_last,
    &hf_pdu_flags_data_success,
    NULL
};

static int hf_nvme_tcp_hdgst;
static int hf_nvme_tcp_ddgst;
static int hf_nvme_tcp_hlen;
static int hf_nvme_tcp_pdo;
static int hf_nvme_tcp_plen;
static int hf_nvme_tcp_hdgst_status;
static int hf_nvme_tcp_ddgst_status;

/* NVMe tcp icreq/icresp fields */
static int hf_nvme_tcp_icreq;
static int hf_nvme_tcp_icreq_pfv;
static int hf_nvme_tcp_icreq_maxr2t;
static int hf_nvme_tcp_icreq_hpda;
static int hf_nvme_tcp_icreq_digest;
static int hf_nvme_tcp_icresp;
static int hf_nvme_tcp_icresp_pfv;
static int hf_nvme_tcp_icresp_cpda;
static int hf_nvme_tcp_icresp_digest;
static int hf_nvme_tcp_icresp_maxdata;

/* NVMe tcp c2h/h2c termreq fields */
static int hf_nvme_tcp_c2htermreq;
static int hf_nvme_tcp_c2htermreq_fes;
static int hf_nvme_tcp_c2htermreq_phfo;
static int hf_nvme_tcp_c2htermreq_phd;
static int hf_nvme_tcp_c2htermreq_upfo;
static int hf_nvme_tcp_c2htermreq_reserved;
static int hf_nvme_tcp_c2htermreq_data;
static int hf_nvme_tcp_h2ctermreq;
static int hf_nvme_tcp_h2ctermreq_fes;
static int hf_nvme_tcp_h2ctermreq_phfo;
static int hf_nvme_tcp_h2ctermreq_phd;
static int hf_nvme_tcp_h2ctermreq_upfo;
static int hf_nvme_tcp_h2ctermreq_reserved;
static int hf_nvme_tcp_h2ctermreq_data;

/* NVMe fabrics command */
static int hf_nvme_fabrics_cmd_cid;

/* NVMe fabrics command data */
static int hf_nvme_fabrics_cmd_data;
static int hf_nvme_tcp_unknown_data;

static int hf_nvme_tcp_r2t_pdu;
static int hf_nvme_tcp_r2t_offset;
static int hf_nvme_tcp_r2t_length;
static int hf_nvme_tcp_r2t_resvd;

/* Tracking Cmd and its respective CQE */
static int hf_nvme_tcp_cmd_pkt;
static int hf_nvme_fabrics_cmd_qid;

/* Data response fields */
static int hf_nvme_tcp_data_pdu;
static int hf_nvme_tcp_pdu_ttag;
static int hf_nvme_tcp_data_pdu_data_offset;
static int hf_nvme_tcp_data_pdu_data_length;
static int hf_nvme_tcp_data_pdu_data_resvd;

static int ett_nvme_tcp;
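
/* Length getter for tcp_dissect_pdus(): the total PDU length (PLEN) is a
 * little-endian 32-bit field at offset 4 of the common PDU header. */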
static unsigned
get_nvme_tcp_pdu_len(packet_info *pinfo _U_,
                     tvbuff_t *tvb,
                     int offset,
                     void* data _U_)
{
    return tvb_get_letohl(tvb, offset + PDU_LEN_OFFSET_FROM_HEADER);
}
static void
dissect_nvme_tcp_icreq(tvbuff_t *tvb,
                       packet_info *pinfo,
                       int offset,
                       proto_tree *tree)
{
    proto_item *tf;
    proto_item *icreq_tree;

    col_set_str(pinfo->cinfo, COL_INFO, "Initialize Connection Request");
    tf = proto_tree_add_item(tree, hf_nvme_tcp_icreq, tvb, offset, 8, ENC_NA);
    icreq_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_pfv, tvb, offset, 2,
                        ENC_LITTLE_ENDIAN);
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_hpda, tvb, offset + 2, 1,
                        ENC_NA);
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_digest, tvb, offset + 3,
                        1, ENC_NA);
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_maxr2t, tvb, offset + 4,
                        4, ENC_LITTLE_ENDIAN);
}
static void
dissect_nvme_tcp_icresp(tvbuff_t *tvb,
                        packet_info *pinfo,
                        int offset,
                        proto_tree *tree)
{
    proto_item *tf;
    proto_item *icresp_tree;

    col_set_str(pinfo->cinfo, COL_INFO, "Initialize Connection Response");
    tf = proto_tree_add_item(tree, hf_nvme_tcp_icresp, tvb, offset, 8, ENC_NA);
    icresp_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_pfv, tvb, offset, 2,
                        ENC_LITTLE_ENDIAN);
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_cpda, tvb, offset + 2,
                        1, ENC_NA);
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_digest, tvb, offset + 3,
                        1, ENC_NA);
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_maxdata, tvb,
                        offset + 4, 4, ENC_LITTLE_ENDIAN);
}
static struct nvme_tcp_cmd_ctx*
bind_cmd_to_qctx(packet_info *pinfo,
                 struct nvme_q_ctx *q_ctx,
                 uint16_t cmd_id)
{
    struct nvme_tcp_cmd_ctx *ctx;

    /* Wireshark will dissect the same packet multiple times
     * when the display is refreshed */
    if (!PINFO_FD_VISITED(pinfo)) {
        ctx = wmem_new0(wmem_file_scope(), struct nvme_tcp_cmd_ctx);
        nvme_add_cmd_to_pending_list(pinfo, q_ctx, &ctx->n_cmd_ctx, (void*) ctx,
                                     cmd_id);
    } else {
        /* Already visited this frame */
        ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_done_list(pinfo,
                                                                      q_ctx, cmd_id);
        /* If we have already visited the frame but haven't found the
         * completion yet, the cmd won't be in the done queue, so allocate
         * a dummy ctx for the rest of the processing.
         */
        if (!ctx)
            ctx = wmem_new0(wmem_file_scope(), struct nvme_tcp_cmd_ctx);
    }

    return ctx;
}
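
/* Dissect a CapsuleCommand PDU: either a Fabrics command (opcode
 * NVME_FABRIC_OPC) or a regular NVMe command, each optionally followed
 * by in-capsule data. */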
static void
dissect_nvme_tcp_command(tvbuff_t *tvb,
                         packet_info *pinfo,
                         proto_tree *root_tree,
                         proto_tree *nvme_tcp_tree,
                         proto_item *nvme_tcp_ti,
                         struct nvme_tcp_q_ctx *queue, int offset,
                         uint32_t incapsuled_data_size,
                         uint32_t data_offset)
{
    struct nvme_tcp_cmd_ctx *cmd_ctx;
    uint16_t cmd_id;
    uint8_t opcode;
    const char *cmd_string;

    opcode = tvb_get_uint8(tvb, offset);
    cmd_id = tvb_get_uint16(tvb, offset + 2, ENC_LITTLE_ENDIAN);
    cmd_ctx = bind_cmd_to_qctx(pinfo, &queue->n_q_ctx, cmd_id);

    /* If the record did not contain a connect command we won't know the
     * qid, so guess whether this is an admin queue */
    if ((queue->n_q_ctx.qid == UINT16_MAX) && !nvme_is_io_queue_opcode(opcode))
        queue->n_q_ctx.qid = 0;

    if (opcode == NVME_FABRIC_OPC) {
        cmd_ctx->n_cmd_ctx.fabric = true;
        dissect_nvmeof_fabric_cmd(tvb, pinfo, nvme_tcp_tree, &queue->n_q_ctx, &cmd_ctx->n_cmd_ctx, offset, false);
        if (cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype == NVME_FCTYPE_CONNECT)
            queue->n_q_ctx.qid = cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.cnct.qid;
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
        proto_item_append_text(nvme_tcp_ti,
                ", Fabrics Type: %s (0x%02x) Cmd ID: 0x%04x", cmd_string,
                cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype, cmd_id);
        if (incapsuled_data_size > 0) {
            proto_tree *data_tree;
            proto_item *ti;

            ti = proto_tree_add_item(nvme_tcp_tree, hf_nvme_fabrics_cmd_data, tvb, offset, incapsuled_data_size, ENC_NA);
            data_tree = proto_item_add_subtree(ti, ett_nvme_tcp);
            dissect_nvmeof_cmd_data(tvb, pinfo, data_tree, offset + NVME_FABRIC_CMD_SIZE + data_offset, &queue->n_q_ctx, &cmd_ctx->n_cmd_ctx, incapsuled_data_size);
        }
        return;
    }

    /* In case of an encapsulated NVMe command, the NVMe/TCP item length is
     * only the header */
    proto_item_set_len(nvme_tcp_ti, NVME_TCP_HEADER_SIZE);
    tvbuff_t *nvme_tvbuff;
    cmd_ctx->n_cmd_ctx.fabric = false;
    nvme_tvbuff = tvb_new_subset_remaining(tvb, NVME_TCP_HEADER_SIZE);
    cmd_string = nvme_get_opcode_string(opcode, queue->n_q_ctx.qid);
    dissect_nvme_cmd(nvme_tvbuff, pinfo, root_tree, &queue->n_q_ctx,
                     &cmd_ctx->n_cmd_ctx);
    proto_item_append_text(nvme_tcp_ti,
            ", NVMe Opcode: %s (0x%02x) Cmd ID: 0x%04x", cmd_string, opcode,
            cmd_id);

    /* This is an inline write */
    if (incapsuled_data_size > 0) {
        tvbuff_t *nvme_data;

        nvme_data = tvb_new_subset_remaining(tvb, offset +
                NVME_CMD_SIZE + data_offset);
        dissect_nvme_data_response(nvme_data, pinfo, root_tree, &queue->n_q_ctx,
                &cmd_ctx->n_cmd_ctx, incapsuled_data_size, true);
    }
}
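
/* Dissect the fields common to C2HData and H2CData PDUs: command id,
 * transfer tag, data offset (DATAO), data length (DATAL) and a reserved
 * word. Returns DATAL so callers can locate and dissect the payload. */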
static uint32_t
dissect_nvme_tcp_data_pdu(tvbuff_t *tvb,
                          packet_info *pinfo,
                          int offset,
                          proto_tree *tree) {
    uint32_t data_length;
    proto_item *tf;
    proto_item *data_tree;

    col_set_str(pinfo->cinfo, COL_PROTOCOL, "NVMe");

    tf = proto_tree_add_item(tree, hf_nvme_tcp_data_pdu, tvb, offset,
                             NVME_TCP_DATA_PDU_SIZE - NVME_TCP_HEADER_SIZE, ENC_NA);
    data_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(data_tree, hf_nvme_fabrics_cmd_cid, tvb, offset, 2,
                        ENC_LITTLE_ENDIAN);

    proto_tree_add_item(data_tree, hf_nvme_tcp_pdu_ttag, tvb, offset + 2, 2,
                        ENC_LITTLE_ENDIAN);

    proto_tree_add_item(data_tree, hf_nvme_tcp_data_pdu_data_offset, tvb,
                        offset + 4, 4, ENC_LITTLE_ENDIAN);

    data_length = tvb_get_uint32(tvb, offset + 8, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(data_tree, hf_nvme_tcp_data_pdu_data_length, tvb,
                        offset + 8, 4, ENC_LITTLE_ENDIAN);

    proto_tree_add_item(data_tree, hf_nvme_tcp_data_pdu_data_resvd, tvb,
                        offset + 12, 4, ENC_NA);

    return data_length;
}
static void
dissect_nvme_tcp_c2h_data(tvbuff_t *tvb,
                          packet_info *pinfo,
                          proto_tree *root_tree,
                          proto_tree *nvme_tcp_tree,
                          proto_item *nvme_tcp_ti,
                          struct nvme_tcp_q_ctx *queue,
                          int offset,
                          uint32_t data_offset)
{
    struct nvme_tcp_cmd_ctx *cmd_ctx;
    uint32_t cmd_id;
    uint32_t data_length;
    tvbuff_t *nvme_data;
    const char *cmd_string;

    cmd_id = tvb_get_uint16(tvb, offset, ENC_LITTLE_ENDIAN);
    data_length = dissect_nvme_tcp_data_pdu(tvb, pinfo, offset, nvme_tcp_tree);

    /* This can identify our packet uniquely */
    if (!PINFO_FD_VISITED(pinfo)) {
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_pending_list(
                &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx) {
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
                                data_length, ENC_NA);
            return;
        }

        /* In order to be able to look up the command context later, add
         * this command to the data responses */
        cmd_ctx->n_cmd_ctx.data_tr_pkt_num[0] = pinfo->num;
        nvme_add_data_tr_pkt(&queue->n_q_ctx, &cmd_ctx->n_cmd_ctx, cmd_id, pinfo->num);
    } else {
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_data_tr_pkt(&queue->n_q_ctx,
                cmd_id, pinfo->num);
        if (!cmd_ctx) {
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
                                data_length, ENC_NA);
            return;
        }
    }

    nvme_publish_to_cmd_link(nvme_tcp_tree, tvb,
                             hf_nvme_tcp_cmd_pkt, &cmd_ctx->n_cmd_ctx);

    if (cmd_ctx->n_cmd_ctx.fabric) {
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
        proto_item_append_text(nvme_tcp_ti,
                ", C2HData Fabrics Type: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
                cmd_string, cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype, cmd_id, data_length);
    } else {
        cmd_string = nvme_get_opcode_string(cmd_ctx->n_cmd_ctx.opcode,
                                            queue->n_q_ctx.qid);
        proto_item_append_text(nvme_tcp_ti,
                ", C2HData Opcode: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
                cmd_string, cmd_ctx->n_cmd_ctx.opcode, cmd_id, data_length);
    }

    nvme_data = tvb_new_subset_remaining(tvb, NVME_TCP_DATA_PDU_SIZE + data_offset);

    dissect_nvme_data_response(nvme_data, pinfo, root_tree, &queue->n_q_ctx,
                               &cmd_ctx->n_cmd_ctx, data_length, false);
}
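
/* Data requests are tracked in a wmem tree keyed by the pair
 * {frame number, command id}, so that on later passes the matching
 * command context can be re-fetched for the exact frame being
 * redissected. */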
static void nvme_tcp_build_cmd_key(uint32_t *frame_num, uint32_t *cmd_id, wmem_tree_key_t *key)
{
    key[0].key = frame_num;
    key[0].length = 1;
    key[1].key = cmd_id;
    key[1].length = 1;
    key[2].key = NULL;
    key[2].length = 0;
}

static void nvme_tcp_add_data_request(packet_info *pinfo, struct nvme_q_ctx *q_ctx,
                                      struct nvme_tcp_cmd_ctx *cmd_ctx, uint16_t cmd_id)
{
    wmem_tree_key_t cmd_key[3];
    uint32_t cmd_id_key = cmd_id;

    nvme_tcp_build_cmd_key(&pinfo->num, &cmd_id_key, cmd_key);
    cmd_ctx->n_cmd_ctx.data_req_pkt_num = pinfo->num;
    cmd_ctx->n_cmd_ctx.data_tr_pkt_num[0] = 0;
    wmem_tree_insert32_array(q_ctx->data_requests, cmd_key, (void *)cmd_ctx);
}

static struct nvme_tcp_cmd_ctx* nvme_tcp_lookup_data_request(packet_info *pinfo,
                                                             struct nvme_q_ctx *q_ctx,
                                                             uint16_t cmd_id)
{
    wmem_tree_key_t cmd_key[3];
    uint32_t cmd_id_key = cmd_id;

    nvme_tcp_build_cmd_key(&pinfo->num, &cmd_id_key, cmd_key);
    return (struct nvme_tcp_cmd_ctx*)wmem_tree_lookup32_array(q_ctx->data_requests, cmd_key);
}
static void
dissect_nvme_tcp_h2c_data(tvbuff_t *tvb,
                          packet_info *pinfo,
                          proto_tree *root_tree,
                          proto_tree *nvme_tcp_tree,
                          proto_item *nvme_tcp_ti,
                          struct nvme_tcp_q_ctx *queue,
                          int offset,
                          uint32_t data_offset)
{
    struct nvme_tcp_cmd_ctx *cmd_ctx;
    uint16_t cmd_id;
    uint32_t data_length;
    tvbuff_t *nvme_data;
    const char *cmd_string;

    cmd_id = tvb_get_uint16(tvb, offset, ENC_LITTLE_ENDIAN);
    data_length = dissect_nvme_tcp_data_pdu(tvb, pinfo, offset, nvme_tcp_tree);

    if (!PINFO_FD_VISITED(pinfo)) {
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_pending_list(
                &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx) {
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
                                data_length, ENC_NA);
            return;
        }

        /* Record the data request here; the frame number and command id
         * will be the key used to fetch the data request later */
        nvme_tcp_add_data_request(pinfo, &queue->n_q_ctx, cmd_ctx, cmd_id);
    } else {
        cmd_ctx = nvme_tcp_lookup_data_request(pinfo, &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx) {
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
                                data_length, ENC_NA);
            return;
        }
    }

    nvme_publish_to_cmd_link(nvme_tcp_tree, tvb,
                             hf_nvme_tcp_cmd_pkt, &cmd_ctx->n_cmd_ctx);

    /* Fabrics commands should not have H2CData */
    if (cmd_ctx->n_cmd_ctx.fabric) {
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
        proto_item_append_text(nvme_tcp_ti,
                ", H2CData Fabrics Type: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
                cmd_string, cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype, cmd_id, data_length);
        proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
                            data_length, ENC_NA);
        return;
    }

    cmd_string = nvme_get_opcode_string(cmd_ctx->n_cmd_ctx.opcode,
                                        queue->n_q_ctx.qid);
    proto_item_append_text(nvme_tcp_ti,
            ", H2CData Opcode: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
            cmd_string, cmd_ctx->n_cmd_ctx.opcode, cmd_id, data_length);

    nvme_data = tvb_new_subset_remaining(tvb, NVME_TCP_DATA_PDU_SIZE + data_offset);
    dissect_nvme_data_response(nvme_data, pinfo, root_tree, &queue->n_q_ctx,
                               &cmd_ctx->n_cmd_ctx, data_length, false);
}
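
/* Termination request PDUs (H2CTermReq/C2HTermReq) carry a fatal error
 * status (FES) at offset 8, four bytes of FES-dependent additional
 * information at offset 10, and a copy of the offending PDU header
 * starting at offset 24. */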
static void
dissect_nvme_tcp_h2ctermreq(tvbuff_t *tvb, packet_info *pinfo,
                            proto_tree *tree, uint32_t packet_len, int offset)
{
    proto_item *tf;
    proto_item *h2ctermreq_tree;
    uint16_t fes;

    col_set_str(pinfo->cinfo, COL_INFO,
                "Host to Controller Termination Request");
    tf = proto_tree_add_item(tree, hf_nvme_tcp_h2ctermreq,
                             tvb, offset, 8, ENC_NA);
    h2ctermreq_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_fes,
                        tvb, offset + 8, 2, ENC_LITTLE_ENDIAN);
    fes = tvb_get_uint16(tvb, offset + 8, ENC_LITTLE_ENDIAN);
    switch (fes) {
    case NVME_TCP_FES_INVALID_PDU_HDR:
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_phfo,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    case NVME_TCP_FES_HDR_DIGEST_ERR:
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_phd,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    case NVME_TCP_FES_UNSUPPORTED_PARAM:
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_upfo,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    default:
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_reserved,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    }
    proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_data,
                        tvb, offset + 24, packet_len - 24, ENC_NA);
}
static void
dissect_nvme_tcp_c2htermreq(tvbuff_t *tvb, packet_info *pinfo,
                            proto_tree *tree, uint32_t packet_len, int offset)
{
    proto_item *tf;
    proto_item *c2htermreq_tree;
    uint16_t fes;

    col_set_str(pinfo->cinfo, COL_INFO,
                "Controller to Host Termination Request");
    tf = proto_tree_add_item(tree, hf_nvme_tcp_c2htermreq,
                             tvb, offset, 8, ENC_NA);
    c2htermreq_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_fes, tvb,
                        offset + 8, 2, ENC_LITTLE_ENDIAN);
    fes = tvb_get_uint16(tvb, offset + 8, ENC_LITTLE_ENDIAN);
    switch (fes) {
    case NVME_TCP_FES_INVALID_PDU_HDR:
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_phfo,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    case NVME_TCP_FES_HDR_DIGEST_ERR:
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_phd,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    case NVME_TCP_FES_UNSUPPORTED_PARAM:
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_upfo,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    default:
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_reserved,
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
        break;
    }
    proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_data,
                        tvb, offset + 24, packet_len - 24, ENC_NA);
}
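
/* Dissect a CapsuleResponse (CQE) PDU. On the first pass the CQE is
 * matched with its pending command and moved to the done list; the
 * recorded cmd/cqe frame numbers are what allow completion latency to
 * be calculated. */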
static void
dissect_nvme_tcp_cqe(tvbuff_t *tvb,
                     packet_info *pinfo,
                     proto_tree *root_tree,
                     proto_tree *nvme_tree,
                     proto_item *ti,
                     struct nvme_tcp_q_ctx *queue,
                     int offset)
{
    struct nvme_tcp_cmd_ctx *cmd_ctx;
    uint16_t cmd_id;
    const char *cmd_string;

    cmd_id = tvb_get_uint16(tvb, offset + 12, ENC_LITTLE_ENDIAN);

    /* Wireshark will dissect the packet several times when the display is
     * refreshed; we need to track state changes only once */
    if (!PINFO_FD_VISITED(pinfo)) {
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_pending_list(
                &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx || cmd_ctx->n_cmd_ctx.cqe_pkt_num) {
            proto_tree_add_item(nvme_tree, hf_nvme_tcp_unknown_data, tvb, offset,
                                NVME_FABRIC_CQE_SIZE, ENC_NA);
            return;
        }

        cmd_ctx->n_cmd_ctx.cqe_pkt_num = pinfo->num;
        nvme_add_cmd_cqe_to_done_list(&queue->n_q_ctx, &cmd_ctx->n_cmd_ctx,
                                      cmd_id);

    } else {
        cmd_ctx = (struct nvme_tcp_cmd_ctx *) nvme_lookup_cmd_in_done_list(pinfo,
                                                                           &queue->n_q_ctx, cmd_id);
        if (!cmd_ctx) {
            proto_tree_add_item(nvme_tree, hf_nvme_tcp_unknown_data, tvb, offset,
                                NVME_FABRIC_CQE_SIZE, ENC_NA);
            return;
        }
    }

    nvme_update_cmd_end_info(pinfo, &cmd_ctx->n_cmd_ctx);

    if (cmd_ctx->n_cmd_ctx.fabric) {
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
        proto_item_append_text(ti,
                ", Cqe Fabrics Cmd: %s (0x%02x) Cmd ID: 0x%04x", cmd_string,
                cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype, cmd_id);

        dissect_nvmeof_fabric_cqe(tvb, pinfo, nvme_tree, &cmd_ctx->n_cmd_ctx, offset);
    } else {
        tvbuff_t *nvme_tvb;
        proto_item_set_len(ti, NVME_TCP_HEADER_SIZE);
        cmd_string = nvme_get_opcode_string(cmd_ctx->n_cmd_ctx.opcode,
                                            queue->n_q_ctx.qid);

        proto_item_append_text(ti, ", Cqe NVMe Cmd: %s (0x%02x) Cmd ID: 0x%04x",
                               cmd_string, cmd_ctx->n_cmd_ctx.opcode, cmd_id);
        /* get the encapsulated NVMe command */
        nvme_tvb = tvb_new_subset_remaining(tvb, NVME_TCP_HEADER_SIZE);
        dissect_nvme_cqe(nvme_tvb, pinfo, root_tree, &queue->n_q_ctx, &cmd_ctx->n_cmd_ctx);
    }
}
static void
dissect_nvme_tcp_r2t(tvbuff_t *tvb,
                     packet_info *pinfo,
                     int offset,
                     proto_tree *tree)
{
    proto_item *tf;
    proto_item *r2t_tree;

    tf = proto_tree_add_item(tree, hf_nvme_tcp_r2t_pdu, tvb, offset, -1,
                             ENC_NA);
    r2t_tree = proto_item_add_subtree(tf, ett_nvme_tcp);

    col_append_sep_fstr(pinfo->cinfo, COL_INFO, " | ", "Ready To Transfer");

    proto_tree_add_item(r2t_tree, hf_nvme_fabrics_cmd_cid, tvb, offset, 2,
                        ENC_LITTLE_ENDIAN);
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_pdu_ttag, tvb, offset + 2, 2,
                        ENC_LITTLE_ENDIAN);
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_r2t_offset, tvb, offset + 4, 4,
                        ENC_LITTLE_ENDIAN);
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_r2t_length, tvb, offset + 8, 4,
                        ENC_LITTLE_ENDIAN);
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_r2t_resvd, tvb, offset + 12, 4,
                        ENC_NA);
}
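
/* Dissect a single reassembled PDU: attach the queue context to the TCP
 * conversation, decode the 8-byte common header, verify the optional
 * header/data digests, and then dispatch on the PDU type. */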
static int
dissect_nvme_tcp_pdu(tvbuff_t *tvb,
                     packet_info *pinfo,
                     proto_tree *tree,
                     void* data _U_)
{
    conversation_t *conversation;
    struct nvme_tcp_q_ctx *q_ctx;
    proto_item *ti;
    int offset = 0;
    int nvme_tcp_pdu_offset;
    proto_tree *nvme_tcp_tree;
    unsigned packet_type;
    uint8_t hlen, pdo;
    uint8_t pdu_flags;
    uint32_t plen;
    uint32_t incapsuled_data_size;
    uint32_t pdu_data_offset = 0;

    conversation = find_or_create_conversation(pinfo);
    q_ctx = (struct nvme_tcp_q_ctx *)
            conversation_get_proto_data(conversation, proto_nvme_tcp);

    if (!q_ctx) {
        q_ctx = wmem_new0(wmem_file_scope(), struct nvme_tcp_q_ctx);
        q_ctx->n_q_ctx.pending_cmds = wmem_tree_new(wmem_file_scope());
        q_ctx->n_q_ctx.done_cmds = wmem_tree_new(wmem_file_scope());
        q_ctx->n_q_ctx.data_requests = wmem_tree_new(wmem_file_scope());
        q_ctx->n_q_ctx.data_responses = wmem_tree_new(wmem_file_scope());
        /* Initially set to non-0 so that by default queues are IO queues;
         * this is required to be able to dissect correctly even
         * if we miss the connect command */
        q_ctx->n_q_ctx.qid = UINT16_MAX;
        conversation_add_proto_data(conversation, proto_nvme_tcp, q_ctx);
    }

    ti = proto_tree_add_item(tree, proto_nvme_tcp, tvb, 0, -1, ENC_NA);
    nvme_tcp_tree = proto_item_add_subtree(ti, ett_nvme_tcp);

    if (q_ctx->n_q_ctx.qid != UINT16_MAX)
        nvme_publish_qid(nvme_tcp_tree, hf_nvme_fabrics_cmd_qid,
                         q_ctx->n_q_ctx.qid);

    packet_type = tvb_get_uint8(tvb, offset);
    proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_type, tvb, offset, 1,
                        ENC_NA);

    pdu_flags = tvb_get_uint8(tvb, offset + 1);
    proto_tree_add_bitmask_value(nvme_tcp_tree, tvb, offset + 1, hf_nvme_tcp_flags,
                                 ett_nvme_tcp, nvme_tcp_pdu_flags, (uint64_t)pdu_flags);

    hlen = tvb_get_int8(tvb, offset + 2);
    proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_hlen, tvb, offset + 2, 1,
                        ENC_NA);

    pdo = tvb_get_int8(tvb, offset + 3);
    proto_tree_add_uint(nvme_tcp_tree, hf_nvme_tcp_pdo, tvb, offset + 3, 1,
                        pdo);
    plen = tvb_get_letohl(tvb, offset + 4);
    proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_plen, tvb, offset + 4, 4,
                        ENC_LITTLE_ENDIAN);
    col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_TCP);

    if (pdu_flags & NVME_TCP_F_HDGST) {
        unsigned hdgst_flags = PROTO_CHECKSUM_NO_FLAGS;
        uint32_t crc = 0;

        if (nvme_tcp_check_hdgst) {
            hdgst_flags = PROTO_CHECKSUM_VERIFY;
            crc = ~crc32c_tvb_offset_calculate(tvb, 0, hlen, ~0);
        }
        proto_tree_add_checksum(nvme_tcp_tree, tvb, hlen, hf_nvme_tcp_hdgst,
                                hf_nvme_tcp_hdgst_status, NULL, pinfo,
                                crc, ENC_NA, hdgst_flags);
        pdu_data_offset = NVME_TCP_DIGEST_LENGTH;
    }

    nvme_tcp_pdu_offset = offset + NVME_TCP_HEADER_SIZE;
    incapsuled_data_size = plen - hlen - pdu_data_offset;

    /* Check for overflow (invalid packet) */
    if (incapsuled_data_size > tvb_reported_length(tvb)) {
        proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_unknown_data,
                            tvb, NVME_TCP_HEADER_SIZE, -1, ENC_NA);
        return tvb_reported_length(tvb);
    }

    if (pdu_flags & NVME_TCP_F_DDGST) {
        unsigned ddgst_flags = PROTO_CHECKSUM_NO_FLAGS;
        uint32_t crc = 0;

        /* Check that the data has enough space (invalid packet) */
        if (incapsuled_data_size <= NVME_TCP_DIGEST_LENGTH) {
            proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_unknown_data,
                                tvb, NVME_TCP_HEADER_SIZE, -1, ENC_NA);
            return tvb_reported_length(tvb);
        }

        incapsuled_data_size -= NVME_TCP_DIGEST_LENGTH;
        if (nvme_tcp_check_ddgst) {
            ddgst_flags = PROTO_CHECKSUM_VERIFY;
            crc = ~crc32c_tvb_offset_calculate(tvb, pdo,
                                               incapsuled_data_size, ~0);
        }
        proto_tree_add_checksum(nvme_tcp_tree, tvb,
                                plen - NVME_TCP_DIGEST_LENGTH, hf_nvme_tcp_ddgst,
                                hf_nvme_tcp_ddgst_status, NULL, pinfo,
                                crc, ENC_NA, ddgst_flags);
    }

    switch (packet_type) {
    case nvme_tcp_icreq:
        dissect_nvme_tcp_icreq(tvb, pinfo, nvme_tcp_pdu_offset, nvme_tcp_tree);
        proto_item_set_len(ti, hlen);
        break;
    case nvme_tcp_icresp:
        dissect_nvme_tcp_icresp(tvb, pinfo, nvme_tcp_pdu_offset, nvme_tcp_tree);
        proto_item_set_len(ti, hlen);
        break;
    case nvme_tcp_cmd:
        dissect_nvme_tcp_command(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                                 nvme_tcp_pdu_offset, incapsuled_data_size, pdu_data_offset);
        break;
    case nvme_tcp_rsp:
        dissect_nvme_tcp_cqe(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                             nvme_tcp_pdu_offset);
        proto_item_set_len(ti, NVME_TCP_HEADER_SIZE);
        break;
    case nvme_tcp_c2h_data:
        dissect_nvme_tcp_c2h_data(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                                  nvme_tcp_pdu_offset, pdu_data_offset);
        proto_item_set_len(ti, NVME_TCP_DATA_PDU_SIZE);
        break;
    case nvme_tcp_h2c_data:
        dissect_nvme_tcp_h2c_data(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                                  nvme_tcp_pdu_offset, pdu_data_offset);
        proto_item_set_len(ti, NVME_TCP_DATA_PDU_SIZE);
        break;
    case nvme_tcp_r2t:
        dissect_nvme_tcp_r2t(tvb, pinfo, nvme_tcp_pdu_offset, nvme_tcp_tree);
        break;
    case nvme_tcp_h2c_term:
        dissect_nvme_tcp_h2ctermreq(tvb, pinfo, tree, plen, offset);
        break;
    case nvme_tcp_c2h_term:
        dissect_nvme_tcp_c2htermreq(tvb, pinfo, tree, plen, offset);
        break;
    default:
        // TODO: nvme_tcp_kdreq, nvme_tcp_kdresp
        proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_unknown_data, tvb,
                            offset, plen, ENC_NA);
        break;
    }

    return tvb_reported_length(tvb);
}
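
/* Main dissection entry point: tcp_dissect_pdus() handles TCP segment
 * reassembly and invokes dissect_nvme_tcp_pdu() once per complete PDU,
 * using get_nvme_tcp_pdu_len() to learn each PDU's length. */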
static int
dissect_nvme_tcp(tvbuff_t *tvb,
                 packet_info *pinfo,
                 proto_tree *tree,
                 void *data)
{
    col_clear(pinfo->cinfo, COL_INFO);
    col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_TCP);
    tcp_dissect_pdus(tvb, pinfo, tree, true, NVME_TCP_HEADER_SIZE,
                     get_nvme_tcp_pdu_len, dissect_nvme_tcp_pdu, data);

    return tvb_reported_length(tvb);
}
static bool
test_nvme(packet_info *pinfo _U_, tvbuff_t *tvb, int offset, void *data _U_)
{
    /* This is not the strongest heuristic, but the port is IANA assigned,
     * so this is not a normal heuristic dissector but simply to distinguish
     * between NVMe/TCP and NVMe/TLS/TCP, and also to detect PDU starts.
     */
    if (tvb_captured_length_remaining(tvb, offset) < NVME_TCP_HEADER_SIZE) {
        return false;
    }

    if (tvb_get_uint8(tvb, offset) > NVMET_MAX_PDU_TYPE) {
        return false;
    }

    offset += 2;
    if (tvb_get_uint8(tvb, offset) < NVME_TCP_HEADER_SIZE) {
        // Header length - we could strengthen by using the PDU type.
        return false;
    }

    // Next byte is PDU Data Offset. Reserved in most types. (Does that
    // mean zero? That would strengthen the heuristic.)

    offset += 2;
    if (tvb_get_uint32(tvb, offset, ENC_LITTLE_ENDIAN) < NVME_TCP_HEADER_SIZE) {
        // PDU Length (inc. header) - could strengthen by using the PDU type.
        return false;
    }

    return true;
}
static int
dissect_nvme_tcp_heur(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data)
{
    /* NVMe/TCP allows PDUs to span TCP segments (see Figure 5 of the NVMe/TCP
     * Transport Specification.) Also, some connections are over TLS.
     * Luckily, the PDU types for NVMe/TCP occupy the first byte, same as
     * the Content Type for TLS Records, and while these PDU types go to 11,
     * TLS Content Types start at 20 (and won't change, to enable multiplexing,
     * see RFC 9443.)
     *
     * So if this doesn't look like the start of a NVMe/TCP PDU, reject it.
     * It might be TLS, or it might be the middle of a PDU.
     */
    if (!test_nvme(pinfo, tvb, 0, data)) {
        return 0;
        /* The TLS heuristic dissector should catch the TLS version. */
    }

    /* The start of a PDU. Set the other handle for this connection.
     * We can call tcp_dissect_pdus safely starting from here.
     */
    conversation_t *conversation = find_or_create_conversation(pinfo);
    conversation_set_dissector_from_frame_number(conversation, pinfo->num, nvmet_tls_handle);

    return dissect_nvme_tcp(tvb, pinfo, tree, data);
}
void proto_register_nvme_tcp(void) {

    static hf_register_info hf[] = {
        { &hf_nvme_tcp_type,
            { "Pdu Type", "nvme-tcp.type",
              FT_UINT8, BASE_DEC, VALS(nvme_tcp_pdu_type_vals),
              0x0, NULL, HFILL } },
        { &hf_nvme_tcp_flags,
            { "Pdu Specific Flags", "nvme-tcp.flags",
              FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_pdu_flags_hdgst,
            { "PDU Header Digest", "nvme-tcp.flags.pdu.hdgst",
              FT_BOOLEAN, 8, TFS(&tfs_set_notset),
              NVME_TCP_F_HDGST, NULL, HFILL} },
        { &hf_pdu_flags_ddgst,
            { "PDU Data Digest", "nvme-tcp.flags.pdu.ddgst",
              FT_BOOLEAN, 8, TFS(&tfs_set_notset),
              NVME_TCP_F_DDGST, NULL, HFILL} },
        { &hf_pdu_flags_data_last,
            { "PDU Data Last", "nvme-tcp.flags.pdu.data_last",
              FT_BOOLEAN, 8, TFS(&tfs_set_notset),
              NVME_TCP_F_DATA_LAST, NULL, HFILL} },
        { &hf_pdu_flags_data_success,
            { "PDU Data Success", "nvme-tcp.flags.pdu.data_success",
              FT_BOOLEAN, 8, TFS(&tfs_set_notset),
              NVME_TCP_F_DATA_SUCCESS, NULL, HFILL} },
        { &hf_nvme_tcp_hdgst,
            { "PDU Header Digest", "nvme-tcp.hdgst",
              FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_ddgst,
            { "PDU Data Digest", "nvme-tcp.ddgst",
              FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_hdgst_status,
            { "Header Digest Status", "nvme-tcp.hdgst.status",
              FT_UINT8, BASE_NONE, VALS(proto_checksum_vals),
              0x0, NULL, HFILL }},
        { &hf_nvme_tcp_ddgst_status,
            { "Data Digest Status", "nvme-tcp.ddgst.status",
              FT_UINT8, BASE_NONE, VALS(proto_checksum_vals),
              0x0, NULL, HFILL }},
        { &hf_nvme_tcp_hlen,
            { "Pdu Header Length", "nvme-tcp.hlen",
              FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_pdo,
            { "Pdu Data Offset", "nvme-tcp.pdo",
              FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_plen,
            { "Packet Length", "nvme-tcp.plen",
              FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icreq,
            { "ICReq", "nvme-tcp.icreq",
              FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icreq_pfv,
            { "Pdu Version Format", "nvme-tcp.icreq.pfv",
              FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icreq_maxr2t,
            { "Maximum r2ts per request", "nvme-tcp.icreq.maxr2t",
              FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icreq_hpda,
            { "Host Pdu data alignment", "nvme-tcp.icreq.hpda",
              FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icreq_digest,
            { "Digest Types Enabled", "nvme-tcp.icreq.digest",
              FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icresp,
            { "ICResp", "nvme-tcp.icresp",
              FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icresp_pfv,
            { "Pdu Version Format", "nvme-tcp.icresp.pfv",
              FT_UINT16, BASE_DEC, NULL, 0x0,
              NULL, HFILL } },
        { &hf_nvme_tcp_icresp_cpda,
            { "Controller Pdu data alignment", "nvme-tcp.icresp.cpda",
              FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icresp_digest,
            { "Digest types enabled", "nvme-tcp.icresp.digest",
              FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_icresp_maxdata,
            { "Maximum data capsules per r2t supported", "nvme-tcp.icresp.maxdata",
              FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
        /* NVMe tcp c2h/h2c termreq fields */
        { &hf_nvme_tcp_c2htermreq,
            { "C2HTermReq", "nvme-tcp.c2htermreq",
              FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_fes,
            { "Fatal error status", "nvme-tcp.c2htermreq.fes",
              FT_UINT16, BASE_HEX, VALS(nvme_tcp_termreq_fes),
              0x0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_phfo,
            { "PDU header field offset", "nvme-tcp.c2htermreq.phfo",
              FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_phd,
            { "PDU header digest", "nvme-tcp.c2htermreq.phd",
              FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_upfo,
            { "Unsupported parameter field offset", "nvme-tcp.c2htermreq.upfo",
              FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_reserved,
            { "Reserved", "nvme-tcp.c2htermreq.reserved",
              FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_c2htermreq_data,
            { "Terminated PDU header", "nvme-tcp.c2htermreq.data",
              FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq,
            { "H2CTermReq", "nvme-tcp.h2ctermreq",
              FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_fes,
            { "Fatal error status", "nvme-tcp.h2ctermreq.fes",
              FT_UINT16, BASE_HEX, VALS(nvme_tcp_termreq_fes),
              0x0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_phfo,
            { "PDU header field offset", "nvme-tcp.h2ctermreq.phfo",
              FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_phd,
            { "PDU header digest", "nvme-tcp.h2ctermreq.phd",
              FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_upfo,
            { "Unsupported parameter field offset", "nvme-tcp.h2ctermreq.upfo",
              FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_reserved,
            { "Reserved", "nvme-tcp.h2ctermreq.reserved",
              FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
        { &hf_nvme_tcp_h2ctermreq_data,
            { "Terminated PDU header", "nvme-tcp.h2ctermreq.data",
              FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL } },
        { &hf_nvme_fabrics_cmd_cid,
            { "Command ID", "nvme-tcp.cmd.cid",
              FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_unknown_data,
            { "Unknown Data", "nvme-tcp.unknown_data",
              FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        /* NVMe command data */
        { &hf_nvme_fabrics_cmd_data,
            { "Data", "nvme-tcp.cmd.data",
              FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_cmd_pkt,
            { "Cmd in", "nvme-tcp.cmd_pkt",
              FT_FRAMENUM, BASE_NONE, NULL, 0,
              "The Cmd for this transaction is in this frame", HFILL } },
        { &hf_nvme_fabrics_cmd_qid,
            { "Cmd Qid", "nvme-tcp.cmd.qid",
              FT_UINT16, BASE_HEX, NULL, 0x0,
              "Qid on which command is issued", HFILL } },
        /* NVMe TCP data response */
        { &hf_nvme_tcp_data_pdu,
            { "NVMe/TCP Data PDU", "nvme-tcp.data",
              FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_pdu_ttag,
            { "Transfer Tag", "nvme-tcp.ttag",
              FT_UINT16, BASE_HEX, NULL, 0x0,
              "Transfer tag (controller generated)", HFILL } },
        { &hf_nvme_tcp_data_pdu_data_offset,
            { "Data Offset", "nvme-tcp.data.offset",
              FT_UINT32, BASE_DEC, NULL, 0x0,
              "Offset from the start of the command data", HFILL } },
        { &hf_nvme_tcp_data_pdu_data_length,
            { "Data Length", "nvme-tcp.data.length",
              FT_UINT32, BASE_DEC, NULL, 0x0,
              "Length of the data stream", HFILL } },
        { &hf_nvme_tcp_data_pdu_data_resvd,
            { "Reserved", "nvme-tcp.data.rsvd",
              FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        /* NVMe TCP R2T PDU */
        { &hf_nvme_tcp_r2t_pdu,
            { "R2T", "nvme-tcp.r2t",
              FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_r2t_offset,
            { "R2T Offset", "nvme-tcp.r2t.offset",
              FT_UINT32, BASE_DEC, NULL, 0x0,
              "Offset from the start of the command data", HFILL } },
        { &hf_nvme_tcp_r2t_length,
            { "R2T Length", "nvme-tcp.r2t.length",
              FT_UINT32, BASE_DEC, NULL, 0x0,
              "Length of the data stream", HFILL } },
        { &hf_nvme_tcp_r2t_resvd,
            { "Reserved", "nvme-tcp.r2t.rsvd",
              FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } }
    };
    static int *ett[] = {
        &ett_nvme_tcp
    };

    proto_nvme_tcp = proto_register_protocol("NVM Express Fabrics TCP",
                                             NVME_FABRICS_TCP, "nvme-tcp");

    proto_register_field_array(proto_nvme_tcp, hf, array_length(hf));
    proto_register_subtree_array(ett, array_length(ett));
    /* These names actually work for their purpose. Note that if we're
     * already over TLS we don't need to do heuristics (it can't be more
     * TLS instead, and since we managed to decrypt the TLS we shouldn't
     * have missing frames and thus aren't in the middle of a PDU.)
     */
    nvmet_tcp_handle = register_dissector("nvme-tcp", dissect_nvme_tcp_heur,
                                          proto_nvme_tcp);
    nvmet_tls_handle = register_dissector_with_description("nvme-tls",
                                                           "NVMe-over-TCP with TLS", dissect_nvme_tcp, proto_nvme_tcp);
}
void proto_reg_handoff_nvme_tcp(void) {
    module_t *nvme_tcp_module;
    nvme_tcp_module = prefs_register_protocol(proto_nvme_tcp, NULL);
    range_convert_str(wmem_epan_scope(), &gPORT_RANGE, NVME_TCP_PORT_RANGE,
                      MAX_TCP_PORT);
    prefs_register_range_preference(nvme_tcp_module,
                                    "subsystem_ports",
                                    "Subsystem Ports Range",
                                    "Range of NVMe Subsystem ports "
                                    "(default " NVME_TCP_PORT_RANGE ")",
                                    &gPORT_RANGE,
                                    MAX_TCP_PORT);
    prefs_register_bool_preference(nvme_tcp_module, "check_hdgst",
                                   "Validate PDU header digest",
                                   "Whether to validate the PDU header digest or not.",
                                   &nvme_tcp_check_hdgst);
    prefs_register_bool_preference(nvme_tcp_module, "check_ddgst",
                                   "Validate PDU data digest",
                                   "Whether to validate the PDU data digest or not.",
                                   &nvme_tcp_check_ddgst);
    ssl_dissector_add(0, nvmet_tls_handle);
    dissector_add_uint_range("tcp.port", gPORT_RANGE, nvmet_tcp_handle);
    dissector_add_uint_range("tls.port", gPORT_RANGE, nvmet_tls_handle);
}
/*
 * Editor modelines  -  https://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * vi: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */