// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt_decoder.c: Intel Processor Trace support
 * Copyright (c) 2013-2014, Intel Corporation.
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/zalloc.h>

#include "../auxtrace.h"

#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
#include "intel-pt-decoder.h"
#include "intel-pt-log.h"

#define BITULL(x) (1ULL << (x))

/* IA32_RTIT_CTL MSR bits */
#define INTEL_PT_CYC_ENABLE		BITULL(1)
#define INTEL_PT_CYC_THRESHOLD		(BITULL(22) | BITULL(21) | BITULL(20) | BITULL(19))
#define INTEL_PT_CYC_THRESHOLD_SHIFT	19

#define INTEL_PT_BLK_SIZE 1024

#define BIT63 (((uint64_t)1 << 63))

#define SEVEN_BYTES 0xffffffffffffffULL

#define NO_VMCS 0xffffffffffULL

#define INTEL_PT_RETURN 1

/*
 * Default maximum number of loops with no packets consumed i.e. stuck in a
 * loop.
 */
#define INTEL_PT_MAX_LOOPS 100000
struct intel_pt_blk {
	struct intel_pt_blk *prev;
	uint64_t ip[INTEL_PT_BLK_SIZE];
};

struct intel_pt_stack {
	struct intel_pt_blk *blk;
	struct intel_pt_blk *spare;
	int pos;
};

enum intel_pt_p_once {
	INTEL_PT_PRT_ONCE_UNK_VMCS,
	INTEL_PT_PRT_ONCE_ERANGE,
};

enum intel_pt_pkt_state {
	INTEL_PT_STATE_NO_PSB,
	INTEL_PT_STATE_NO_IP,
	INTEL_PT_STATE_ERR_RESYNC,
	INTEL_PT_STATE_IN_SYNC,
	INTEL_PT_STATE_TNT_CONT,
	INTEL_PT_STATE_TNT,
	INTEL_PT_STATE_TIP,
	INTEL_PT_STATE_TIP_PGD,
	INTEL_PT_STATE_FUP,
	INTEL_PT_STATE_FUP_NO_TIP,
	INTEL_PT_STATE_FUP_IN_PSB,
	INTEL_PT_STATE_RESAMPLE,
	INTEL_PT_STATE_VM_TIME_CORRELATION,
};
static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
{
	switch (pkt_state) {
	case INTEL_PT_STATE_NO_PSB:
	case INTEL_PT_STATE_NO_IP:
	case INTEL_PT_STATE_ERR_RESYNC:
	case INTEL_PT_STATE_IN_SYNC:
	case INTEL_PT_STATE_TNT_CONT:
	case INTEL_PT_STATE_RESAMPLE:
	case INTEL_PT_STATE_VM_TIME_CORRELATION:
		return true;
	case INTEL_PT_STATE_TNT:
	case INTEL_PT_STATE_TIP:
	case INTEL_PT_STATE_TIP_PGD:
	case INTEL_PT_STATE_FUP:
	case INTEL_PT_STATE_FUP_NO_TIP:
	case INTEL_PT_STATE_FUP_IN_PSB:
		return false;
	default:
		return true;
	}
}

#ifdef INTEL_PT_STRICT
#define INTEL_PT_STATE_ERR1	INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR3	INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR4	INTEL_PT_STATE_NO_PSB
#else
#define INTEL_PT_STATE_ERR1	(decoder->pkt_state)
#define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_IP
#define INTEL_PT_STATE_ERR3	INTEL_PT_STATE_ERR_RESYNC
#define INTEL_PT_STATE_ERR4	INTEL_PT_STATE_IN_SYNC
#endif
struct intel_pt_decoder {
	int (*get_trace)(struct intel_pt_buffer *buffer, void *data);
	int (*walk_insn)(struct intel_pt_insn *intel_pt_insn,
			 uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip,
			 uint64_t max_insn_cnt, void *data);
	bool (*pgd_ip)(uint64_t ip, void *data);
	int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
	struct intel_pt_vmcs_info *(*findnew_vmcs_info)(void *data, uint64_t vmcs);
	struct intel_pt_state state;
	const unsigned char *buf;
	bool return_compression;
	bool emulated_ptwrite;
	bool vm_time_correlation;
	bool vm_tm_corr_dry_run;
	bool vm_tm_corr_reliable;
	bool vm_tm_corr_same_buf;
	bool vm_tm_corr_continuous;
	enum intel_pt_param_flags flags;
	uint64_t pip_payload;
	uint64_t tsc_timestamp;
	uint64_t ref_timestamp;
	uint64_t buf_timestamp;
	uint64_t sample_timestamp;
	uint64_t ctc_timestamp;
	uint64_t cyc_ref_timestamp;
	uint64_t first_timestamp;
	uint64_t last_reliable_timestamp;
	uint32_t tsc_ctc_ratio_n;
	uint32_t tsc_ctc_ratio_d;
	uint32_t tsc_ctc_mult;
	uint32_t ctc_rem_mask;
	struct intel_pt_stack stack;
	enum intel_pt_pkt_state pkt_state;
	enum intel_pt_pkt_ctx pkt_ctx;
	enum intel_pt_pkt_ctx prev_pkt_ctx;
	enum intel_pt_blk_type blk_type;
	struct intel_pt_pkt packet;
	struct intel_pt_pkt tnt;
	int last_packet_type;
	unsigned int cbr_seen;
	unsigned int max_non_turbo_ratio;
	double max_non_turbo_ratio_fp;
	double cbr_cyc_to_tsc;
	double calc_cyc_to_tsc;
	bool have_calc_cyc_to_tsc;
	unsigned int insn_bytes;
	enum intel_pt_period_type period_type;
	uint64_t tot_insn_cnt;
	uint64_t period_insn_cnt;
	uint64_t period_mask;
	uint64_t period_ticks;
	uint64_t last_masked_timestamp;
	uint64_t tot_cyc_cnt;
	uint64_t sample_tot_cyc_cnt;
	uint64_t base_cyc_cnt;
	uint64_t cyc_cnt_timestamp;
	uint64_t cyc_threshold;
	bool continuous_period;
	bool set_fup_tx_flags;
	bool set_fup_mode_exec;
	unsigned int fup_tx_flags;
	unsigned int tx_flags;
	uint64_t fup_ptw_payload;
	uint64_t fup_mwait_payload;
	uint64_t fup_pwre_payload;
	uint64_t cbr_payload;
	uint64_t timestamp_insn_cnt;
	uint64_t sample_insn_cnt;
	struct intel_pt_pkt fup_cfe_pkt;
	const unsigned char *next_buf;
	unsigned char temp_buf[INTEL_PT_PKT_MAX_SZ];
	struct intel_pt_evd evd[INTEL_PT_MAX_EVDS];
};
static uint64_t intel_pt_lower_power_of_2(uint64_t x)
{
	int i;

	for (i = 0; x != 1; i++)
		x >>= 1;

	return x << i;
}

static void p_log(const char *fmt, ...)
{
	char buf[512];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	fprintf(stderr, "%s\n", buf);
	intel_pt_log("%s\n", buf);
}

static bool intel_pt_print_once(struct intel_pt_decoder *decoder,
				enum intel_pt_p_once id)
{
	uint64_t bit = 1ULL << id;

	if (decoder->print_once & bit)
		return false;

	decoder->print_once |= bit;
	return true;
}

static uint64_t intel_pt_cyc_threshold(uint64_t ctl)
{
	if (!(ctl & INTEL_PT_CYC_ENABLE))
		return 0;

	return (ctl & INTEL_PT_CYC_THRESHOLD) >> INTEL_PT_CYC_THRESHOLD_SHIFT;
}

static void intel_pt_setup_period(struct intel_pt_decoder *decoder)
{
	if (decoder->period_type == INTEL_PT_PERIOD_TICKS) {
		uint64_t period;

		period = intel_pt_lower_power_of_2(decoder->period);
		decoder->period_mask  = ~(period - 1);
		decoder->period_ticks = period;
	}
}

static uint64_t multdiv(uint64_t t, uint32_t n, uint32_t d)
{
	if (!d)
		return 0;

	return (t / d) * n + ((t % d) * n) / d;
}
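
/*
 * Worked example (added for illustration, not part of the original sources):
 * the split in multdiv() avoids overflowing the 64-bit intermediate that a
 * naive (t * n) / d would need for large timestamps.  With
 * t = 0x4000000000000000 (2^62), n = 10, d = 3:
 *   naive:  t * n = 10 * 2^62, which does not fit in 64 bits
 *   split:  (t / d) * n       = 0x1555555555555555 * 10 = 0xd555555555555552
 *           ((t % d) * n) / d = (1 * 10) / 3            = 3
 *           result            = 0xd555555555555555
 * which is the correct floor of t * n / d; in general the split result is
 * exact to within the truncation of the final division, while every
 * intermediate value stays inside 64 bits.
 */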
struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
{
	struct intel_pt_decoder *decoder;

	if (!params->get_trace || !params->walk_insn)
		return NULL;

	decoder = zalloc(sizeof(struct intel_pt_decoder));
	if (!decoder)
		return NULL;

	decoder->get_trace = params->get_trace;
	decoder->walk_insn = params->walk_insn;
	decoder->pgd_ip = params->pgd_ip;
	decoder->lookahead = params->lookahead;
	decoder->findnew_vmcs_info = params->findnew_vmcs_info;
	decoder->data = params->data;
	decoder->return_compression = params->return_compression;
	decoder->branch_enable = params->branch_enable;
	decoder->hop = params->quick >= 1;
	decoder->leap = params->quick >= 2;
	decoder->vm_time_correlation = params->vm_time_correlation;
	decoder->vm_tm_corr_dry_run = params->vm_tm_corr_dry_run;
	decoder->first_timestamp = params->first_timestamp;
	decoder->last_reliable_timestamp = params->first_timestamp;
	decoder->max_loops = params->max_loops ? params->max_loops : INTEL_PT_MAX_LOOPS;

	decoder->flags = params->flags;

	decoder->ctl = params->ctl;
	decoder->period = params->period;
	decoder->period_type = params->period_type;

	decoder->max_non_turbo_ratio    = params->max_non_turbo_ratio;
	decoder->max_non_turbo_ratio_fp = params->max_non_turbo_ratio;

	decoder->cyc_threshold = intel_pt_cyc_threshold(decoder->ctl);

	intel_pt_setup_period(decoder);

	decoder->mtc_shift = params->mtc_period;
	decoder->ctc_rem_mask = (1 << decoder->mtc_shift) - 1;

	decoder->tsc_ctc_ratio_n = params->tsc_ctc_ratio_n;
	decoder->tsc_ctc_ratio_d = params->tsc_ctc_ratio_d;

	if (!decoder->tsc_ctc_ratio_n)
		decoder->tsc_ctc_ratio_d = 0;

	if (decoder->tsc_ctc_ratio_d) {
		if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
			decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
						decoder->tsc_ctc_ratio_d;
	}

	/*
	 * A TSC packet can slip past MTC packets so that the timestamp appears
	 * to go backwards. One estimate is that can be up to about 40 CPU
	 * cycles, which is certainly less than 0x1000 TSC ticks, but accept
	 * slippage an order of magnitude more to be on the safe side.
	 */
	decoder->tsc_slip = 0x10000;

	intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
	intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
	intel_pt_log("timestamp: tsc_ctc_ratio_d %u\n", decoder->tsc_ctc_ratio_d);
	intel_pt_log("timestamp: tsc_ctc_mult %u\n", decoder->tsc_ctc_mult);
	intel_pt_log("timestamp: tsc_slip %#x\n", decoder->tsc_slip);

	if (decoder->hop)
		intel_pt_log("Hop mode: decoding FUP and TIPs, but not TNT\n");

	return decoder;
}
void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
				  uint64_t first_timestamp)
{
	decoder->first_timestamp = first_timestamp;
}

static void intel_pt_pop_blk(struct intel_pt_stack *stack)
{
	struct intel_pt_blk *blk = stack->blk;

	stack->blk = blk->prev;
	if (!stack->spare)
		stack->spare = blk;
	else
		free(blk);
}

static uint64_t intel_pt_pop(struct intel_pt_stack *stack)
{
	if (!stack->pos) {
		if (!stack->blk)
			return 0;
		intel_pt_pop_blk(stack);
		if (!stack->blk)
			return 0;
		stack->pos = INTEL_PT_BLK_SIZE;
	}

	return stack->blk->ip[--stack->pos];
}

static int intel_pt_alloc_blk(struct intel_pt_stack *stack)
{
	struct intel_pt_blk *blk;

	if (stack->spare) {
		blk = stack->spare;
		stack->spare = NULL;
	} else {
		blk = malloc(sizeof(struct intel_pt_blk));
		if (!blk)
			return -ENOMEM;
	}

	blk->prev = stack->blk;
	stack->blk = blk;
	stack->pos = 0;
	return 0;
}

static int intel_pt_push(struct intel_pt_stack *stack, uint64_t ip)
{
	int err;

	if (!stack->blk || stack->pos == INTEL_PT_BLK_SIZE) {
		err = intel_pt_alloc_blk(stack);
		if (err)
			return err;
	}

	stack->blk->ip[stack->pos++] = ip;
	return 0;
}

static void intel_pt_clear_stack(struct intel_pt_stack *stack)
{
	while (stack->blk)
		intel_pt_pop_blk(stack);
	stack->pos = 0;
}

static void intel_pt_free_stack(struct intel_pt_stack *stack)
{
	intel_pt_clear_stack(stack);
	zfree(&stack->blk);
	zfree(&stack->spare);
}

void intel_pt_decoder_free(struct intel_pt_decoder *decoder)
{
	intel_pt_free_stack(&decoder->stack);
	free(decoder);
}
464 static int intel_pt_ext_err(int code
)
468 return INTEL_PT_ERR_NOMEM
;
470 return INTEL_PT_ERR_INTERN
;
472 return INTEL_PT_ERR_BADPKT
;
474 return INTEL_PT_ERR_NODATA
;
476 return INTEL_PT_ERR_NOINSN
;
478 return INTEL_PT_ERR_MISMAT
;
480 return INTEL_PT_ERR_OVR
;
482 return INTEL_PT_ERR_LOST
;
484 return INTEL_PT_ERR_NELOOP
;
486 return INTEL_PT_ERR_EPTW
;
488 return INTEL_PT_ERR_UNK
;
static const char *intel_pt_err_msgs[] = {
	[INTEL_PT_ERR_NOMEM]  = "Memory allocation failed",
	[INTEL_PT_ERR_INTERN] = "Internal error",
	[INTEL_PT_ERR_BADPKT] = "Bad packet",
	[INTEL_PT_ERR_NODATA] = "No more data",
	[INTEL_PT_ERR_NOINSN] = "Failed to get instruction",
	[INTEL_PT_ERR_MISMAT] = "Trace doesn't match instruction",
	[INTEL_PT_ERR_OVR]    = "Overflow packet",
	[INTEL_PT_ERR_LOST]   = "Lost trace data",
	[INTEL_PT_ERR_UNK]    = "Unknown error!",
	[INTEL_PT_ERR_NELOOP] = "Never-ending loop (refer perf config intel-pt.max-loops)",
	[INTEL_PT_ERR_EPTW]   = "Broken emulated ptwrite",
};

int intel_pt__strerror(int code, char *buf, size_t buflen)
{
	if (code < 1 || code >= INTEL_PT_ERR_MAX)
		code = INTEL_PT_ERR_UNK;
	strlcpy(buf, intel_pt_err_msgs[code], buflen);
	return 0;
}
static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
				 uint64_t last_ip)
{
	uint64_t ip;

	switch (packet->count) {
	case 1:
		ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
		     packet->payload;
		break;
	case 2:
		ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
		     packet->payload;
		break;
	case 3:
		ip = packet->payload;
		/* Sign-extend 6-byte ip */
		if (ip & (uint64_t)0x800000000000ULL)
			ip |= (uint64_t)0xffff000000000000ULL;
		break;
	case 4:
		ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
		     packet->payload;
		break;
	case 6:
		ip = packet->payload;
		break;
	default:
		return 0;
	}

	return ip;
}
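
/*
 * Worked example (illustration only, not in the original file): IP
 * compression reuses the upper bytes of the previously decoded IP.  With
 * last_ip = 0x00007f1234567890 and a packet whose count is 2 (payload is the
 * low 4 bytes, e.g. 0x9abcdef0):
 *   ip = (0x00007f1234567890 & 0xffffffff00000000) | 0x9abcdef0
 *      = 0x00007f129abcdef0
 * A count of 3 carries 6 payload bytes and bit 47 is sign-extended into the
 * top 16 bits, which is how kernel addresses (0xffff...) are reconstructed.
 */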
static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
{
	decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
	decoder->have_last_ip = true;
}

static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
{
	intel_pt_set_last_ip(decoder);
	decoder->ip = decoder->last_ip;
}
static void intel_pt_decoder_log_packet(struct intel_pt_decoder *decoder)
{
	intel_pt_log_packet(&decoder->packet, decoder->pkt_len, decoder->pos,
			    decoder->buf);
}

static int intel_pt_bug(struct intel_pt_decoder *decoder)
{
	intel_pt_log("ERROR: Internal error\n");
	decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
	return -ENOSYS;
}

static inline void intel_pt_clear_tx_flags(struct intel_pt_decoder *decoder)
{
	decoder->tx_flags = 0;
}

static inline void intel_pt_update_in_tx(struct intel_pt_decoder *decoder)
{
	decoder->tx_flags = decoder->packet.payload & INTEL_PT_IN_TX;
}

static inline void intel_pt_update_pip(struct intel_pt_decoder *decoder)
{
	decoder->pip_payload = decoder->packet.payload;
}

static inline void intel_pt_update_nr(struct intel_pt_decoder *decoder)
{
	decoder->next_nr = decoder->pip_payload & 1;
}

static inline void intel_pt_set_nr(struct intel_pt_decoder *decoder)
{
	decoder->nr = decoder->pip_payload & 1;
	decoder->next_nr = decoder->nr;
}

static inline void intel_pt_set_pip(struct intel_pt_decoder *decoder)
{
	intel_pt_update_pip(decoder);
	intel_pt_set_nr(decoder);
}

static int intel_pt_bad_packet(struct intel_pt_decoder *decoder)
{
	intel_pt_clear_tx_flags(decoder);
	decoder->have_tma = false;
	decoder->pkt_len = 1;
	decoder->pkt_step = 1;
	intel_pt_decoder_log_packet(decoder);
	if (decoder->pkt_state != INTEL_PT_STATE_NO_PSB) {
		intel_pt_log("ERROR: Bad packet\n");
		decoder->pkt_state = INTEL_PT_STATE_ERR1;
	}

	return -EBADMSG;
}
619 static inline void intel_pt_update_sample_time(struct intel_pt_decoder
*decoder
)
621 decoder
->sample_timestamp
= decoder
->timestamp
;
622 decoder
->sample_insn_cnt
= decoder
->timestamp_insn_cnt
;
623 decoder
->state
.cycles
= decoder
->tot_cyc_cnt
;
626 static void intel_pt_reposition(struct intel_pt_decoder
*decoder
)
629 decoder
->pkt_state
= INTEL_PT_STATE_NO_PSB
;
630 decoder
->timestamp
= 0;
631 decoder
->have_tma
= false;
634 static int intel_pt_get_data(struct intel_pt_decoder
*decoder
, bool reposition
)
636 struct intel_pt_buffer buffer
= { .buf
= 0, };
639 decoder
->pkt_step
= 0;
641 intel_pt_log("Getting more data\n");
642 ret
= decoder
->get_trace(&buffer
, decoder
->data
);
645 decoder
->buf
= buffer
.buf
;
646 decoder
->len
= buffer
.len
;
648 intel_pt_log("No more data\n");
651 decoder
->buf_timestamp
= buffer
.ref_timestamp
;
652 if (!buffer
.consecutive
|| reposition
) {
653 intel_pt_reposition(decoder
);
654 decoder
->ref_timestamp
= buffer
.ref_timestamp
;
655 decoder
->state
.trace_nr
= buffer
.trace_nr
;
656 decoder
->vm_tm_corr_same_buf
= false;
657 intel_pt_log("Reference timestamp 0x%" PRIx64
"\n",
658 decoder
->ref_timestamp
);
665 static int intel_pt_get_next_data(struct intel_pt_decoder
*decoder
,
668 if (!decoder
->next_buf
)
669 return intel_pt_get_data(decoder
, reposition
);
671 decoder
->buf
= decoder
->next_buf
;
672 decoder
->len
= decoder
->next_len
;
673 decoder
->next_buf
= 0;
674 decoder
->next_len
= 0;
678 static int intel_pt_get_split_packet(struct intel_pt_decoder
*decoder
)
680 unsigned char *buf
= decoder
->temp_buf
;
681 size_t old_len
, len
, n
;
684 old_len
= decoder
->len
;
686 memcpy(buf
, decoder
->buf
, len
);
688 ret
= intel_pt_get_data(decoder
, false);
690 decoder
->pos
+= old_len
;
691 return ret
< 0 ? ret
: -EINVAL
;
694 n
= INTEL_PT_PKT_MAX_SZ
- len
;
695 if (n
> decoder
->len
)
697 memcpy(buf
+ len
, decoder
->buf
, n
);
700 decoder
->prev_pkt_ctx
= decoder
->pkt_ctx
;
701 ret
= intel_pt_get_packet(buf
, len
, &decoder
->packet
, &decoder
->pkt_ctx
);
702 if (ret
< (int)old_len
) {
703 decoder
->next_buf
= decoder
->buf
;
704 decoder
->next_len
= decoder
->len
;
706 decoder
->len
= old_len
;
707 return intel_pt_bad_packet(decoder
);
710 decoder
->next_buf
= decoder
->buf
+ (ret
- old_len
);
711 decoder
->next_len
= decoder
->len
- (ret
- old_len
);
719 struct intel_pt_pkt_info
{
720 struct intel_pt_decoder
*decoder
;
721 struct intel_pt_pkt packet
;
724 int last_packet_type
;
728 typedef int (*intel_pt_pkt_cb_t
)(struct intel_pt_pkt_info
*pkt_info
);
730 /* Lookahead packets in current buffer */
731 static int intel_pt_pkt_lookahead(struct intel_pt_decoder
*decoder
,
732 intel_pt_pkt_cb_t cb
, void *data
)
734 struct intel_pt_pkt_info pkt_info
;
735 const unsigned char *buf
= decoder
->buf
;
736 enum intel_pt_pkt_ctx pkt_ctx
= decoder
->pkt_ctx
;
737 size_t len
= decoder
->len
;
740 pkt_info
.decoder
= decoder
;
741 pkt_info
.pos
= decoder
->pos
;
742 pkt_info
.pkt_len
= decoder
->pkt_step
;
743 pkt_info
.last_packet_type
= decoder
->last_packet_type
;
744 pkt_info
.data
= data
;
748 pkt_info
.pos
+= pkt_info
.pkt_len
;
749 buf
+= pkt_info
.pkt_len
;
750 len
-= pkt_info
.pkt_len
;
753 return INTEL_PT_NEED_MORE_BYTES
;
755 ret
= intel_pt_get_packet(buf
, len
, &pkt_info
.packet
,
758 return INTEL_PT_NEED_MORE_BYTES
;
762 pkt_info
.pkt_len
= ret
;
763 } while (pkt_info
.packet
.type
== INTEL_PT_PAD
);
769 pkt_info
.last_packet_type
= pkt_info
.packet
.type
;
struct intel_pt_calc_cyc_to_tsc_info {
	uint64_t        cycle_cnt;
	unsigned int    cbr;
	uint32_t        last_mtc;
	uint64_t        ctc_timestamp;
	uint64_t        ctc_delta;
	uint64_t        tsc_timestamp;
	uint64_t        timestamp;
	bool            have_tma;
	bool            fixup_last_mtc;
	bool            from_mtc;
	double          cbr_cyc_to_tsc;
};

/*
 * MTC provides an 8-bit slice of CTC but the TMA packet only provides the
 * lower 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in
 * the CTC provided by the TMA packet. Fix-up the last_mtc calculated from the
 * TMA packet by copying the missing bits from the current MTC assuming the
 * least difference between the two, and that the current MTC comes after
 * last_mtc.
 */
static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
				    uint32_t *last_mtc)
{
	uint32_t first_missing_bit = 1U << (16 - mtc_shift);
	uint32_t mask = ~(first_missing_bit - 1);

	*last_mtc |= mtc & mask;
	if (*last_mtc >= mtc) {
		*last_mtc -= first_missing_bit;
	}
}
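
/*
 * Worked example (illustration only, not in the original file): with
 * mtc_shift = 10 the TMA-derived last_mtc only contains MTC bits 0-5
 * (16 - 10 = 6 known bits), so first_missing_bit = 0x40 and mask = ~0x3f.
 * If the current MTC payload is 0x93 and last_mtc was 0x21:
 *   *last_mtc |= 0x93 & ~0x3f            ->  0xa1
 *   0xa1 >= 0x93, so subtract 0x40       ->  0x61
 * giving the largest value consistent with the known low bits that is still
 * below the current MTC, i.e. the "least difference" described above.
 */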
807 static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info
*pkt_info
)
809 struct intel_pt_decoder
*decoder
= pkt_info
->decoder
;
810 struct intel_pt_calc_cyc_to_tsc_info
*data
= pkt_info
->data
;
814 uint32_t mtc
, mtc_delta
, ctc
, fc
, ctc_rem
;
816 switch (pkt_info
->packet
.type
) {
818 case INTEL_PT_TIP_PGE
:
823 case INTEL_PT_MODE_EXEC
:
824 case INTEL_PT_MODE_TSX
:
825 case INTEL_PT_PSBEND
:
829 case INTEL_PT_PTWRITE
:
830 case INTEL_PT_PTWRITE_IP
:
834 case INTEL_PT_BEP_IP
:
836 case INTEL_PT_CFE_IP
:
844 mtc
= pkt_info
->packet
.payload
;
845 if (decoder
->mtc_shift
> 8 && data
->fixup_last_mtc
) {
846 data
->fixup_last_mtc
= false;
847 intel_pt_fixup_last_mtc(mtc
, decoder
->mtc_shift
,
850 if (mtc
> data
->last_mtc
)
851 mtc_delta
= mtc
- data
->last_mtc
;
853 mtc_delta
= mtc
+ 256 - data
->last_mtc
;
854 data
->ctc_delta
+= mtc_delta
<< decoder
->mtc_shift
;
855 data
->last_mtc
= mtc
;
857 if (decoder
->tsc_ctc_mult
) {
858 timestamp
= data
->ctc_timestamp
+
859 data
->ctc_delta
* decoder
->tsc_ctc_mult
;
861 timestamp
= data
->ctc_timestamp
+
862 multdiv(data
->ctc_delta
,
863 decoder
->tsc_ctc_ratio_n
,
864 decoder
->tsc_ctc_ratio_d
);
867 if (timestamp
< data
->timestamp
)
870 if (pkt_info
->last_packet_type
!= INTEL_PT_CYC
) {
871 data
->timestamp
= timestamp
;
879 * For now, do not support using TSC packets - refer
880 * intel_pt_calc_cyc_to_tsc().
884 timestamp
= pkt_info
->packet
.payload
|
885 (data
->timestamp
& (0xffULL
<< 56));
886 if (data
->from_mtc
&& timestamp
< data
->timestamp
&&
887 data
->timestamp
- timestamp
< decoder
->tsc_slip
)
889 if (timestamp
< data
->timestamp
)
890 timestamp
+= (1ULL << 56);
891 if (pkt_info
->last_packet_type
!= INTEL_PT_CYC
) {
894 data
->tsc_timestamp
= timestamp
;
895 data
->timestamp
= timestamp
;
904 if (!decoder
->tsc_ctc_ratio_d
)
907 ctc
= pkt_info
->packet
.payload
;
908 fc
= pkt_info
->packet
.count
;
909 ctc_rem
= ctc
& decoder
->ctc_rem_mask
;
911 data
->last_mtc
= (ctc
>> decoder
->mtc_shift
) & 0xff;
913 data
->ctc_timestamp
= data
->tsc_timestamp
- fc
;
914 if (decoder
->tsc_ctc_mult
) {
915 data
->ctc_timestamp
-= ctc_rem
* decoder
->tsc_ctc_mult
;
917 data
->ctc_timestamp
-=
918 multdiv(ctc_rem
, decoder
->tsc_ctc_ratio_n
,
919 decoder
->tsc_ctc_ratio_d
);
923 data
->have_tma
= true;
924 data
->fixup_last_mtc
= true;
929 data
->cycle_cnt
+= pkt_info
->packet
.payload
;
933 cbr
= pkt_info
->packet
.payload
;
934 if (data
->cbr
&& data
->cbr
!= cbr
)
937 data
->cbr_cyc_to_tsc
= decoder
->max_non_turbo_ratio_fp
/ cbr
;
940 case INTEL_PT_TIP_PGD
:
941 case INTEL_PT_TRACESTOP
:
942 case INTEL_PT_EXSTOP
:
943 case INTEL_PT_EXSTOP_IP
:
948 case INTEL_PT_BAD
: /* Does not happen */
953 if (!data
->cbr
&& decoder
->cbr
) {
954 data
->cbr
= decoder
->cbr
;
955 data
->cbr_cyc_to_tsc
= decoder
->cbr_cyc_to_tsc
;
958 if (!data
->cycle_cnt
)
961 cyc_to_tsc
= (double)(timestamp
- decoder
->timestamp
) / data
->cycle_cnt
;
963 if (data
->cbr
&& cyc_to_tsc
> data
->cbr_cyc_to_tsc
&&
964 cyc_to_tsc
/ data
->cbr_cyc_to_tsc
> 1.25) {
965 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle too big (c.f. CBR-based value %g), pos " x64_fmt
"\n",
966 cyc_to_tsc
, data
->cbr_cyc_to_tsc
, pkt_info
->pos
);
970 decoder
->calc_cyc_to_tsc
= cyc_to_tsc
;
971 decoder
->have_calc_cyc_to_tsc
= true;
974 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. CBR-based value %g, pos " x64_fmt
"\n",
975 cyc_to_tsc
, data
->cbr_cyc_to_tsc
, pkt_info
->pos
);
977 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. unknown CBR-based value, pos " x64_fmt
"\n",
978 cyc_to_tsc
, pkt_info
->pos
);
984 static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder
*decoder
,
987 struct intel_pt_calc_cyc_to_tsc_info data
= {
990 .last_mtc
= decoder
->last_mtc
,
991 .ctc_timestamp
= decoder
->ctc_timestamp
,
992 .ctc_delta
= decoder
->ctc_delta
,
993 .tsc_timestamp
= decoder
->tsc_timestamp
,
994 .timestamp
= decoder
->timestamp
,
995 .have_tma
= decoder
->have_tma
,
996 .fixup_last_mtc
= decoder
->fixup_last_mtc
,
997 .from_mtc
= from_mtc
,
1002 * For now, do not support using TSC packets for at least the reasons:
1003 * 1) timing might have stopped
1004 * 2) TSC packets within PSB+ can slip against CYC packets
1009 intel_pt_pkt_lookahead(decoder
, intel_pt_calc_cyc_cb
, &data
);
1012 static int intel_pt_get_next_packet(struct intel_pt_decoder
*decoder
)
1016 decoder
->last_packet_type
= decoder
->packet
.type
;
1019 decoder
->pos
+= decoder
->pkt_step
;
1020 decoder
->buf
+= decoder
->pkt_step
;
1021 decoder
->len
-= decoder
->pkt_step
;
1023 if (!decoder
->len
) {
1024 ret
= intel_pt_get_next_data(decoder
, false);
1029 decoder
->prev_pkt_ctx
= decoder
->pkt_ctx
;
1030 ret
= intel_pt_get_packet(decoder
->buf
, decoder
->len
,
1031 &decoder
->packet
, &decoder
->pkt_ctx
);
1032 if (ret
== INTEL_PT_NEED_MORE_BYTES
&& BITS_PER_LONG
== 32 &&
1033 decoder
->len
< INTEL_PT_PKT_MAX_SZ
&& !decoder
->next_buf
) {
1034 ret
= intel_pt_get_split_packet(decoder
);
1039 return intel_pt_bad_packet(decoder
);
1041 decoder
->pkt_len
= ret
;
1042 decoder
->pkt_step
= ret
;
1043 intel_pt_decoder_log_packet(decoder
);
1044 } while (decoder
->packet
.type
== INTEL_PT_PAD
);
1049 static uint64_t intel_pt_next_period(struct intel_pt_decoder
*decoder
)
1051 uint64_t timestamp
, masked_timestamp
;
1053 timestamp
= decoder
->timestamp
+ decoder
->timestamp_insn_cnt
;
1054 masked_timestamp
= timestamp
& decoder
->period_mask
;
1055 if (decoder
->continuous_period
) {
1056 if (masked_timestamp
> decoder
->last_masked_timestamp
)
1060 masked_timestamp
= timestamp
& decoder
->period_mask
;
1061 if (masked_timestamp
> decoder
->last_masked_timestamp
) {
1062 decoder
->last_masked_timestamp
= masked_timestamp
;
1063 decoder
->continuous_period
= true;
1067 if (masked_timestamp
< decoder
->last_masked_timestamp
)
1068 return decoder
->period_ticks
;
1070 return decoder
->period_ticks
- (timestamp
- masked_timestamp
);
1073 static uint64_t intel_pt_next_sample(struct intel_pt_decoder
*decoder
)
1075 switch (decoder
->period_type
) {
1076 case INTEL_PT_PERIOD_INSTRUCTIONS
:
1077 return decoder
->period
- decoder
->period_insn_cnt
;
1078 case INTEL_PT_PERIOD_TICKS
:
1079 return intel_pt_next_period(decoder
);
1080 case INTEL_PT_PERIOD_NONE
:
1081 case INTEL_PT_PERIOD_MTC
:
1087 static void intel_pt_sample_insn(struct intel_pt_decoder
*decoder
)
1089 uint64_t timestamp
, masked_timestamp
;
1091 switch (decoder
->period_type
) {
1092 case INTEL_PT_PERIOD_INSTRUCTIONS
:
1093 decoder
->period_insn_cnt
= 0;
1095 case INTEL_PT_PERIOD_TICKS
:
1096 timestamp
= decoder
->timestamp
+ decoder
->timestamp_insn_cnt
;
1097 masked_timestamp
= timestamp
& decoder
->period_mask
;
1098 if (masked_timestamp
> decoder
->last_masked_timestamp
)
1099 decoder
->last_masked_timestamp
= masked_timestamp
;
1101 decoder
->last_masked_timestamp
+= decoder
->period_ticks
;
1103 case INTEL_PT_PERIOD_NONE
:
1104 case INTEL_PT_PERIOD_MTC
:
1109 decoder
->state
.type
|= INTEL_PT_INSTRUCTION
;
1113 * Sample FUP instruction at the same time as reporting the FUP event, so the
1114 * instruction sample gets the same flags as the FUP event.
1116 static void intel_pt_sample_fup_insn(struct intel_pt_decoder
*decoder
)
1118 struct intel_pt_insn intel_pt_insn
;
1119 uint64_t max_insn_cnt
, insn_cnt
= 0;
1122 decoder
->state
.insn_op
= INTEL_PT_OP_OTHER
;
1123 decoder
->state
.insn_len
= 0;
1125 if (!decoder
->branch_enable
|| !decoder
->pge
|| decoder
->hop
||
1126 decoder
->ip
!= decoder
->last_ip
)
1129 if (!decoder
->mtc_insn
)
1130 decoder
->mtc_insn
= true;
1132 max_insn_cnt
= intel_pt_next_sample(decoder
);
1133 if (max_insn_cnt
!= 1)
1136 err
= decoder
->walk_insn(&intel_pt_insn
, &insn_cnt
, &decoder
->ip
,
1137 0, max_insn_cnt
, decoder
->data
);
1138 /* Ignore error, it will be reported next walk anyway */
1142 if (intel_pt_insn
.branch
!= INTEL_PT_BR_NO_BRANCH
) {
1143 intel_pt_log_at("ERROR: Unexpected branch at FUP instruction", decoder
->ip
);
1147 decoder
->tot_insn_cnt
+= insn_cnt
;
1148 decoder
->timestamp_insn_cnt
+= insn_cnt
;
1149 decoder
->sample_insn_cnt
+= insn_cnt
;
1150 decoder
->period_insn_cnt
+= insn_cnt
;
1152 intel_pt_sample_insn(decoder
);
1154 decoder
->state
.type
|= INTEL_PT_INSTRUCTION
;
1155 decoder
->ip
+= intel_pt_insn
.length
;
1158 static int intel_pt_walk_insn(struct intel_pt_decoder
*decoder
,
1159 struct intel_pt_insn
*intel_pt_insn
, uint64_t ip
)
1161 uint64_t max_insn_cnt
, insn_cnt
= 0;
1164 if (!decoder
->mtc_insn
)
1165 decoder
->mtc_insn
= true;
1167 max_insn_cnt
= intel_pt_next_sample(decoder
);
1169 err
= decoder
->walk_insn(intel_pt_insn
, &insn_cnt
, &decoder
->ip
, ip
,
1170 max_insn_cnt
, decoder
->data
);
1172 decoder
->tot_insn_cnt
+= insn_cnt
;
1173 decoder
->timestamp_insn_cnt
+= insn_cnt
;
1174 decoder
->sample_insn_cnt
+= insn_cnt
;
1175 decoder
->period_insn_cnt
+= insn_cnt
;
1178 decoder
->no_progress
= 0;
1179 decoder
->pkt_state
= INTEL_PT_STATE_ERR2
;
1180 intel_pt_log_at("ERROR: Failed to get instruction",
1187 if (ip
&& decoder
->ip
== ip
) {
1192 if (max_insn_cnt
&& insn_cnt
>= max_insn_cnt
)
1193 intel_pt_sample_insn(decoder
);
1195 if (intel_pt_insn
->branch
== INTEL_PT_BR_NO_BRANCH
) {
1196 decoder
->state
.type
= INTEL_PT_INSTRUCTION
;
1197 decoder
->state
.from_ip
= decoder
->ip
;
1198 decoder
->state
.to_ip
= 0;
1199 decoder
->ip
+= intel_pt_insn
->length
;
1200 err
= INTEL_PT_RETURN
;
1204 if (intel_pt_insn
->op
== INTEL_PT_OP_CALL
) {
1205 /* Zero-length calls are excluded */
1206 if (intel_pt_insn
->branch
!= INTEL_PT_BR_UNCONDITIONAL
||
1207 intel_pt_insn
->rel
) {
1208 err
= intel_pt_push(&decoder
->stack
, decoder
->ip
+
1209 intel_pt_insn
->length
);
1213 } else if (intel_pt_insn
->op
== INTEL_PT_OP_RET
) {
1214 decoder
->ret_addr
= intel_pt_pop(&decoder
->stack
);
1217 if (intel_pt_insn
->branch
== INTEL_PT_BR_UNCONDITIONAL
) {
1218 int cnt
= decoder
->no_progress
++;
1220 decoder
->state
.from_ip
= decoder
->ip
;
1221 decoder
->ip
+= intel_pt_insn
->length
+
1223 decoder
->state
.to_ip
= decoder
->ip
;
1224 err
= INTEL_PT_RETURN
;
1227 * Check for being stuck in a loop. This can happen if a
1228 * decoder error results in the decoder erroneously setting the
1229 * ip to an address that is itself in an infinite loop that
1230 * consumes no packets. When that happens, there must be an
1231 * unconditional branch.
1235 decoder
->stuck_ip
= decoder
->state
.to_ip
;
1236 decoder
->stuck_ip_prd
= 1;
1237 decoder
->stuck_ip_cnt
= 1;
1238 } else if (cnt
> decoder
->max_loops
||
1239 decoder
->state
.to_ip
== decoder
->stuck_ip
) {
1240 intel_pt_log_at("ERROR: Never-ending loop",
1241 decoder
->state
.to_ip
);
1242 decoder
->pkt_state
= INTEL_PT_STATE_ERR_RESYNC
;
1245 } else if (!--decoder
->stuck_ip_cnt
) {
1246 decoder
->stuck_ip_prd
+= 1;
1247 decoder
->stuck_ip_cnt
= decoder
->stuck_ip_prd
;
1248 decoder
->stuck_ip
= decoder
->state
.to_ip
;
1251 goto out_no_progress
;
1254 decoder
->no_progress
= 0;
1256 decoder
->state
.insn_op
= intel_pt_insn
->op
;
1257 decoder
->state
.insn_len
= intel_pt_insn
->length
;
1258 memcpy(decoder
->state
.insn
, intel_pt_insn
->buf
,
1259 INTEL_PT_INSN_BUF_SZ
);
1261 if (decoder
->tx_flags
& INTEL_PT_IN_TX
)
1262 decoder
->state
.flags
|= INTEL_PT_IN_TX
;
1267 static void intel_pt_mode_exec_status(struct intel_pt_decoder
*decoder
)
1269 bool iflag
= decoder
->packet
.count
& INTEL_PT_IFLAG
;
1271 decoder
->exec_mode
= decoder
->packet
.payload
;
1272 decoder
->iflag
= iflag
;
1273 decoder
->next_iflag
= iflag
;
1274 decoder
->state
.from_iflag
= iflag
;
1275 decoder
->state
.to_iflag
= iflag
;
1278 static void intel_pt_mode_exec(struct intel_pt_decoder
*decoder
)
1280 bool iflag
= decoder
->packet
.count
& INTEL_PT_IFLAG
;
1282 decoder
->exec_mode
= decoder
->packet
.payload
;
1283 decoder
->next_iflag
= iflag
;
1286 static void intel_pt_sample_iflag(struct intel_pt_decoder
*decoder
)
1288 decoder
->state
.type
|= INTEL_PT_IFLAG_CHG
;
1289 decoder
->state
.from_iflag
= decoder
->iflag
;
1290 decoder
->state
.to_iflag
= decoder
->next_iflag
;
1291 decoder
->iflag
= decoder
->next_iflag
;
1294 static void intel_pt_sample_iflag_chg(struct intel_pt_decoder
*decoder
)
1296 if (decoder
->iflag
!= decoder
->next_iflag
)
1297 intel_pt_sample_iflag(decoder
);
1300 static void intel_pt_clear_fup_event(struct intel_pt_decoder
*decoder
)
1302 decoder
->set_fup_tx_flags
= false;
1303 decoder
->set_fup_ptw
= false;
1304 decoder
->set_fup_mwait
= false;
1305 decoder
->set_fup_pwre
= false;
1306 decoder
->set_fup_exstop
= false;
1307 decoder
->set_fup_bep
= false;
1308 decoder
->set_fup_cfe_ip
= false;
1309 decoder
->set_fup_cfe
= false;
1310 decoder
->evd_cnt
= 0;
1311 decoder
->set_fup_mode_exec
= false;
1312 decoder
->iflag
= decoder
->next_iflag
;
1315 static bool intel_pt_fup_event(struct intel_pt_decoder
*decoder
, bool no_tip
)
1317 enum intel_pt_sample_type type
= decoder
->state
.type
;
1318 bool sample_fup_insn
= false;
1321 decoder
->state
.type
&= ~INTEL_PT_BRANCH
;
1322 decoder
->state
.insn_op
= INTEL_PT_OP_OTHER
;
1323 decoder
->state
.insn_len
= 0;
1325 if (decoder
->set_fup_cfe_ip
|| decoder
->set_fup_cfe
) {
1326 bool ip
= decoder
->set_fup_cfe_ip
;
1328 decoder
->set_fup_cfe_ip
= false;
1329 decoder
->set_fup_cfe
= false;
1330 decoder
->state
.type
|= INTEL_PT_EVT
;
1331 if (!ip
&& decoder
->pge
)
1332 decoder
->state
.type
|= INTEL_PT_BRANCH
;
1333 decoder
->state
.cfe_type
= decoder
->fup_cfe_pkt
.count
;
1334 decoder
->state
.cfe_vector
= decoder
->fup_cfe_pkt
.payload
;
1335 decoder
->state
.evd_cnt
= decoder
->evd_cnt
;
1336 decoder
->state
.evd
= decoder
->evd
;
1337 decoder
->evd_cnt
= 0;
1338 if (ip
|| decoder
->pge
)
1339 decoder
->state
.flags
|= INTEL_PT_FUP_IP
;
1342 if (decoder
->set_fup_mode_exec
) {
1343 decoder
->set_fup_mode_exec
= false;
1344 intel_pt_sample_iflag(decoder
);
1345 sample_fup_insn
= no_tip
;
1348 if (decoder
->set_fup_tx_flags
) {
1349 decoder
->set_fup_tx_flags
= false;
1350 decoder
->tx_flags
= decoder
->fup_tx_flags
;
1351 decoder
->state
.type
|= INTEL_PT_TRANSACTION
;
1352 if (decoder
->fup_tx_flags
& INTEL_PT_ABORT_TX
)
1353 decoder
->state
.type
|= INTEL_PT_BRANCH
;
1354 decoder
->state
.flags
= decoder
->fup_tx_flags
;
1357 if (decoder
->set_fup_ptw
) {
1358 decoder
->set_fup_ptw
= false;
1359 decoder
->state
.type
|= INTEL_PT_PTW
;
1360 decoder
->state
.flags
|= INTEL_PT_FUP_IP
;
1361 decoder
->state
.ptw_payload
= decoder
->fup_ptw_payload
;
1364 if (decoder
->set_fup_mwait
) {
1365 decoder
->set_fup_mwait
= false;
1366 decoder
->state
.type
|= INTEL_PT_MWAIT_OP
;
1367 decoder
->state
.mwait_payload
= decoder
->fup_mwait_payload
;
1370 if (decoder
->set_fup_pwre
) {
1371 decoder
->set_fup_pwre
= false;
1372 decoder
->state
.type
|= INTEL_PT_PWR_ENTRY
;
1373 decoder
->state
.pwre_payload
= decoder
->fup_pwre_payload
;
1376 if (decoder
->set_fup_exstop
) {
1377 decoder
->set_fup_exstop
= false;
1378 decoder
->state
.type
|= INTEL_PT_EX_STOP
;
1379 decoder
->state
.flags
|= INTEL_PT_FUP_IP
;
1382 if (decoder
->set_fup_bep
) {
1383 decoder
->set_fup_bep
= false;
1384 decoder
->state
.type
|= INTEL_PT_BLK_ITEMS
;
1387 if (decoder
->overflow
) {
1388 decoder
->overflow
= false;
1389 if (!ret
&& !decoder
->pge
) {
1391 decoder
->state
.type
= 0;
1392 decoder
->pkt_state
= INTEL_PT_STATE_RESAMPLE
;
1394 decoder
->pge
= true;
1395 decoder
->state
.type
|= INTEL_PT_BRANCH
| INTEL_PT_TRACE_BEGIN
;
1396 decoder
->state
.from_ip
= 0;
1397 decoder
->state
.to_ip
= decoder
->ip
;
1402 decoder
->state
.from_ip
= decoder
->ip
;
1403 decoder
->state
.to_ip
= 0;
1404 if (sample_fup_insn
)
1405 intel_pt_sample_fup_insn(decoder
);
1407 decoder
->state
.type
= type
;
1412 static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder
*decoder
,
1413 struct intel_pt_insn
*intel_pt_insn
,
1414 uint64_t ip
, int err
)
1416 return decoder
->flags
& INTEL_PT_FUP_WITH_NLIP
&& !err
&&
1417 intel_pt_insn
->branch
== INTEL_PT_BR_INDIRECT
&&
1418 ip
== decoder
->ip
+ intel_pt_insn
->length
;
1421 static int intel_pt_walk_fup(struct intel_pt_decoder
*decoder
)
1423 struct intel_pt_insn intel_pt_insn
;
1427 ip
= decoder
->last_ip
;
1430 err
= intel_pt_walk_insn(decoder
, &intel_pt_insn
, ip
);
1431 if (err
== INTEL_PT_RETURN
)
1433 if (err
== -EAGAIN
||
1434 intel_pt_fup_with_nlip(decoder
, &intel_pt_insn
, ip
, err
)) {
1435 bool no_tip
= decoder
->pkt_state
!= INTEL_PT_STATE_FUP
;
1437 decoder
->pkt_state
= INTEL_PT_STATE_IN_SYNC
;
1438 if (intel_pt_fup_event(decoder
, no_tip
) && no_tip
)
1442 decoder
->set_fup_tx_flags
= false;
1446 if (intel_pt_insn
.branch
== INTEL_PT_BR_INDIRECT
) {
1447 intel_pt_log_at("ERROR: Unexpected indirect branch",
1449 decoder
->pkt_state
= INTEL_PT_STATE_ERR_RESYNC
;
1453 if (intel_pt_insn
.branch
== INTEL_PT_BR_CONDITIONAL
) {
1454 intel_pt_log_at("ERROR: Unexpected conditional branch",
1456 decoder
->pkt_state
= INTEL_PT_STATE_ERR_RESYNC
;
1460 intel_pt_bug(decoder
);
1464 static int intel_pt_walk_tip(struct intel_pt_decoder
*decoder
)
1466 struct intel_pt_insn intel_pt_insn
;
1469 err
= intel_pt_walk_insn(decoder
, &intel_pt_insn
, 0);
1470 if (err
== INTEL_PT_RETURN
&&
1472 decoder
->pkt_state
== INTEL_PT_STATE_TIP_PGD
&&
1473 (decoder
->state
.type
& INTEL_PT_BRANCH
) &&
1474 decoder
->pgd_ip(decoder
->state
.to_ip
, decoder
->data
)) {
1475 /* Unconditional branch leaving filter region */
1476 decoder
->no_progress
= 0;
1477 decoder
->pge
= false;
1478 decoder
->continuous_period
= false;
1479 decoder
->pkt_state
= INTEL_PT_STATE_IN_SYNC
;
1480 decoder
->state
.type
|= INTEL_PT_TRACE_END
;
1481 intel_pt_update_nr(decoder
);
1484 if (err
== INTEL_PT_RETURN
)
1489 intel_pt_update_nr(decoder
);
1490 intel_pt_sample_iflag_chg(decoder
);
1492 if (intel_pt_insn
.branch
== INTEL_PT_BR_INDIRECT
) {
1493 if (decoder
->pkt_state
== INTEL_PT_STATE_TIP_PGD
) {
1494 decoder
->pge
= false;
1495 decoder
->continuous_period
= false;
1496 decoder
->pkt_state
= INTEL_PT_STATE_IN_SYNC
;
1497 decoder
->state
.from_ip
= decoder
->ip
;
1498 if (decoder
->packet
.count
== 0) {
1499 decoder
->state
.to_ip
= 0;
1501 decoder
->state
.to_ip
= decoder
->last_ip
;
1502 decoder
->ip
= decoder
->last_ip
;
1504 decoder
->state
.type
|= INTEL_PT_TRACE_END
;
1506 decoder
->pkt_state
= INTEL_PT_STATE_IN_SYNC
;
1507 decoder
->state
.from_ip
= decoder
->ip
;
1508 if (decoder
->packet
.count
== 0) {
1509 decoder
->state
.to_ip
= 0;
1511 decoder
->state
.to_ip
= decoder
->last_ip
;
1512 decoder
->ip
= decoder
->last_ip
;
1518 if (intel_pt_insn
.branch
== INTEL_PT_BR_CONDITIONAL
) {
1519 uint64_t to_ip
= decoder
->ip
+ intel_pt_insn
.length
+
1522 if (decoder
->pgd_ip
&&
1523 decoder
->pkt_state
== INTEL_PT_STATE_TIP_PGD
&&
1524 decoder
->pgd_ip(to_ip
, decoder
->data
)) {
1525 /* Conditional branch leaving filter region */
1526 decoder
->pge
= false;
1527 decoder
->continuous_period
= false;
1528 decoder
->pkt_state
= INTEL_PT_STATE_IN_SYNC
;
1529 decoder
->ip
= to_ip
;
1530 decoder
->state
.from_ip
= decoder
->ip
;
1531 decoder
->state
.to_ip
= to_ip
;
1532 decoder
->state
.type
|= INTEL_PT_TRACE_END
;
1535 intel_pt_log_at("ERROR: Conditional branch when expecting indirect branch",
1537 decoder
->pkt_state
= INTEL_PT_STATE_ERR_RESYNC
;
1541 return intel_pt_bug(decoder
);
1549 static int intel_pt_eptw_lookahead_cb(struct intel_pt_pkt_info
*pkt_info
)
1551 struct eptw_data
*data
= pkt_info
->data
;
1554 switch (pkt_info
->packet
.type
) {
1557 case INTEL_PT_MODE_EXEC
:
1558 case INTEL_PT_MODE_TSX
:
1568 case INTEL_PT_PSBEND
:
1569 case INTEL_PT_PTWRITE
:
1570 case INTEL_PT_PTWRITE_IP
:
1571 case INTEL_PT_EXSTOP
:
1572 case INTEL_PT_EXSTOP_IP
:
1573 case INTEL_PT_MWAIT
:
1579 case INTEL_PT_BEP_IP
:
1581 case INTEL_PT_CFE_IP
:
1586 nr_bits
= data
->bit_countdown
;
1587 if (nr_bits
> pkt_info
->packet
.count
)
1588 nr_bits
= pkt_info
->packet
.count
;
1589 data
->payload
<<= nr_bits
;
1590 data
->payload
|= pkt_info
->packet
.payload
>> (64 - nr_bits
);
1591 data
->bit_countdown
-= nr_bits
;
1592 return !data
->bit_countdown
;
1594 case INTEL_PT_TIP_PGE
:
1595 case INTEL_PT_TIP_PGD
:
1599 case INTEL_PT_TRACESTOP
:
1607 static int intel_pt_emulated_ptwrite(struct intel_pt_decoder
*decoder
)
1609 int n
= 64 - decoder
->tnt
.count
;
1610 struct eptw_data data
= {
1612 .payload
= decoder
->tnt
.payload
>> n
,
1615 decoder
->emulated_ptwrite
= false;
1616 intel_pt_log("Emulated ptwrite detected\n");
1618 intel_pt_pkt_lookahead(decoder
, intel_pt_eptw_lookahead_cb
, &data
);
1619 if (data
.bit_countdown
)
1622 decoder
->state
.type
= INTEL_PT_PTW
;
1623 decoder
->state
.from_ip
= decoder
->ip
;
1624 decoder
->state
.to_ip
= 0;
1625 decoder
->state
.ptw_payload
= data
.payload
;
1629 static int intel_pt_walk_tnt(struct intel_pt_decoder
*decoder
)
1631 struct intel_pt_insn intel_pt_insn
;
1635 if (decoder
->emulated_ptwrite
)
1636 return intel_pt_emulated_ptwrite(decoder
);
1637 err
= intel_pt_walk_insn(decoder
, &intel_pt_insn
, 0);
1638 if (err
== INTEL_PT_RETURN
) {
1639 decoder
->emulated_ptwrite
= intel_pt_insn
.emulated_ptwrite
;
1643 decoder
->emulated_ptwrite
= false;
1647 if (intel_pt_insn
.op
== INTEL_PT_OP_RET
) {
1648 if (!decoder
->return_compression
) {
1649 intel_pt_log_at("ERROR: RET when expecting conditional branch",
1651 decoder
->pkt_state
= INTEL_PT_STATE_ERR3
;
1654 if (!decoder
->ret_addr
) {
1655 intel_pt_log_at("ERROR: Bad RET compression (stack empty)",
1657 decoder
->pkt_state
= INTEL_PT_STATE_ERR3
;
1660 if (!(decoder
->tnt
.payload
& BIT63
)) {
1661 intel_pt_log_at("ERROR: Bad RET compression (TNT=N)",
1663 decoder
->pkt_state
= INTEL_PT_STATE_ERR3
;
1666 decoder
->tnt
.count
-= 1;
1667 if (decoder
->tnt
.count
)
1668 decoder
->pkt_state
= INTEL_PT_STATE_TNT_CONT
;
1670 decoder
->pkt_state
= INTEL_PT_STATE_IN_SYNC
;
1671 decoder
->tnt
.payload
<<= 1;
1672 decoder
->state
.from_ip
= decoder
->ip
;
1673 decoder
->ip
= decoder
->ret_addr
;
1674 decoder
->state
.to_ip
= decoder
->ip
;
1678 if (intel_pt_insn
.branch
== INTEL_PT_BR_INDIRECT
) {
1679 /* Handle deferred TIPs */
1680 err
= intel_pt_get_next_packet(decoder
);
1683 if (decoder
->packet
.type
!= INTEL_PT_TIP
||
1684 decoder
->packet
.count
== 0) {
1685 intel_pt_log_at("ERROR: Missing deferred TIP for indirect branch",
1687 decoder
->pkt_state
= INTEL_PT_STATE_ERR3
;
1688 decoder
->pkt_step
= 0;
1691 intel_pt_set_last_ip(decoder
);
1692 decoder
->state
.from_ip
= decoder
->ip
;
1693 decoder
->state
.to_ip
= decoder
->last_ip
;
1694 decoder
->ip
= decoder
->last_ip
;
1695 intel_pt_update_nr(decoder
);
1696 intel_pt_sample_iflag_chg(decoder
);
1700 if (intel_pt_insn
.branch
== INTEL_PT_BR_CONDITIONAL
) {
1701 decoder
->tnt
.count
-= 1;
1702 if (decoder
->tnt
.count
)
1703 decoder
->pkt_state
= INTEL_PT_STATE_TNT_CONT
;
1705 decoder
->pkt_state
= INTEL_PT_STATE_IN_SYNC
;
1706 if (decoder
->tnt
.payload
& BIT63
) {
1707 decoder
->tnt
.payload
<<= 1;
1708 decoder
->state
.from_ip
= decoder
->ip
;
1709 decoder
->ip
+= intel_pt_insn
.length
+
1711 decoder
->state
.to_ip
= decoder
->ip
;
1714 /* Instruction sample for a non-taken branch */
1715 if (decoder
->state
.type
& INTEL_PT_INSTRUCTION
) {
1716 decoder
->tnt
.payload
<<= 1;
1717 decoder
->state
.type
= INTEL_PT_INSTRUCTION
;
1718 decoder
->state
.from_ip
= decoder
->ip
;
1719 decoder
->state
.to_ip
= 0;
1720 decoder
->ip
+= intel_pt_insn
.length
;
1723 decoder
->sample_cyc
= false;
1724 decoder
->ip
+= intel_pt_insn
.length
;
1725 if (!decoder
->tnt
.count
) {
1726 intel_pt_update_sample_time(decoder
);
1729 decoder
->tnt
.payload
<<= 1;
1733 return intel_pt_bug(decoder
);
1737 static int intel_pt_mode_tsx(struct intel_pt_decoder
*decoder
, bool *no_tip
)
1739 unsigned int fup_tx_flags
;
1742 fup_tx_flags
= decoder
->packet
.payload
&
1743 (INTEL_PT_IN_TX
| INTEL_PT_ABORT_TX
);
1744 err
= intel_pt_get_next_packet(decoder
);
1747 if (decoder
->packet
.type
== INTEL_PT_FUP
) {
1748 decoder
->fup_tx_flags
= fup_tx_flags
;
1749 decoder
->set_fup_tx_flags
= true;
1750 if (!(decoder
->fup_tx_flags
& INTEL_PT_ABORT_TX
))
1753 intel_pt_log_at("ERROR: Missing FUP after MODE.TSX",
1755 intel_pt_update_in_tx(decoder
);
1760 static int intel_pt_evd(struct intel_pt_decoder
*decoder
)
1762 if (decoder
->evd_cnt
>= INTEL_PT_MAX_EVDS
) {
1763 intel_pt_log_at("ERROR: Too many EVD packets", decoder
->pos
);
1766 decoder
->evd
[decoder
->evd_cnt
++] = (struct intel_pt_evd
){
1767 .type
= decoder
->packet
.count
,
1768 .payload
= decoder
->packet
.payload
,
static uint64_t intel_pt_8b_tsc(uint64_t timestamp, uint64_t ref_timestamp)
{
	timestamp |= (ref_timestamp & (0xffULL << 56));

	if (timestamp < ref_timestamp) {
		if (ref_timestamp - timestamp > (1ULL << 55))
			timestamp += (1ULL << 56);
	} else {
		if (timestamp - ref_timestamp > (1ULL << 55))
			timestamp -= (1ULL << 56);
	}

	return timestamp;
}
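
/*
 * Worked example (illustration only, not in the original file): TSC packets
 * carry only the low 7 bytes (56 bits) of the TSC, so the 8th byte is taken
 * from ref_timestamp and then corrected for wraparound.  With
 *   ref_timestamp = 0x03ffffffffffff00 and a 7-byte packet value of 0x100,
 * borrowing byte 0x03 gives 0x0300000000000100, which is more than 2^55 below
 * the reference, so 2^56 is added, yielding 0x0400000000000100 - the low 56
 * bits wrapped and the timestamp belongs to the next 2^56 epoch.
 */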
1788 /* For use only when decoder->vm_time_correlation is true */
1789 static bool intel_pt_time_in_range(struct intel_pt_decoder
*decoder
,
1792 uint64_t max_timestamp
= decoder
->buf_timestamp
;
1794 if (!max_timestamp
) {
1795 max_timestamp
= decoder
->last_reliable_timestamp
+
1798 return timestamp
>= decoder
->last_reliable_timestamp
&&
1799 timestamp
< decoder
->buf_timestamp
;
1802 static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder
*decoder
)
1807 decoder
->have_tma
= false;
1809 if (decoder
->ref_timestamp
) {
1810 timestamp
= intel_pt_8b_tsc(decoder
->packet
.payload
,
1811 decoder
->ref_timestamp
);
1812 decoder
->tsc_timestamp
= timestamp
;
1813 decoder
->timestamp
= timestamp
;
1814 decoder
->ref_timestamp
= 0;
1815 decoder
->timestamp_insn_cnt
= 0;
1816 } else if (decoder
->timestamp
) {
1817 timestamp
= decoder
->packet
.payload
|
1818 (decoder
->timestamp
& (0xffULL
<< 56));
1819 decoder
->tsc_timestamp
= timestamp
;
1820 if (timestamp
< decoder
->timestamp
&&
1821 decoder
->timestamp
- timestamp
< decoder
->tsc_slip
) {
1822 intel_pt_log_to("Suppressing backwards timestamp",
1824 timestamp
= decoder
->timestamp
;
1826 if (timestamp
< decoder
->timestamp
) {
1827 if (!decoder
->buf_timestamp
||
1828 (timestamp
+ (1ULL << 56) < decoder
->buf_timestamp
)) {
1829 intel_pt_log_to("Wraparound timestamp", timestamp
);
1830 timestamp
+= (1ULL << 56);
1831 decoder
->tsc_timestamp
= timestamp
;
1833 intel_pt_log_to("Suppressing bad timestamp", timestamp
);
1834 timestamp
= decoder
->timestamp
;
1838 if (decoder
->vm_time_correlation
&&
1839 (bad
|| !intel_pt_time_in_range(decoder
, timestamp
)) &&
1840 intel_pt_print_once(decoder
, INTEL_PT_PRT_ONCE_ERANGE
))
1841 p_log("Timestamp out of range");
1842 decoder
->timestamp
= timestamp
;
1843 decoder
->timestamp_insn_cnt
= 0;
1846 if (decoder
->last_packet_type
== INTEL_PT_CYC
) {
1847 decoder
->cyc_ref_timestamp
= decoder
->timestamp
;
1848 decoder
->cycle_cnt
= 0;
1849 decoder
->have_calc_cyc_to_tsc
= false;
1850 intel_pt_calc_cyc_to_tsc(decoder
, false);
1853 intel_pt_log_to("Setting timestamp", decoder
->timestamp
);
1856 static int intel_pt_overflow(struct intel_pt_decoder
*decoder
)
1858 intel_pt_log("ERROR: Buffer overflow\n");
1859 intel_pt_clear_tx_flags(decoder
);
1860 intel_pt_set_nr(decoder
);
1861 decoder
->timestamp_insn_cnt
= 0;
1862 decoder
->pkt_state
= INTEL_PT_STATE_IN_SYNC
;
1863 decoder
->state
.from_ip
= decoder
->ip
;
1865 decoder
->pge
= false;
1866 intel_pt_clear_fup_event(decoder
);
1867 decoder
->overflow
= true;
1871 static inline void intel_pt_mtc_cyc_cnt_pge(struct intel_pt_decoder
*decoder
)
1873 if (decoder
->have_cyc
)
1876 decoder
->cyc_cnt_timestamp
= decoder
->timestamp
;
1877 decoder
->base_cyc_cnt
= decoder
->tot_cyc_cnt
;
1880 static inline void intel_pt_mtc_cyc_cnt_cbr(struct intel_pt_decoder
*decoder
)
1882 decoder
->tsc_to_cyc
= decoder
->cbr
/ decoder
->max_non_turbo_ratio_fp
;
1885 intel_pt_mtc_cyc_cnt_pge(decoder
);
1888 static inline void intel_pt_mtc_cyc_cnt_upd(struct intel_pt_decoder
*decoder
)
1890 uint64_t tot_cyc_cnt
, tsc_delta
;
1892 if (decoder
->have_cyc
)
1895 decoder
->sample_cyc
= true;
1897 if (!decoder
->pge
|| decoder
->timestamp
<= decoder
->cyc_cnt_timestamp
)
1900 tsc_delta
= decoder
->timestamp
- decoder
->cyc_cnt_timestamp
;
1901 tot_cyc_cnt
= tsc_delta
* decoder
->tsc_to_cyc
+ decoder
->base_cyc_cnt
;
1903 if (tot_cyc_cnt
> decoder
->tot_cyc_cnt
)
1904 decoder
->tot_cyc_cnt
= tot_cyc_cnt
;
1907 static void intel_pt_calc_tma(struct intel_pt_decoder
*decoder
)
1909 uint32_t ctc
= decoder
->packet
.payload
;
1910 uint32_t fc
= decoder
->packet
.count
;
1911 uint32_t ctc_rem
= ctc
& decoder
->ctc_rem_mask
;
1913 if (!decoder
->tsc_ctc_ratio_d
)
1916 if (decoder
->pge
&& !decoder
->in_psb
)
1917 intel_pt_mtc_cyc_cnt_pge(decoder
);
1919 intel_pt_mtc_cyc_cnt_upd(decoder
);
1921 decoder
->last_mtc
= (ctc
>> decoder
->mtc_shift
) & 0xff;
1922 decoder
->last_ctc
= ctc
- ctc_rem
;
1923 decoder
->ctc_timestamp
= decoder
->tsc_timestamp
- fc
;
1924 if (decoder
->tsc_ctc_mult
) {
1925 decoder
->ctc_timestamp
-= ctc_rem
* decoder
->tsc_ctc_mult
;
1927 decoder
->ctc_timestamp
-= multdiv(ctc_rem
,
1928 decoder
->tsc_ctc_ratio_n
,
1929 decoder
->tsc_ctc_ratio_d
);
1931 decoder
->ctc_delta
= 0;
1932 decoder
->have_tma
= true;
1933 decoder
->fixup_last_mtc
= true;
1934 intel_pt_log("CTC timestamp " x64_fmt
" last MTC %#x CTC rem %#x\n",
1935 decoder
->ctc_timestamp
, decoder
->last_mtc
, ctc_rem
);
1938 static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder
*decoder
)
1941 uint32_t mtc
, mtc_delta
;
1943 if (!decoder
->have_tma
)
1946 mtc
= decoder
->packet
.payload
;
1948 if (decoder
->mtc_shift
> 8 && decoder
->fixup_last_mtc
) {
1949 decoder
->fixup_last_mtc
= false;
1950 intel_pt_fixup_last_mtc(mtc
, decoder
->mtc_shift
,
1951 &decoder
->last_mtc
);
1954 if (mtc
> decoder
->last_mtc
)
1955 mtc_delta
= mtc
- decoder
->last_mtc
;
1957 mtc_delta
= mtc
+ 256 - decoder
->last_mtc
;
1959 decoder
->ctc_delta
+= mtc_delta
<< decoder
->mtc_shift
;
1961 if (decoder
->tsc_ctc_mult
) {
1962 timestamp
= decoder
->ctc_timestamp
+
1963 decoder
->ctc_delta
* decoder
->tsc_ctc_mult
;
1965 timestamp
= decoder
->ctc_timestamp
+
1966 multdiv(decoder
->ctc_delta
,
1967 decoder
->tsc_ctc_ratio_n
,
1968 decoder
->tsc_ctc_ratio_d
);
1971 if (timestamp
< decoder
->timestamp
)
1972 intel_pt_log("Suppressing MTC timestamp " x64_fmt
" less than current timestamp " x64_fmt
"\n",
1973 timestamp
, decoder
->timestamp
);
1975 decoder
->timestamp
= timestamp
;
1977 intel_pt_mtc_cyc_cnt_upd(decoder
);
1979 decoder
->timestamp_insn_cnt
= 0;
1980 decoder
->last_mtc
= mtc
;
1982 if (decoder
->last_packet_type
== INTEL_PT_CYC
) {
1983 decoder
->cyc_ref_timestamp
= decoder
->timestamp
;
1984 decoder
->cycle_cnt
= 0;
1985 decoder
->have_calc_cyc_to_tsc
= false;
1986 intel_pt_calc_cyc_to_tsc(decoder
, true);
1989 intel_pt_log_to("Setting timestamp", decoder
->timestamp
);
1992 static void intel_pt_calc_cbr(struct intel_pt_decoder
*decoder
)
1994 unsigned int cbr
= decoder
->packet
.payload
& 0xff;
1996 decoder
->cbr_payload
= decoder
->packet
.payload
;
1998 if (decoder
->cbr
== cbr
)
2002 decoder
->cbr_cyc_to_tsc
= decoder
->max_non_turbo_ratio_fp
/ cbr
;
2003 decoder
->cyc_ref_timestamp
= decoder
->timestamp
;
2004 decoder
->cycle_cnt
= 0;
2006 intel_pt_mtc_cyc_cnt_cbr(decoder
);
2009 static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder
*decoder
)
2011 uint64_t timestamp
= decoder
->cyc_ref_timestamp
;
2013 decoder
->have_cyc
= true;
2015 decoder
->cycle_cnt
+= decoder
->packet
.payload
;
2017 decoder
->tot_cyc_cnt
+= decoder
->packet
.payload
;
2018 decoder
->sample_cyc
= true;
2020 if (!decoder
->cyc_ref_timestamp
)
2023 if (decoder
->have_calc_cyc_to_tsc
)
2024 timestamp
+= decoder
->cycle_cnt
* decoder
->calc_cyc_to_tsc
;
2025 else if (decoder
->cbr
)
2026 timestamp
+= decoder
->cycle_cnt
* decoder
->cbr_cyc_to_tsc
;
2030 if (timestamp
< decoder
->timestamp
)
2031 intel_pt_log("Suppressing CYC timestamp " x64_fmt
" less than current timestamp " x64_fmt
"\n",
2032 timestamp
, decoder
->timestamp
);
2034 decoder
->timestamp
= timestamp
;
2036 decoder
->timestamp_insn_cnt
= 0;
2038 intel_pt_log_to("Setting timestamp", decoder
->timestamp
);
2041 static void intel_pt_bbp(struct intel_pt_decoder
*decoder
)
2043 if (decoder
->prev_pkt_ctx
== INTEL_PT_NO_CTX
) {
2044 memset(decoder
->state
.items
.mask
, 0, sizeof(decoder
->state
.items
.mask
));
2045 decoder
->state
.items
.is_32_bit
= false;
2047 decoder
->blk_type
= decoder
->packet
.payload
;
2048 decoder
->blk_type_pos
= intel_pt_blk_type_pos(decoder
->blk_type
);
2049 if (decoder
->blk_type
== INTEL_PT_GP_REGS
)
2050 decoder
->state
.items
.is_32_bit
= decoder
->packet
.count
;
2051 if (decoder
->blk_type_pos
< 0) {
2052 intel_pt_log("WARNING: Unknown block type %u\n",
2054 } else if (decoder
->state
.items
.mask
[decoder
->blk_type_pos
]) {
2055 intel_pt_log("WARNING: Duplicate block type %u\n",
2060 static void intel_pt_bip(struct intel_pt_decoder
*decoder
)
2062 uint32_t id
= decoder
->packet
.count
;
2063 uint32_t bit
= 1 << id
;
2064 int pos
= decoder
->blk_type_pos
;
2066 if (pos
< 0 || id
>= INTEL_PT_BLK_ITEM_ID_CNT
) {
2067 intel_pt_log("WARNING: Unknown block item %u type %d\n",
2068 id
, decoder
->blk_type
);
2072 if (decoder
->state
.items
.mask
[pos
] & bit
) {
2073 intel_pt_log("WARNING: Duplicate block item %u type %d\n",
2074 id
, decoder
->blk_type
);
2077 decoder
->state
.items
.mask
[pos
] |= bit
;
2078 decoder
->state
.items
.val
[pos
][id
] = decoder
->packet
.payload
;
2081 /* Walk PSB+ packets when already in sync. */
2082 static int intel_pt_walk_psbend(struct intel_pt_decoder
*decoder
)
2086 decoder
->in_psb
= true;
2089 err
= intel_pt_get_next_packet(decoder
);
2093 switch (decoder
->packet
.type
) {
2094 case INTEL_PT_PSBEND
:
2098 case INTEL_PT_TIP_PGD
:
2099 case INTEL_PT_TIP_PGE
:
2102 case INTEL_PT_TRACESTOP
:
2105 case INTEL_PT_PTWRITE
:
2106 case INTEL_PT_PTWRITE_IP
:
2107 case INTEL_PT_EXSTOP
:
2108 case INTEL_PT_EXSTOP_IP
:
2109 case INTEL_PT_MWAIT
:
2115 case INTEL_PT_BEP_IP
:
2117 case INTEL_PT_CFE_IP
:
2119 decoder
->have_tma
= false;
2120 intel_pt_log("ERROR: Unexpected packet\n");
2125 err
= intel_pt_overflow(decoder
);
2129 intel_pt_calc_tsc_timestamp(decoder
);
2133 intel_pt_calc_tma(decoder
);
2137 intel_pt_calc_cbr(decoder
);
2140 case INTEL_PT_MODE_EXEC
:
2141 intel_pt_mode_exec_status(decoder
);
2145 intel_pt_set_pip(decoder
);
2149 decoder
->pge
= true;
2150 if (decoder
->packet
.count
) {
2151 intel_pt_set_last_ip(decoder
);
2152 decoder
->psb_ip
= decoder
->last_ip
;
2156 case INTEL_PT_MODE_TSX
:
2157 intel_pt_update_in_tx(decoder
);
2161 intel_pt_calc_mtc_timestamp(decoder
);
2162 if (decoder
->period_type
== INTEL_PT_PERIOD_MTC
)
2163 decoder
->state
.type
|= INTEL_PT_INSTRUCTION
;
2167 intel_pt_calc_cyc_timestamp(decoder
);
2178 decoder
->in_psb
= false;
2183 static int intel_pt_walk_fup_tip(struct intel_pt_decoder
*decoder
)
2187 if (decoder
->tx_flags
& INTEL_PT_ABORT_TX
) {
2188 decoder
->tx_flags
= 0;
2189 decoder
->state
.flags
&= ~INTEL_PT_IN_TX
;
2190 decoder
->state
.flags
|= INTEL_PT_ABORT_TX
;
2192 decoder
->state
.flags
|= INTEL_PT_ASYNC
;
2196 err
= intel_pt_get_next_packet(decoder
);
2200 switch (decoder
->packet
.type
) {
2203 case INTEL_PT_TRACESTOP
:
2207 case INTEL_PT_MODE_TSX
:
2209 case INTEL_PT_PSBEND
:
2210 case INTEL_PT_PTWRITE
:
2211 case INTEL_PT_PTWRITE_IP
:
2212 case INTEL_PT_EXSTOP
:
2213 case INTEL_PT_EXSTOP_IP
:
2214 case INTEL_PT_MWAIT
:
2220 case INTEL_PT_BEP_IP
:
2222 case INTEL_PT_CFE_IP
:
2224 intel_pt_log("ERROR: Missing TIP after FUP\n");
2225 decoder
->pkt_state
= INTEL_PT_STATE_ERR3
;
2226 decoder
->pkt_step
= 0;
2230 intel_pt_calc_cbr(decoder
);
2234 return intel_pt_overflow(decoder
);
2236 case INTEL_PT_TIP_PGD
:
2237 decoder
->state
.from_ip
= decoder
->ip
;
2238 if (decoder
->packet
.count
== 0) {
2239 decoder
->state
.to_ip
= 0;
2241 intel_pt_set_ip(decoder
);
2242 decoder
->state
.to_ip
= decoder
->ip
;
2244 decoder
->pge
= false;
2245 decoder
->continuous_period
= false;
2246 decoder
->state
.type
|= INTEL_PT_TRACE_END
;
2247 intel_pt_update_nr(decoder
);
2250 case INTEL_PT_TIP_PGE
:
2251 decoder
->pge
= true;
2252 intel_pt_log("Omitting PGE ip " x64_fmt
"\n",
2254 decoder
->state
.from_ip
= 0;
2255 if (decoder
->packet
.count
== 0) {
2256 decoder
->state
.to_ip
= 0;
2258 intel_pt_set_ip(decoder
);
2259 decoder
->state
.to_ip
= decoder
->ip
;
2261 decoder
->state
.type
|= INTEL_PT_TRACE_BEGIN
;
2262 intel_pt_mtc_cyc_cnt_pge(decoder
);
2263 intel_pt_set_nr(decoder
);
2267 decoder
->state
.from_ip
= decoder
->ip
;
2268 if (decoder
->packet
.count
== 0) {
2269 decoder
->state
.to_ip
= 0;
2271 intel_pt_set_ip(decoder
);
2272 decoder
->state
.to_ip
= decoder
->ip
;
2274 intel_pt_update_nr(decoder
);
2275 intel_pt_sample_iflag_chg(decoder
);
2279 intel_pt_update_pip(decoder
);
2283 intel_pt_calc_mtc_timestamp(decoder
);
2284 if (decoder
->period_type
== INTEL_PT_PERIOD_MTC
)
2285 decoder
->state
.type
|= INTEL_PT_INSTRUCTION
;
2289 intel_pt_calc_cyc_timestamp(decoder
);
2292 case INTEL_PT_MODE_EXEC
:
2293 intel_pt_mode_exec(decoder
);
2302 return intel_pt_bug(decoder
);
2307 static int intel_pt_resample(struct intel_pt_decoder
*decoder
)
2309 decoder
->pkt_state
= INTEL_PT_STATE_IN_SYNC
;
2310 decoder
->state
.type
= INTEL_PT_INSTRUCTION
;
2311 decoder
->state
.from_ip
= decoder
->ip
;
2312 decoder
->state
.to_ip
= 0;
2316 struct intel_pt_vm_tsc_info
{
2317 struct intel_pt_pkt pip_packet
;
2318 struct intel_pt_pkt vmcs_packet
;
2319 struct intel_pt_pkt tma_packet
;
2320 bool tsc
, pip
, vmcs
, tma
, psbend
;
2326 /* Lookahead and get the PIP, VMCS and TMA packets from PSB+ */
2327 static int intel_pt_vm_psb_lookahead_cb(struct intel_pt_pkt_info
*pkt_info
)
2329 struct intel_pt_vm_tsc_info
*data
= pkt_info
->data
;
2331 switch (pkt_info
->packet
.type
) {
2334 case INTEL_PT_MODE_EXEC
:
2335 case INTEL_PT_MODE_TSX
:
2347 data
->tma_packet
= pkt_info
->packet
;
2352 data
->pip_packet
= pkt_info
->packet
;
2357 data
->vmcs_packet
= pkt_info
->packet
;
2361 case INTEL_PT_PSBEND
:
2362 data
->psbend
= true;
2365 case INTEL_PT_TIP_PGE
:
2366 case INTEL_PT_PTWRITE
:
2367 case INTEL_PT_PTWRITE_IP
:
2368 case INTEL_PT_EXSTOP
:
2369 case INTEL_PT_EXSTOP_IP
:
2370 case INTEL_PT_MWAIT
:
2376 case INTEL_PT_BEP_IP
:
2380 case INTEL_PT_TIP_PGD
:
2383 case INTEL_PT_TRACESTOP
:
2385 case INTEL_PT_CFE_IP
:
2394 struct intel_pt_ovf_fup_info
{
2399 /* Lookahead to detect a FUP packet after OVF */
2400 static int intel_pt_ovf_fup_lookahead_cb(struct intel_pt_pkt_info
*pkt_info
)
2402 struct intel_pt_ovf_fup_info
*data
= pkt_info
->data
;
2404 if (pkt_info
->packet
.type
== INTEL_PT_CYC
||
2405 pkt_info
->packet
.type
== INTEL_PT_MTC
||
2406 pkt_info
->packet
.type
== INTEL_PT_TSC
)
2407 return !--(data
->max_lookahead
);
2408 data
->found
= pkt_info
->packet
.type
== INTEL_PT_FUP
;
2412 static bool intel_pt_ovf_fup_lookahead(struct intel_pt_decoder
*decoder
)
2414 struct intel_pt_ovf_fup_info data
= {
2415 .max_lookahead
= 16,
2419 intel_pt_pkt_lookahead(decoder
, intel_pt_ovf_fup_lookahead_cb
, &data
);
/* Lookahead and get the TMA packet after TSC */
static int intel_pt_tma_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
{
	struct intel_pt_vm_tsc_info *data = pkt_info->data;

	if (pkt_info->packet.type == INTEL_PT_CYC ||
	    pkt_info->packet.type == INTEL_PT_MTC)
		return !--(data->max_lookahead);

	if (pkt_info->packet.type == INTEL_PT_TMA) {
		data->tma_packet = pkt_info->packet;
		data->tma = true;
	}
	return 1;
}
static uint64_t intel_pt_ctc_to_tsc(struct intel_pt_decoder *decoder, uint64_t ctc)
{
	if (decoder->tsc_ctc_mult)
		return ctc * decoder->tsc_ctc_mult;
	else
		return multdiv(ctc, decoder->tsc_ctc_ratio_n, decoder->tsc_ctc_ratio_d);
}
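
/*
 * Illustrative sketch, not used by the decoder: the same CTC-to-TSC scaling
 * that intel_pt_ctc_to_tsc() performs, written out with made-up ratio values.
 * A naive multiply like this can overflow for very large CTC counts, which is
 * presumably why the decoder goes through multdiv() instead.
 */
static inline uint64_t intel_pt_example_ctc_to_tsc(uint64_t ctc)
{
	uint32_t ratio_n = 10;	/* example CTC:TSC ratio numerator */
	uint32_t ratio_d = 4;	/* example CTC:TSC ratio denominator */

	/* e.g. ctc = 1000 CTC ticks -> 1000 * 10 / 4 = 2500 TSC ticks */
	return ctc * ratio_n / ratio_d;
}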
static uint64_t intel_pt_calc_expected_tsc(struct intel_pt_decoder *decoder,
					   uint32_t ctc,
					   uint32_t fc,
					   uint64_t last_ctc_timestamp,
					   uint64_t ctc_delta,
					   uint32_t last_ctc)
{
	/* Number of CTC ticks from last_ctc_timestamp to last_mtc */
	uint64_t last_mtc_ctc = last_ctc + ctc_delta;
	/*
	 * Number of CTC ticks from there until current TMA packet. We would
	 * expect last_mtc_ctc to be before ctc, but the TSC packet can slip
	 * past an MTC, so a sign-extended value is used.
	 */
	uint64_t delta = (int16_t)((uint16_t)ctc - (uint16_t)last_mtc_ctc);
	/* Total CTC ticks from last_ctc_timestamp to current TMA packet */
	uint64_t new_ctc_delta = ctc_delta + delta;
	uint64_t expected_tsc;

	/*
	 * Convert CTC ticks to TSC ticks, add the starting point
	 * (last_ctc_timestamp) and the fast counter from the TMA packet.
	 */
	expected_tsc = last_ctc_timestamp + intel_pt_ctc_to_tsc(decoder, new_ctc_delta) + fc;

	if (intel_pt_enable_logging) {
		intel_pt_log_x64(last_mtc_ctc);
		intel_pt_log_x32(last_ctc);
		intel_pt_log_x64(ctc_delta);
		intel_pt_log_x64(delta);
		intel_pt_log_x32(ctc);
		intel_pt_log_x64(new_ctc_delta);
		intel_pt_log_x64(last_ctc_timestamp);
		intel_pt_log_x32(fc);
		intel_pt_log_x64(intel_pt_ctc_to_tsc(decoder, new_ctc_delta));
		intel_pt_log_x64(expected_tsc);
	}

	return expected_tsc;
}
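
/*
 * Illustrative sketch, not part of the decoder: how the sign-extended 16-bit
 * subtraction above behaves, both across a CTC wrap and when a TSC packet
 * slips in front of an MTC so the TMA's CTC is slightly behind the last MTC.
 * The numbers are made up for illustration.
 */
static inline int64_t intel_pt_example_ctc_delta(uint16_t ctc, uint16_t last_mtc_ctc)
{
	/* e.g. ctc = 0x0001, last_mtc_ctc = 0xffff -> delta = +2 (wrapped)   */
	/* e.g. ctc = 0x00f0, last_mtc_ctc = 0x00f8 -> delta = -8 (slipped)   */
	return (int16_t)(ctc - last_mtc_ctc);
}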
static uint64_t intel_pt_expected_tsc(struct intel_pt_decoder *decoder,
				      struct intel_pt_vm_tsc_info *data)
{
	uint32_t ctc = data->tma_packet.payload;
	uint32_t fc = data->tma_packet.count;

	return intel_pt_calc_expected_tsc(decoder, ctc, fc,
					  decoder->ctc_timestamp,
					  data->ctc_delta, data->last_ctc);
}
static void intel_pt_translate_vm_tsc(struct intel_pt_decoder *decoder,
				      struct intel_pt_vmcs_info *vmcs_info)
{
	uint64_t payload = decoder->packet.payload;

	/* VMX adds the TSC Offset, so subtract to get host TSC */
	decoder->packet.payload -= vmcs_info->tsc_offset;
	/* TSC packet has only 7 bytes */
	decoder->packet.payload &= SEVEN_BYTES;

	/*
	 * The buffer is mmapped from the data file, so this also updates the
	 * data file.
	 */
	if (!decoder->vm_tm_corr_dry_run)
		memcpy((void *)decoder->buf + 1, &decoder->packet.payload, 7);

	intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
		     " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
		     payload, decoder->packet.payload, vmcs_info->vmcs,
		     vmcs_info->tsc_offset);
}
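
/*
 * Illustrative sketch, not part of the decoder: guest-to-host TSC translation
 * as done above, i.e. subtract the VMCS TSC Offset and keep only the 7 bytes
 * that a TSC packet actually carries. Values are made up for illustration.
 */
static inline uint64_t intel_pt_example_guest_to_host_tsc(uint64_t guest_tsc,
							  uint64_t tsc_offset)
{
	/*
	 * e.g. guest_tsc = 0x00123456789abc, tsc_offset = 0x00000000001000
	 *      -> host TSC (7 bytes) = 0x00123456788abc
	 */
	return (guest_tsc - tsc_offset) & SEVEN_BYTES;
}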
static void intel_pt_translate_vm_tsc_offset(struct intel_pt_decoder *decoder,
					     uint64_t tsc_offset)
{
	struct intel_pt_vmcs_info vmcs_info = {
		.tsc_offset = tsc_offset
	};

	intel_pt_translate_vm_tsc(decoder, &vmcs_info);
}
static inline bool in_vm(uint64_t pip_payload)
{
	return pip_payload & 1;
}

static inline bool pip_in_vm(struct intel_pt_pkt *pip_packet)
{
	return pip_packet->payload & 1;
}

static void intel_pt_print_vmcs_info(struct intel_pt_vmcs_info *vmcs_info)
{
	p_log("VMCS: %#" PRIx64 " TSC Offset %#" PRIx64,
	      vmcs_info->vmcs, vmcs_info->tsc_offset);
}
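
/*
 * Illustrative sketch, not part of the decoder: bit 0 of the PIP payload, as
 * stored by the packet decoder, is the NR (non-root) flag tested by in_vm()
 * and pip_in_vm() above, so an odd payload means the trace came from a guest.
 * The example value below is made up.
 */
static inline bool intel_pt_example_pip_is_guest(uint64_t pip_payload)
{
	/* e.g. pip_payload = 0x1b4002001 -> NR bit set -> guest */
	return pip_payload & 1;
}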
static void intel_pt_vm_tm_corr_psb(struct intel_pt_decoder *decoder,
				    struct intel_pt_vm_tsc_info *data)
{
	memset(data, 0, sizeof(*data));
	data->ctc_delta = decoder->ctc_delta;
	data->last_ctc = decoder->last_ctc;
	intel_pt_pkt_lookahead(decoder, intel_pt_vm_psb_lookahead_cb, data);
	if (data->tsc && !data->psbend)
		p_log("ERROR: PSB without PSBEND");
	decoder->in_psb = data->psbend;
}
static void intel_pt_vm_tm_corr_first_tsc(struct intel_pt_decoder *decoder,
					  struct intel_pt_vm_tsc_info *data,
					  struct intel_pt_vmcs_info *vmcs_info,
					  uint64_t host_tsc)
{
	if (!decoder->in_psb) {
		p_log("ERROR: First TSC is not in PSB+");
		return;
	}

	if (data->pip) {
		if (pip_in_vm(&data->pip_packet)) { /* Guest */
			if (vmcs_info && vmcs_info->tsc_offset) {
				intel_pt_translate_vm_tsc(decoder, vmcs_info);
				decoder->vm_tm_corr_reliable = true;
			} else {
				p_log("ERROR: First TSC, unknown TSC Offset");
			}
		} else { /* Host */
			decoder->vm_tm_corr_reliable = true;
		}
	} else { /* Host or Guest */
		decoder->vm_tm_corr_reliable = false;
		if (intel_pt_time_in_range(decoder, host_tsc)) {
			/* Assume Host */
		} else { /* Assume Guest */
			if (vmcs_info && vmcs_info->tsc_offset)
				intel_pt_translate_vm_tsc(decoder, vmcs_info);
			else
				p_log("ERROR: First TSC, no PIP, unknown TSC Offset");
		}
	}
}
static void intel_pt_vm_tm_corr_tsc(struct intel_pt_decoder *decoder,
				    struct intel_pt_vm_tsc_info *data)
{
	struct intel_pt_vmcs_info *vmcs_info;
	uint64_t tsc_offset = 0;
	uint64_t vmcs;
	bool reliable = true;
	uint64_t expected_tsc;
	uint64_t host_tsc;
	uint64_t ref_timestamp;

	bool assign = false;
	bool assign_reliable = false;

	/* Already have 'data' for the in_psb case */
	if (!decoder->in_psb) {
		memset(data, 0, sizeof(*data));
		data->ctc_delta = decoder->ctc_delta;
		data->last_ctc = decoder->last_ctc;
		data->max_lookahead = 16;
		intel_pt_pkt_lookahead(decoder, intel_pt_tma_lookahead_cb, data);
		data->pip_packet.payload = decoder->pip_payload;
	}

	/* Calculations depend on having TMA packets */
	if (!data->tma) {
		p_log("ERROR: TSC without TMA");
		return;
	}

	vmcs = data->vmcs ? data->vmcs_packet.payload : decoder->vmcs;
	if (vmcs == NO_VMCS)
		vmcs = 0;

	vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);

	ref_timestamp = decoder->timestamp ? decoder->timestamp : decoder->buf_timestamp;
	host_tsc = intel_pt_8b_tsc(decoder->packet.payload, ref_timestamp);

	if (!decoder->ctc_timestamp) {
		intel_pt_vm_tm_corr_first_tsc(decoder, data, vmcs_info, host_tsc);
		return;
	}

	expected_tsc = intel_pt_expected_tsc(decoder, data);

	tsc_offset = host_tsc - expected_tsc;

	/* Determine if TSC is from Host or Guest */
	if (data->pip) {
		if (pip_in_vm(&data->pip_packet)) { /* Guest */
			if (!vmcs_info) {
				/* PIP NR=1 without VMCS cannot happen */
				p_log("ERROR: Missing VMCS");
				intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
				decoder->vm_tm_corr_reliable = false;
				return;
			}
		} else { /* Host */
			decoder->last_reliable_timestamp = host_tsc;
			decoder->vm_tm_corr_reliable = true;
			return;
		}
	} else { /* Host or Guest */
		reliable = false; /* Host/Guest is a guess, so not reliable */
		if (decoder->in_psb) {
			if (!tsc_offset)
				return; /* Zero TSC Offset, assume Host */
			/*
			 * TSC packet has only 7 bytes of TSC. We have no
			 * information about the Guest's 8th byte, but it
			 * doesn't matter because we only need 7 bytes.
			 * Here, since the 8th byte is unreliable and
			 * irrelevant, compare only 7 bytes.
			 */
			if (vmcs_info &&
			    (tsc_offset & SEVEN_BYTES) ==
			    (vmcs_info->tsc_offset & SEVEN_BYTES)) {
				/* Same TSC Offset as last VMCS, assume Guest */
				goto guest;
			}
		}
		/*
		 * Check if the host_tsc is within the expected range.
		 * Note, we could narrow the range more by looking ahead for
		 * the next host TSC in the same buffer, but we don't bother to
		 * do that because this is probably good enough.
		 */
		if (host_tsc >= expected_tsc && intel_pt_time_in_range(decoder, host_tsc)) {
			/* Within expected range for Host TSC, assume Host */
			decoder->vm_tm_corr_reliable = false;
			return;
		}
	}

guest: /* Assuming Guest */

	/* Determine whether to assign TSC Offset */
	if (vmcs_info && vmcs_info->vmcs) {
		if (vmcs_info->tsc_offset && vmcs_info->reliable) {
		} else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_reliable &&
			   decoder->vm_tm_corr_continuous && decoder->vm_tm_corr_same_buf) {
			/* Continuous tracing, TSC in a PSB is not a time loss */
			assign = true;
			assign_reliable = true;
		} else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_same_buf) {
			/*
			 * Unlikely to be a time loss TSC in a PSB which is not
			 * at the start of a buffer.
			 */
			assign = true;
			assign_reliable = false;
		}
	}

	/* Record VMCS TSC Offset */
	if (assign && (vmcs_info->tsc_offset != tsc_offset ||
		       vmcs_info->reliable != assign_reliable)) {
		bool print = vmcs_info->tsc_offset != tsc_offset;

		vmcs_info->tsc_offset = tsc_offset;
		vmcs_info->reliable = assign_reliable;
		if (print)
			intel_pt_print_vmcs_info(vmcs_info);
	}

	/* Determine what TSC Offset to use */
	if (vmcs_info && vmcs_info->tsc_offset) {
		if (!vmcs_info->reliable)
			reliable = false;
		intel_pt_translate_vm_tsc(decoder, vmcs_info);
	} else {
		if (vmcs_info) {
			if (!vmcs_info->error_printed) {
				p_log("ERROR: Unknown TSC Offset for VMCS %#" PRIx64,
				      vmcs_info->vmcs);
				vmcs_info->error_printed = true;
			}
		} else {
			if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
				p_log("ERROR: Unknown VMCS");
		}
		intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
	}

	decoder->vm_tm_corr_reliable = reliable;
}
static void intel_pt_vm_tm_corr_pebs_tsc(struct intel_pt_decoder *decoder)
{
	uint64_t host_tsc = decoder->packet.payload;
	uint64_t guest_tsc = decoder->packet.payload;
	struct intel_pt_vmcs_info *vmcs_info;
	uint64_t vmcs;

	vmcs = decoder->vmcs;
	if (vmcs == NO_VMCS)
		vmcs = 0;

	vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);

	if (in_vm(decoder->pip_payload)) { /* Guest */
		if (!vmcs_info) {
			/* PIP NR=1 without VMCS cannot happen */
			p_log("ERROR: Missing VMCS");
		}
	} else { /* Host or Guest */
		if (intel_pt_time_in_range(decoder, host_tsc)) {
			/* Within expected range for Host TSC, assume Host */
			return;
		}
	}

	if (vmcs_info) {
		/* Translate Guest TSC to Host TSC */
		host_tsc = ((guest_tsc & SEVEN_BYTES) - vmcs_info->tsc_offset) & SEVEN_BYTES;
		host_tsc = intel_pt_8b_tsc(host_tsc, decoder->timestamp);
		intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
			     " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
			     guest_tsc, host_tsc, vmcs_info->vmcs,
			     vmcs_info->tsc_offset);
		if (!intel_pt_time_in_range(decoder, host_tsc) &&
		    intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
			p_log("Timestamp out of range");
	} else {
		if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
			p_log("ERROR: Unknown VMCS");
		host_tsc = decoder->timestamp;
	}

	decoder->packet.payload = host_tsc;

	if (!decoder->vm_tm_corr_dry_run)
		memcpy((void *)decoder->buf + 1, &host_tsc, 8);
}
static int intel_pt_vm_time_correlation(struct intel_pt_decoder *decoder)
{
	struct intel_pt_vm_tsc_info data = { .psbend = false };
	bool pge;
	int err;

	if (decoder->in_psb)
		intel_pt_vm_tm_corr_psb(decoder, &data);

	while (1) {
		err = intel_pt_get_next_packet(decoder);
		if (err == -ENOLINK)
			continue;
		if (err)
			break;

		switch (decoder->packet.type) {
		case INTEL_PT_TIP_PGD:
			decoder->pge = false;
			decoder->vm_tm_corr_continuous = false;
			break;

		case INTEL_PT_TIP_PGE:
			decoder->pge = true;
			break;

		case INTEL_PT_OVF:
			decoder->in_psb = false;
			pge = decoder->pge;
			decoder->pge = intel_pt_ovf_fup_lookahead(decoder);
			if (pge != decoder->pge)
				intel_pt_log("Surprising PGE change in OVF!");
			if (!decoder->pge)
				decoder->vm_tm_corr_continuous = false;
			break;

		case INTEL_PT_FUP:
			if (decoder->in_psb)
				decoder->pge = true;
			break;

		case INTEL_PT_TRACESTOP:
			decoder->pge = false;
			decoder->vm_tm_corr_continuous = false;
			decoder->have_tma = false;
			break;

		case INTEL_PT_PSB:
			intel_pt_vm_tm_corr_psb(decoder, &data);
			break;

		case INTEL_PT_PIP:
			decoder->pip_payload = decoder->packet.payload;
			break;

		case INTEL_PT_MTC:
			intel_pt_calc_mtc_timestamp(decoder);
			break;

		case INTEL_PT_TSC:
			intel_pt_vm_tm_corr_tsc(decoder, &data);
			intel_pt_calc_tsc_timestamp(decoder);
			decoder->vm_tm_corr_same_buf = true;
			decoder->vm_tm_corr_continuous = decoder->pge;
			break;

		case INTEL_PT_TMA:
			intel_pt_calc_tma(decoder);
			break;

		case INTEL_PT_CYC:
			intel_pt_calc_cyc_timestamp(decoder);
			break;

		case INTEL_PT_CBR:
			intel_pt_calc_cbr(decoder);
			break;

		case INTEL_PT_PSBEND:
			decoder->in_psb = false;
			data.psbend = false;
			break;

		case INTEL_PT_VMCS:
			if (decoder->packet.payload != NO_VMCS)
				decoder->vmcs = decoder->packet.payload;
			break;

		case INTEL_PT_BBP:
			decoder->blk_type = decoder->packet.payload;
			break;

		case INTEL_PT_BIP:
			if (decoder->blk_type == INTEL_PT_PEBS_BASIC &&
			    decoder->packet.count == 2)
				intel_pt_vm_tm_corr_pebs_tsc(decoder);
			break;

		case INTEL_PT_BEP_IP:
			decoder->blk_type = 0;
			break;

		case INTEL_PT_CFE_IP:
		case INTEL_PT_MODE_EXEC:
		case INTEL_PT_MODE_TSX:
		case INTEL_PT_PTWRITE_IP:
		case INTEL_PT_PTWRITE:
		case INTEL_PT_MWAIT:
		case INTEL_PT_EXSTOP_IP:
		case INTEL_PT_EXSTOP:
			break;

		case INTEL_PT_BAD: /* Does not happen */
		default:
			return intel_pt_bug(decoder);
		}
	}

	return err;
}
#define HOP_PROCESS 0
#define HOP_IGNORE  1
#define HOP_RETURN  2
#define HOP_AGAIN   3

static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
/* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
{
	/* Leap from PSB to PSB, getting ip from FUP within PSB+ */
	if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
		*err = intel_pt_scan_for_psb(decoder);
		if (*err)
			return HOP_RETURN;
	}

	switch (decoder->packet.type) {
	case INTEL_PT_TNT:
		return HOP_IGNORE;

	case INTEL_PT_TIP_PGD:
		decoder->pge = false;
		if (!decoder->packet.count) {
			intel_pt_set_nr(decoder);
			return HOP_IGNORE;
		}
		intel_pt_set_ip(decoder);
		decoder->state.type |= INTEL_PT_TRACE_END;
		decoder->state.from_ip = 0;
		decoder->state.to_ip = decoder->ip;
		intel_pt_update_nr(decoder);
		return HOP_RETURN;

	case INTEL_PT_TIP:
		if (!decoder->packet.count) {
			intel_pt_set_nr(decoder);
			return HOP_IGNORE;
		}
		intel_pt_set_ip(decoder);
		decoder->state.type = INTEL_PT_INSTRUCTION;
		decoder->state.from_ip = decoder->ip;
		decoder->state.to_ip = 0;
		intel_pt_update_nr(decoder);
		intel_pt_sample_iflag_chg(decoder);
		return HOP_RETURN;

	case INTEL_PT_FUP:
		if (!decoder->packet.count)
			return HOP_IGNORE;
		intel_pt_set_ip(decoder);
		if (decoder->set_fup_mwait || decoder->set_fup_pwre)
			*no_tip = true;
		if (!decoder->branch_enable || !decoder->pge)
			*no_tip = true;
		if (*no_tip) {
			decoder->state.type = INTEL_PT_INSTRUCTION;
			decoder->state.from_ip = decoder->ip;
			decoder->state.to_ip = 0;
			intel_pt_fup_event(decoder, *no_tip);
			return HOP_RETURN;
		}
		intel_pt_fup_event(decoder, *no_tip);
		decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH;
		*err = intel_pt_walk_fup_tip(decoder);
		if (!*err && decoder->state.to_ip)
			decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
		return HOP_RETURN;

	case INTEL_PT_PSB:
		decoder->state.psb_offset = decoder->pos;
		decoder->psb_ip = 0;
		decoder->last_ip = 0;
		decoder->have_last_ip = true;
		*err = intel_pt_walk_psbend(decoder);
		if (*err == -EAGAIN)
			return HOP_AGAIN;
		if (*err)
			return HOP_RETURN;
		decoder->state.type = INTEL_PT_PSB_EVT;
		if (decoder->psb_ip) {
			decoder->state.type |= INTEL_PT_INSTRUCTION;
			decoder->ip = decoder->psb_ip;
		}
		decoder->state.from_ip = decoder->psb_ip;
		decoder->state.to_ip = 0;
		return HOP_RETURN;

	case INTEL_PT_TIP_PGE:
	case INTEL_PT_MODE_EXEC:
	case INTEL_PT_MODE_TSX:
	case INTEL_PT_PSBEND:
	case INTEL_PT_TRACESTOP:
	case INTEL_PT_PTWRITE:
	case INTEL_PT_PTWRITE_IP:
	case INTEL_PT_EXSTOP:
	case INTEL_PT_EXSTOP_IP:
	case INTEL_PT_MWAIT:
	case INTEL_PT_BEP_IP:
	case INTEL_PT_CFE_IP:
	default:
		return HOP_PROCESS;
	}
}
struct intel_pt_psb_info {
	struct intel_pt_pkt fup_packet;
	bool fup;
	int after_psbend;
};

/* Lookahead and get the FUP packet from PSB+ */
static int intel_pt_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
{
	struct intel_pt_psb_info *data = pkt_info->data;

	switch (pkt_info->packet.type) {
	case INTEL_PT_MODE_EXEC:
	case INTEL_PT_MODE_TSX:
		if (data->after_psbend) {
			data->after_psbend -= 1;
			if (!data->after_psbend)
				return 1;
		}
		break;

	case INTEL_PT_FUP:
		if (data->after_psbend)
			return 1;
		if (data->fup || pkt_info->packet.count == 0)
			break;
		data->fup_packet = pkt_info->packet;
		data->fup = true;
		break;

	case INTEL_PT_PSBEND:
		if (!data->fup)
			return 1;
		/* Keep going to check for a TIP.PGE */
		data->after_psbend = 6;
		break;

	case INTEL_PT_TIP_PGE:
		/* Ignore FUP in PSB+ if followed by TIP.PGE */
		if (data->after_psbend)
			data->fup = false;
		return 1;

	case INTEL_PT_PTWRITE:
	case INTEL_PT_PTWRITE_IP:
	case INTEL_PT_EXSTOP:
	case INTEL_PT_EXSTOP_IP:
	case INTEL_PT_MWAIT:
	case INTEL_PT_BEP_IP:
	case INTEL_PT_CFE_IP:
		if (data->after_psbend) {
			data->after_psbend -= 1;
			if (!data->after_psbend)
				return 1;
			break;
		}
		return 1;

	case INTEL_PT_TIP_PGD:
	case INTEL_PT_TRACESTOP:
	default:
		return 1;
	}

	return 0;
}
static int intel_pt_psb(struct intel_pt_decoder *decoder)
{
	int err;

	decoder->last_ip = 0;
	decoder->psb_ip = 0;
	decoder->have_last_ip = true;
	intel_pt_clear_stack(&decoder->stack);
	err = intel_pt_walk_psbend(decoder);
	if (err)
		return err;
	decoder->state.type = INTEL_PT_PSB_EVT;
	decoder->state.from_ip = decoder->psb_ip;
	decoder->state.to_ip = 0;
	return 0;
}
static int intel_pt_fup_in_psb(struct intel_pt_decoder *decoder)
{
	int err;

	if (decoder->ip != decoder->last_ip) {
		err = intel_pt_walk_fup(decoder);
		if (!err || err != -EAGAIN)
			return err;
	}

	decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
	err = intel_pt_psb(decoder);
	if (err)
		decoder->pkt_state = INTEL_PT_STATE_ERR3;

	return err;
}
static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
{
	struct intel_pt_psb_info data = { .fup = false };

	if (!decoder->branch_enable)
		return false;

	intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
	if (!data.fup)
		return false;

	decoder->packet = data.fup_packet;
	intel_pt_set_last_ip(decoder);
	decoder->pkt_state = INTEL_PT_STATE_FUP_IN_PSB;

	*err = intel_pt_fup_in_psb(decoder);

	return true;
}
static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
{
	int last_packet_type = INTEL_PT_PAD;
	bool no_tip = false;
	int err;

	while (1) {
		err = intel_pt_get_next_packet(decoder);
		if (err)
			return err;
next:
		if (decoder->cyc_threshold) {
			if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
				decoder->sample_cyc = false;
			last_packet_type = decoder->packet.type;
		}

		if (decoder->hop) {
			switch (intel_pt_hop_trace(decoder, &no_tip, &err)) {
			case HOP_IGNORE:
				continue;
			case HOP_RETURN:
				return err;
			case HOP_AGAIN:
				goto next;
			default:
				break;
			}
		}

		switch (decoder->packet.type) {
		case INTEL_PT_TNT:
			if (!decoder->packet.count)
				break;
			decoder->tnt = decoder->packet;
			decoder->pkt_state = INTEL_PT_STATE_TNT;
			err = intel_pt_walk_tnt(decoder);
			if (err == -EAGAIN)
				break;
			return err;

		case INTEL_PT_TIP_PGD:
			if (decoder->packet.count != 0)
				intel_pt_set_last_ip(decoder);
			decoder->pkt_state = INTEL_PT_STATE_TIP_PGD;
			return intel_pt_walk_tip(decoder);

		case INTEL_PT_TIP_PGE: {
			decoder->pge = true;
			decoder->overflow = false;
			intel_pt_mtc_cyc_cnt_pge(decoder);
			intel_pt_set_nr(decoder);
			if (decoder->packet.count == 0) {
				intel_pt_log_at("Skipping zero TIP.PGE",
						decoder->pos);
				break;
			}
			intel_pt_sample_iflag_chg(decoder);
			intel_pt_set_ip(decoder);
			decoder->state.from_ip = 0;
			decoder->state.to_ip = decoder->ip;
			decoder->state.type |= INTEL_PT_TRACE_BEGIN;
			/*
			 * In hop mode, resample to get the to_ip as an
			 * "instruction" sample.
			 */
			if (decoder->hop)
				decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
			return 0;
		}

		case INTEL_PT_OVF:
			return intel_pt_overflow(decoder);

		case INTEL_PT_TIP:
			if (decoder->packet.count != 0)
				intel_pt_set_last_ip(decoder);
			decoder->pkt_state = INTEL_PT_STATE_TIP;
			return intel_pt_walk_tip(decoder);

		case INTEL_PT_FUP:
			if (decoder->packet.count == 0) {
				intel_pt_log_at("Skipping zero FUP",
						decoder->pos);
				no_tip = false;
				break;
			}
			intel_pt_set_last_ip(decoder);
			if (!decoder->branch_enable || !decoder->pge) {
				decoder->ip = decoder->last_ip;
				if (intel_pt_fup_event(decoder, no_tip))
					return 0;
				no_tip = false;
				break;
			}
			if (decoder->set_fup_mwait)
				no_tip = true;
			if (no_tip)
				decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP;
			else
				decoder->pkt_state = INTEL_PT_STATE_FUP;
			err = intel_pt_walk_fup(decoder);
			if (err != -EAGAIN)
				return err;
			if (no_tip) {
				no_tip = false;
				break;
			}
			return intel_pt_walk_fup_tip(decoder);

		case INTEL_PT_TRACESTOP:
			decoder->pge = false;
			decoder->continuous_period = false;
			intel_pt_clear_tx_flags(decoder);
			decoder->have_tma = false;
			break;

		case INTEL_PT_PSB:
			decoder->state.psb_offset = decoder->pos;
			decoder->psb_ip = 0;
			if (intel_pt_psb_with_fup(decoder, &err))
				return err;
			err = intel_pt_psb(decoder);
			if (err == -EAGAIN)
				goto next;
			return err;

		case INTEL_PT_PIP:
			intel_pt_update_pip(decoder);
			break;

		case INTEL_PT_MTC:
			intel_pt_calc_mtc_timestamp(decoder);
			if (decoder->period_type != INTEL_PT_PERIOD_MTC)
				break;
			/*
			 * Ensure that there has been an instruction since the
			 * last MTC.
			 */
			if (!decoder->mtc_insn)
				break;
			decoder->mtc_insn = false;
			/* Ensure that there is a timestamp */
			if (!decoder->timestamp)
				break;
			decoder->state.type = INTEL_PT_INSTRUCTION;
			decoder->state.from_ip = decoder->ip;
			decoder->state.to_ip = 0;
			decoder->mtc_insn = false;
			return 0;

		case INTEL_PT_TSC:
			intel_pt_calc_tsc_timestamp(decoder);
			break;

		case INTEL_PT_TMA:
			intel_pt_calc_tma(decoder);
			break;

		case INTEL_PT_CYC:
			intel_pt_calc_cyc_timestamp(decoder);
			break;

		case INTEL_PT_CBR:
			intel_pt_calc_cbr(decoder);
			if (decoder->cbr != decoder->cbr_seen) {
				decoder->state.type = 0;
				return 0;
			}
			break;

		case INTEL_PT_MODE_EXEC:
			intel_pt_mode_exec(decoder);
			err = intel_pt_get_next_packet(decoder);
			if (err)
				return err;
			if (decoder->packet.type == INTEL_PT_FUP) {
				decoder->set_fup_mode_exec = true;
				no_tip = true;
			}
			goto next;

		case INTEL_PT_MODE_TSX:
			/* MODE_TSX need not be followed by FUP */
			if (!decoder->pge || decoder->in_psb) {
				intel_pt_update_in_tx(decoder);
				break;
			}
			err = intel_pt_mode_tsx(decoder, &no_tip);
			if (err)
				return err;
			goto next;

		case INTEL_PT_BAD: /* Does not happen */
			return intel_pt_bug(decoder);

		case INTEL_PT_PSBEND:
			break;

		case INTEL_PT_PTWRITE_IP:
			decoder->fup_ptw_payload = decoder->packet.payload;
			err = intel_pt_get_next_packet(decoder);
			if (err)
				return err;
			if (decoder->packet.type == INTEL_PT_FUP) {
				decoder->set_fup_ptw = true;
				no_tip = true;
			} else {
				intel_pt_log_at("ERROR: Missing FUP after PTWRITE",
						decoder->pos);
			}
			goto next;

		case INTEL_PT_PTWRITE:
			decoder->state.type = INTEL_PT_PTW;
			decoder->state.from_ip = decoder->ip;
			decoder->state.to_ip = 0;
			decoder->state.ptw_payload = decoder->packet.payload;
			return 0;

		case INTEL_PT_MWAIT:
			decoder->fup_mwait_payload = decoder->packet.payload;
			decoder->set_fup_mwait = true;
			break;

		case INTEL_PT_PWRE:
			if (decoder->set_fup_mwait) {
				decoder->fup_pwre_payload =
						decoder->packet.payload;
				decoder->set_fup_pwre = true;
				break;
			}
			decoder->state.type = INTEL_PT_PWR_ENTRY;
			decoder->state.from_ip = decoder->ip;
			decoder->state.to_ip = 0;
			decoder->state.pwrx_payload = decoder->packet.payload;
			return 0;

		case INTEL_PT_EXSTOP_IP:
			err = intel_pt_get_next_packet(decoder);
			if (err)
				return err;
			if (decoder->packet.type == INTEL_PT_FUP) {
				decoder->set_fup_exstop = true;
				no_tip = true;
			} else {
				intel_pt_log_at("ERROR: Missing FUP after EXSTOP",
						decoder->pos);
			}
			goto next;

		case INTEL_PT_EXSTOP:
			decoder->state.type = INTEL_PT_EX_STOP;
			decoder->state.from_ip = decoder->ip;
			decoder->state.to_ip = 0;
			return 0;

		case INTEL_PT_PWRX:
			decoder->state.type = INTEL_PT_PWR_EXIT;
			decoder->state.from_ip = decoder->ip;
			decoder->state.to_ip = 0;
			decoder->state.pwrx_payload = decoder->packet.payload;
			return 0;

		case INTEL_PT_BBP:
			intel_pt_bbp(decoder);
			break;

		case INTEL_PT_BIP:
			intel_pt_bip(decoder);
			break;

		case INTEL_PT_BEP:
			decoder->state.type = INTEL_PT_BLK_ITEMS;
			decoder->state.from_ip = decoder->ip;
			decoder->state.to_ip = 0;
			return 0;

		case INTEL_PT_BEP_IP:
			err = intel_pt_get_next_packet(decoder);
			if (err)
				return err;
			if (decoder->packet.type == INTEL_PT_FUP) {
				decoder->set_fup_bep = true;
				no_tip = true;
			} else {
				intel_pt_log_at("ERROR: Missing FUP after BEP",
						decoder->pos);
			}
			goto next;

		case INTEL_PT_CFE:
			decoder->fup_cfe_pkt = decoder->packet;
			decoder->set_fup_cfe = true;
			if (!decoder->pge) {
				intel_pt_fup_event(decoder, true);
				return 0;
			}
			break;

		case INTEL_PT_CFE_IP:
			decoder->fup_cfe_pkt = decoder->packet;
			err = intel_pt_get_next_packet(decoder);
			if (err)
				return err;
			if (decoder->packet.type == INTEL_PT_FUP) {
				decoder->set_fup_cfe_ip = true;
				no_tip = true;
			} else {
				intel_pt_log_at("ERROR: Missing FUP after CFE",
						decoder->pos);
			}
			goto next;

		case INTEL_PT_EVD:
			err = intel_pt_evd(decoder);
			if (err)
				return err;
			break;

		default:
			return intel_pt_bug(decoder);
		}
	}
}
static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
{
	return decoder->packet.count &&
	       (decoder->have_last_ip || decoder->packet.count == 3 ||
		decoder->packet.count == 6);
}
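
/*
 * Illustrative sketch, not part of the decoder: for FUP/TIP packets the packet
 * decoder stores the IPBytes field in packet.count. As intel_pt_have_ip()
 * above shows, values 3 (48-bit, sign-extended) and 6 (full 64-bit) carry a
 * complete IP, so no previous IP is needed; other non-zero values are
 * compressed against the last IP.
 */
static inline bool intel_pt_example_ip_is_full(int ip_bytes)
{
	return ip_bytes == 3 || ip_bytes == 6;
}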
/* Walk PSB+ packets to get in sync. */
static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
{
	int err = 0;

	decoder->in_psb = true;

	while (1) {
		err = intel_pt_get_next_packet(decoder);
		if (err)
			goto out;

		switch (decoder->packet.type) {
		case INTEL_PT_TIP_PGD:
			decoder->continuous_period = false;
			__fallthrough;
		case INTEL_PT_TIP_PGE:
		case INTEL_PT_PTWRITE:
		case INTEL_PT_PTWRITE_IP:
		case INTEL_PT_EXSTOP:
		case INTEL_PT_EXSTOP_IP:
		case INTEL_PT_MWAIT:
		case INTEL_PT_BEP_IP:
		case INTEL_PT_CFE_IP:
			intel_pt_log("ERROR: Unexpected packet\n");
			err = -ENOENT;
			goto out;

		case INTEL_PT_FUP:
			decoder->pge = true;
			if (intel_pt_have_ip(decoder)) {
				uint64_t current_ip = decoder->ip;

				intel_pt_set_ip(decoder);
				decoder->psb_ip = decoder->ip;
				if (current_ip)
					intel_pt_log_to("Setting IP",
							decoder->ip);
			}
			break;

		case INTEL_PT_MTC:
			intel_pt_calc_mtc_timestamp(decoder);
			break;

		case INTEL_PT_TSC:
			intel_pt_calc_tsc_timestamp(decoder);
			break;

		case INTEL_PT_TMA:
			intel_pt_calc_tma(decoder);
			break;

		case INTEL_PT_CYC:
			intel_pt_calc_cyc_timestamp(decoder);
			break;

		case INTEL_PT_CBR:
			intel_pt_calc_cbr(decoder);
			break;

		case INTEL_PT_PIP:
			intel_pt_set_pip(decoder);
			break;

		case INTEL_PT_MODE_EXEC:
			intel_pt_mode_exec_status(decoder);
			break;

		case INTEL_PT_MODE_TSX:
			intel_pt_update_in_tx(decoder);
			break;

		case INTEL_PT_TRACESTOP:
			decoder->pge = false;
			decoder->continuous_period = false;
			intel_pt_clear_tx_flags(decoder);
			decoder->have_tma = false;
			intel_pt_log("ERROR: Unexpected packet\n");
			if (decoder->ip)
				decoder->pkt_state = INTEL_PT_STATE_ERR4;
			else
				decoder->pkt_state = INTEL_PT_STATE_ERR3;
			err = -ENOENT;
			goto out;

		case INTEL_PT_BAD: /* Does not happen */
			err = intel_pt_bug(decoder);
			goto out;

		case INTEL_PT_OVF:
			err = intel_pt_overflow(decoder);
			goto out;

		case INTEL_PT_PSBEND:
			goto out;

		default:
			break;
		}
	}
out:
	decoder->in_psb = false;

	return err;
}
static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
{
	int err;

	while (1) {
		err = intel_pt_get_next_packet(decoder);
		if (err)
			return err;

		switch (decoder->packet.type) {
		case INTEL_PT_TIP_PGD:
			decoder->continuous_period = false;
			decoder->pge = false;
			if (intel_pt_have_ip(decoder))
				intel_pt_set_ip(decoder);
			if (!decoder->ip)
				break;
			decoder->state.type |= INTEL_PT_TRACE_END;
			return 0;

		case INTEL_PT_TIP_PGE:
			decoder->pge = true;
			intel_pt_mtc_cyc_cnt_pge(decoder);
			if (intel_pt_have_ip(decoder))
				intel_pt_set_ip(decoder);
			if (!decoder->ip)
				break;
			decoder->state.type |= INTEL_PT_TRACE_BEGIN;
			return 0;

		case INTEL_PT_TIP:
			decoder->pge = true;
			if (intel_pt_have_ip(decoder))
				intel_pt_set_ip(decoder);
			if (!decoder->ip)
				break;
			return 0;

		case INTEL_PT_FUP:
			if (intel_pt_have_ip(decoder))
				intel_pt_set_ip(decoder);
			if (decoder->ip)
				return 0;
			break;

		case INTEL_PT_MTC:
			intel_pt_calc_mtc_timestamp(decoder);
			break;

		case INTEL_PT_TSC:
			intel_pt_calc_tsc_timestamp(decoder);
			break;

		case INTEL_PT_TMA:
			intel_pt_calc_tma(decoder);
			break;

		case INTEL_PT_CYC:
			intel_pt_calc_cyc_timestamp(decoder);
			break;

		case INTEL_PT_CBR:
			intel_pt_calc_cbr(decoder);
			break;

		case INTEL_PT_PIP:
			intel_pt_set_pip(decoder);
			break;

		case INTEL_PT_MODE_EXEC:
			intel_pt_mode_exec_status(decoder);
			break;

		case INTEL_PT_MODE_TSX:
			intel_pt_update_in_tx(decoder);
			break;

		case INTEL_PT_OVF:
			return intel_pt_overflow(decoder);

		case INTEL_PT_BAD: /* Does not happen */
			return intel_pt_bug(decoder);

		case INTEL_PT_TRACESTOP:
			decoder->pge = false;
			decoder->continuous_period = false;
			intel_pt_clear_tx_flags(decoder);
			decoder->have_tma = false;
			break;

		case INTEL_PT_PSB:
			decoder->state.psb_offset = decoder->pos;
			decoder->psb_ip = 0;
			decoder->last_ip = 0;
			decoder->have_last_ip = true;
			intel_pt_clear_stack(&decoder->stack);
			err = intel_pt_walk_psb(decoder);
			if (err)
				return err;
			decoder->state.type = INTEL_PT_PSB_EVT;
			decoder->state.from_ip = decoder->psb_ip;
			decoder->state.to_ip = 0;
			return 0;

		case INTEL_PT_PSBEND:
		case INTEL_PT_PTWRITE:
		case INTEL_PT_PTWRITE_IP:
		case INTEL_PT_EXSTOP:
		case INTEL_PT_EXSTOP_IP:
		case INTEL_PT_MWAIT:
		case INTEL_PT_BEP_IP:
		case INTEL_PT_CFE_IP:
		default:
			break;
		}
	}
}
static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
{
	int err;

	intel_pt_clear_fup_event(decoder);
	decoder->overflow = false;

	if (!decoder->branch_enable) {
		decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
		decoder->state.type = 0; /* Do not have a sample */
		return 0;
	}

	intel_pt_log("Scanning for full IP\n");
	err = intel_pt_walk_to_ip(decoder);
	if (err || ((decoder->state.type & INTEL_PT_PSB_EVT) && !decoder->ip))
		return err;

	/* In hop mode, resample to get the to_ip as an "instruction" sample */
	if (decoder->hop)
		decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
	else
		decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;

	decoder->state.from_ip = 0;
	decoder->state.to_ip = decoder->ip;
	intel_pt_log_to("Setting IP", decoder->ip);

	return 0;
}
static int intel_pt_part_psb(struct intel_pt_decoder *decoder)
{
	const unsigned char *end = decoder->buf + decoder->len;
	size_t i;

	for (i = INTEL_PT_PSB_LEN - 1; i; i--) {
		if (i > decoder->len)
			continue;
		if (!memcmp(end - i, INTEL_PT_PSB_STR, i))
			return i;
	}
	return 0;
}
static int intel_pt_rest_psb(struct intel_pt_decoder *decoder, int part_psb)
{
	size_t rest_psb = INTEL_PT_PSB_LEN - part_psb;
	const char *psb = INTEL_PT_PSB_STR;

	if (rest_psb > decoder->len ||
	    memcmp(decoder->buf, psb + part_psb, rest_psb))
		return 0;

	return rest_psb;
}
static int intel_pt_get_split_psb(struct intel_pt_decoder *decoder,
				  int part_psb)
{
	int rest_psb, ret;

	decoder->pos += decoder->len;
	decoder->len = 0;

	ret = intel_pt_get_next_data(decoder, false);
	if (ret)
		return ret;

	rest_psb = intel_pt_rest_psb(decoder, part_psb);
	if (!rest_psb)
		return 0;

	decoder->pos -= part_psb;
	decoder->next_buf = decoder->buf + rest_psb;
	decoder->next_len = decoder->len - rest_psb;
	memcpy(decoder->temp_buf, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
	decoder->buf = decoder->temp_buf;
	decoder->len = INTEL_PT_PSB_LEN;

	return 0;
}
static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder)
{
	unsigned char *next;
	int ret;

	intel_pt_log("Scanning for PSB\n");
	while (1) {
		if (!decoder->len) {
			ret = intel_pt_get_next_data(decoder, false);
			if (ret)
				return ret;
		}

		next = memmem(decoder->buf, decoder->len, INTEL_PT_PSB_STR,
			      INTEL_PT_PSB_LEN);
		if (!next) {
			int part_psb;

			part_psb = intel_pt_part_psb(decoder);
			if (part_psb) {
				ret = intel_pt_get_split_psb(decoder, part_psb);
				if (ret)
					return ret;
			} else {
				decoder->pos += decoder->len;
				decoder->len = 0;
			}
			continue;
		}

		decoder->pkt_step = next - decoder->buf;
		return intel_pt_get_next_packet(decoder);
	}
}
static int intel_pt_sync(struct intel_pt_decoder *decoder)
{
	int err;

	decoder->pge = false;
	decoder->continuous_period = false;
	decoder->have_last_ip = false;
	decoder->last_ip = 0;
	decoder->psb_ip = 0;
	decoder->ip = 0;
	intel_pt_clear_stack(&decoder->stack);

	err = intel_pt_scan_for_psb(decoder);
	if (err)
		return err;

	if (decoder->vm_time_correlation) {
		decoder->in_psb = true;
		if (!decoder->timestamp)
			decoder->timestamp = 1;
		decoder->state.type = 0;
		decoder->pkt_state = INTEL_PT_STATE_VM_TIME_CORRELATION;
		return 0;
	}

	decoder->have_last_ip = true;
	decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;

	err = intel_pt_walk_psb(decoder);
	if (err)
		return err;

	decoder->state.type = INTEL_PT_PSB_EVT; /* Only PSB sample */
	decoder->state.from_ip = decoder->psb_ip;
	decoder->state.to_ip = 0;

	if (decoder->ip) {
		/*
		 * In hop mode, resample to get the PSB FUP ip as an
		 * "instruction" sample.
		 */
		if (decoder->hop)
			decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
		else
			decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
	}

	return 0;
}
static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
{
	uint64_t est = decoder->sample_insn_cnt << 1;

	if (!decoder->cbr || !decoder->max_non_turbo_ratio)
		goto out;

	est *= decoder->max_non_turbo_ratio;
	est /= decoder->cbr;
out:
	return decoder->sample_timestamp + est;
}
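
/*
 * Illustrative sketch, not part of the decoder: the estimate above assumes
 * roughly 2 TSC ticks per instruction at the maximum non-turbo frequency,
 * scaled by the current core-to-bus ratio (CBR). The numbers are made up, and
 * unlike the real helper this sketch assumes cbr and ratio are non-zero.
 */
static inline uint64_t intel_pt_example_est_timestamp(uint64_t sample_timestamp,
						      uint64_t insn_cnt,
						      uint32_t max_non_turbo_ratio,
						      uint32_t cbr)
{
	uint64_t est = insn_cnt << 1;	/* ~2 ticks per instruction */

	/*
	 * e.g. insn_cnt = 1000, max_non_turbo_ratio = 24, cbr = 12
	 *      -> est = 2000 * 24 / 12 = 4000 ticks after sample_timestamp
	 */
	est = est * max_non_turbo_ratio / cbr;
	return sample_timestamp + est;
}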
const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
{
	int err;

	do {
		decoder->state.type = INTEL_PT_BRANCH;
		decoder->state.flags = 0;

		switch (decoder->pkt_state) {
		case INTEL_PT_STATE_NO_PSB:
			err = intel_pt_sync(decoder);
			break;
		case INTEL_PT_STATE_NO_IP:
			decoder->have_last_ip = false;
			decoder->last_ip = 0;
			decoder->ip = 0;
			__fallthrough;
		case INTEL_PT_STATE_ERR_RESYNC:
			err = intel_pt_sync_ip(decoder);
			break;
		case INTEL_PT_STATE_IN_SYNC:
			err = intel_pt_walk_trace(decoder);
			break;
		case INTEL_PT_STATE_TNT:
		case INTEL_PT_STATE_TNT_CONT:
			err = intel_pt_walk_tnt(decoder);
			if (err == -EAGAIN)
				err = intel_pt_walk_trace(decoder);
			break;
		case INTEL_PT_STATE_TIP:
		case INTEL_PT_STATE_TIP_PGD:
			err = intel_pt_walk_tip(decoder);
			break;
		case INTEL_PT_STATE_FUP:
			err = intel_pt_walk_fup(decoder);
			if (err == -EAGAIN)
				err = intel_pt_walk_fup_tip(decoder);
			break;
		case INTEL_PT_STATE_FUP_NO_TIP:
			err = intel_pt_walk_fup(decoder);
			if (err == -EAGAIN)
				err = intel_pt_walk_trace(decoder);
			break;
		case INTEL_PT_STATE_FUP_IN_PSB:
			err = intel_pt_fup_in_psb(decoder);
			break;
		case INTEL_PT_STATE_RESAMPLE:
			err = intel_pt_resample(decoder);
			break;
		case INTEL_PT_STATE_VM_TIME_CORRELATION:
			err = intel_pt_vm_time_correlation(decoder);
			break;
		default:
			err = intel_pt_bug(decoder);
			break;
		}
	} while (err == -ENOLINK);

	if (err) {
		decoder->state.err = intel_pt_ext_err(err);
		if (err != -EOVERFLOW)
			decoder->state.from_ip = decoder->ip;
		intel_pt_update_sample_time(decoder);
		decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
		intel_pt_set_nr(decoder);
	} else {
		decoder->state.err = 0;
		if (decoder->cbr != decoder->cbr_seen) {
			decoder->cbr_seen = decoder->cbr;
			if (!decoder->state.type) {
				decoder->state.from_ip = decoder->ip;
				decoder->state.to_ip = 0;
			}
			decoder->state.type |= INTEL_PT_CBR_CHG;
			decoder->state.cbr_payload = decoder->cbr_payload;
			decoder->state.cbr = decoder->cbr;
		}
		if (intel_pt_sample_time(decoder->pkt_state)) {
			intel_pt_update_sample_time(decoder);
			if (decoder->sample_cyc) {
				decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
				decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
				decoder->sample_cyc = false;
			}
		}
		/*
		 * When using only TSC/MTC to compute cycles, IPC can be
		 * sampled as soon as the cycle count changes.
		 */
		if (!decoder->have_cyc)
			decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
	}

	/* Let PSB event always have TSC timestamp */
	if ((decoder->state.type & INTEL_PT_PSB_EVT) && decoder->tsc_timestamp)
		decoder->sample_timestamp = decoder->tsc_timestamp;

	decoder->state.from_nr = decoder->nr;
	decoder->state.to_nr = decoder->next_nr;
	decoder->nr = decoder->next_nr;

	decoder->state.timestamp = decoder->sample_timestamp;
	decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
	decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
	decoder->state.tot_cyc_cnt = decoder->sample_tot_cyc_cnt;

	return &decoder->state;
}
/**
 * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
 * @buf: pointer to buffer pointer
 * @len: size of buffer
 *
 * Updates the buffer pointer to point to the start of the next PSB packet if
 * there is one, otherwise the buffer pointer is unchanged. If @buf is updated,
 * @len is adjusted accordingly.
 *
 * Return: %true if a PSB packet is found, %false otherwise.
 */
static bool intel_pt_next_psb(unsigned char **buf, size_t *len)
{
	unsigned char *next;

	next = memmem(*buf, *len, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
	if (next) {
		*len -= next - *buf;
		*buf = next;
		return true;
	}
	return false;
}
/**
 * intel_pt_step_psb - move buffer pointer to the start of the following PSB
 *                     packet.
 * @buf: pointer to buffer pointer
 * @len: size of buffer
 *
 * Updates the buffer pointer to point to the start of the following PSB packet
 * (skipping the PSB at @buf itself) if there is one, otherwise the buffer
 * pointer is unchanged. If @buf is updated, @len is adjusted accordingly.
 *
 * Return: %true if a PSB packet is found, %false otherwise.
 */
static bool intel_pt_step_psb(unsigned char **buf, size_t *len)
{
	unsigned char *next;

	if (!*len)
		return false;

	next = memmem(*buf + 1, *len - 1, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
	if (next) {
		*len -= next - *buf;
		*buf = next;
		return true;
	}
	return false;
}
/**
 * intel_pt_last_psb - find the last PSB packet in a buffer.
 * @buf: buffer
 * @len: size of buffer
 *
 * This function finds the last PSB in a buffer.
 *
 * Return: A pointer to the last PSB in @buf if found, %NULL otherwise.
 */
static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
{
	const char *n = INTEL_PT_PSB_STR;
	unsigned char *p;
	size_t k;

	if (len < INTEL_PT_PSB_LEN)
		return NULL;

	k = len - INTEL_PT_PSB_LEN + 1;
	while (1) {
		p = memrchr(buf, n[0], k);
		if (!p)
			return NULL;
		if (!memcmp(p + 1, n + 1, INTEL_PT_PSB_LEN - 1))
			return p;
		k = p - buf;
		if (!k)
			return NULL;
	}
}
/**
 * intel_pt_next_tsc - find and return next TSC.
 * @buf: buffer
 * @len: size of buffer
 * @tsc: TSC value returned
 * @rem: returns remaining size when TSC is found
 *
 * Find a TSC packet in @buf and return the TSC value. This function assumes
 * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
 * PSBEND packet is found.
 *
 * Return: %true if TSC is found, false otherwise.
 */
static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
			      size_t *rem)
{
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
	struct intel_pt_pkt packet;
	int ret;

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret <= 0)
			return false;
		if (packet.type == INTEL_PT_TSC) {
			*tsc = packet.payload;
			*rem = len;
			return true;
		}
		if (packet.type == INTEL_PT_PSBEND)
			return false;
		buf += ret;
		len -= ret;
	}
	return false;
}
/**
 * intel_pt_tsc_cmp - compare 7-byte TSCs.
 * @tsc1: first TSC to compare
 * @tsc2: second TSC to compare
 *
 * This function compares 7-byte TSC values allowing for the possibility that
 * TSC wrapped around. Generally it is not possible to know if TSC has wrapped
 * around so for that purpose this function assumes the absolute difference is
 * less than half the maximum difference.
 *
 * Return: %-1 if @tsc1 is before @tsc2, %0 if @tsc1 == @tsc2, %1 if @tsc1 is
 * after @tsc2.
 */
static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
{
	const uint64_t halfway = (1ULL << 55);

	if (tsc1 == tsc2)
		return 0;

	if (tsc1 < tsc2) {
		if (tsc2 - tsc1 < halfway)
			return -1;
		else
			return 1;
	} else {
		if (tsc1 - tsc2 < halfway)
			return 1;
		else
			return -1;
	}
}
#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)

/**
 * adj_for_padding - adjust overlap to account for padding.
 * @buf_b: second buffer
 * @buf_a: first buffer
 * @len_a: size of first buffer
 *
 * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
 * accordingly.
 *
 * Return: A pointer into @buf_b from where non-overlapped data starts.
 */
static unsigned char *adj_for_padding(unsigned char *buf_b,
				      unsigned char *buf_a, size_t len_a)
{
	unsigned char *p = buf_b - MAX_PADDING;
	unsigned char *q = buf_a + len_a - MAX_PADDING;
	int i;

	for (i = MAX_PADDING; i; i--, p++, q++) {
		if (*p != *q)
			break;
	}

	return p;
}
/**
 * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
 *                             using TSC.
 * @buf_a: first buffer
 * @len_a: size of first buffer
 * @buf_b: second buffer
 * @len_b: size of second buffer
 * @consecutive: returns true if there is data in buf_b that is consecutive
 *               to buf_a
 * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
 *
 * If the trace contains TSC we can look at the last TSC of @buf_a and the
 * first TSC of @buf_b in order to determine if the buffers overlap, and then
 * walk forward in @buf_b until a later TSC is found. A precondition is that
 * @buf_a and @buf_b are positioned at a PSB.
 *
 * Return: A pointer into @buf_b from where non-overlapped data starts, or
 * @buf_b + @len_b if there is no non-overlapped data.
 */
static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
						size_t len_a,
						unsigned char *buf_b,
						size_t len_b, bool *consecutive,
						bool ooo_tsc)
{
	uint64_t tsc_a, tsc_b;
	unsigned char *p;
	size_t len, rem_a, rem_b;

	p = intel_pt_last_psb(buf_a, len_a);
	if (!p)
		return buf_b; /* No PSB in buf_a => no overlap */

	len = len_a - (p - buf_a);
	if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
		/* The last PSB+ in buf_a is incomplete, so go back one more */
		len_a -= len;
		p = intel_pt_last_psb(buf_a, len_a);
		if (!p)
			return buf_b; /* No full PSB+ => assume no overlap */
		len = len_a - (p - buf_a);
		if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
			return buf_b; /* No TSC in buf_a => assume no overlap */
	}

	while (1) {
		/* Ignore PSB+ with no TSC */
		if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
			int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);

			/* Same TSC, so buffers are consecutive */
			if (!cmp && rem_b >= rem_a) {
				unsigned char *start;

				*consecutive = true;
				start = buf_b + len_b - (rem_b - rem_a);
				return adj_for_padding(start, buf_a, len_a);
			}
			if (cmp < 0 && !ooo_tsc)
				return buf_b; /* tsc_a < tsc_b => no overlap */
		}

		if (!intel_pt_step_psb(&buf_b, &len_b))
			return buf_b + len_b; /* No PSB in buf_b => no data */
	}
}
/**
 * intel_pt_find_overlap - determine start of non-overlapped trace data.
 * @buf_a: first buffer
 * @len_a: size of first buffer
 * @buf_b: second buffer
 * @len_b: size of second buffer
 * @have_tsc: can use TSC packets to detect overlap
 * @consecutive: returns true if there is data in buf_b that is consecutive
 *               to buf_a
 * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
 *
 * When trace samples or snapshots are recorded there is the possibility that
 * the data overlaps. Note that, for the purposes of decoding, data is only
 * useful if it begins with a PSB packet.
 *
 * Return: A pointer into @buf_b from where non-overlapped data starts, or
 * @buf_b + @len_b if there is no non-overlapped data.
 */
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
				     unsigned char *buf_b, size_t len_b,
				     bool have_tsc, bool *consecutive,
				     bool ooo_tsc)
{
	unsigned char *found;

	/* Buffer 'b' must start at PSB so throw away everything before that */
	if (!intel_pt_next_psb(&buf_b, &len_b))
		return buf_b + len_b; /* No PSB */

	if (!intel_pt_next_psb(&buf_a, &len_a))
		return buf_b; /* No overlap */

	if (have_tsc) {
		found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
						  consecutive, ooo_tsc);
		if (found)
			return found;
	}

	/*
	 * Buffer 'b' cannot end within buffer 'a' so, for comparison purposes,
	 * we can ignore the first part of buffer 'a'.
	 */
	while (len_b < len_a) {
		if (!intel_pt_step_psb(&buf_a, &len_a))
			return buf_b; /* No overlap */
	}

	/* Now len_b >= len_a */
	while (1) {
		/* Potential overlap so check the bytes */
		found = memmem(buf_a, len_a, buf_b, len_a);
		if (found) {
			*consecutive = true;
			return adj_for_padding(buf_b + len_a, buf_a, len_a);
		}

		/* Try again at next PSB in buffer 'a' */
		if (!intel_pt_step_psb(&buf_a, &len_a))
			return buf_b; /* No overlap */
	}
}
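
/*
 * Illustrative usage sketch, not called anywhere: dropping the overlapping
 * part of a newly captured snapshot buffer. 'old_buf'/'old_len' and
 * 'new_buf'/'new_len' are assumed to be consecutive capture attempts of the
 * same AUX area, with TSC packets available and no VM TSC reordering.
 */
static inline size_t intel_pt_example_drop_overlap(unsigned char *old_buf, size_t old_len,
						   unsigned char *new_buf, size_t new_len,
						   unsigned char **start)
{
	bool consecutive = false;

	*start = intel_pt_find_overlap(old_buf, old_len, new_buf, new_len,
				       true, &consecutive, false);
	/* Data before *start is already covered by old_buf */
	return new_len - (*start - new_buf);
}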
/**
 * struct fast_forward_data - data used by intel_pt_ff_cb().
 * @timestamp: timestamp to fast forward towards
 * @buf_timestamp: buffer timestamp of last buffer with trace data earlier than
 *                 the fast forward timestamp.
 */
struct fast_forward_data {
	uint64_t timestamp;
	uint64_t buf_timestamp;
};

/**
 * intel_pt_ff_cb - fast forward lookahead callback.
 * @buffer: Intel PT trace buffer
 * @data: opaque pointer to fast forward data (struct fast_forward_data)
 *
 * Determine if @buffer trace is past the fast forward timestamp.
 *
 * Return: 1 (stop lookahead) if @buffer trace is past the fast forward
 * timestamp, and 0 otherwise.
 */
static int intel_pt_ff_cb(struct intel_pt_buffer *buffer, void *data)
{
	struct fast_forward_data *d = data;
	unsigned char *buf;
	uint64_t tsc;
	size_t rem;
	size_t len;

	buf = (unsigned char *)buffer->buf;
	len = buffer->len;

	if (!intel_pt_next_psb(&buf, &len) ||
	    !intel_pt_next_tsc(buf, len, &tsc, &rem))
		return 0;

	tsc = intel_pt_8b_tsc(tsc, buffer->ref_timestamp);

	intel_pt_log("Buffer 1st timestamp " x64_fmt " ref timestamp " x64_fmt "\n",
		     tsc, buffer->ref_timestamp);

	/*
	 * If the buffer contains a timestamp earlier that the fast forward
	 * timestamp, then record it, else stop.
	 */
	if (tsc < d->timestamp)
		d->buf_timestamp = buffer->ref_timestamp;
	else
		return 1;

	return 0;
}
/**
 * intel_pt_fast_forward - reposition decoder forwards.
 * @decoder: Intel PT decoder
 * @timestamp: timestamp to fast forward towards
 *
 * Reposition decoder at the last PSB with a timestamp earlier than @timestamp.
 *
 * Return: 0 on success or negative error code on failure.
 */
int intel_pt_fast_forward(struct intel_pt_decoder *decoder, uint64_t timestamp)
{
	struct fast_forward_data d = { .timestamp = timestamp };
	unsigned char *buf;
	uint64_t tsc;
	size_t rem;
	size_t len;
	int err;

	intel_pt_log("Fast forward towards timestamp " x64_fmt "\n", timestamp);

	/* Find buffer timestamp of buffer to fast forward to */
	err = decoder->lookahead(decoder->data, intel_pt_ff_cb, &d);
	if (err)
		return err;

	/* Walk to buffer with same buffer timestamp */
	if (d.buf_timestamp) {
		do {
			decoder->pos += decoder->len;
			decoder->len = 0;
			err = intel_pt_get_next_data(decoder, true);
			/* -ENOLINK means non-consecutive trace */
			if (err && err != -ENOLINK)
				return err;
		} while (decoder->buf_timestamp != d.buf_timestamp);
	}

	buf = (unsigned char *)decoder->buf;
	len = decoder->len;

	if (!intel_pt_next_psb(&buf, &len))
		return 0;

	/*
	 * Walk PSBs while the PSB timestamp is less than the fast forward
	 * timestamp.
	 */
	do {
		if (!intel_pt_next_tsc(buf, len, &tsc, &rem))
			break;
		tsc = intel_pt_8b_tsc(tsc, decoder->buf_timestamp);
		/*
		 * A TSC packet can slip past MTC packets but, after fast
		 * forward, decoding starts at the TSC timestamp. That means
		 * the timestamps may not be exactly the same as the timestamps
		 * that would have been decoded without fast forward.
		 */
		if (tsc < timestamp) {
			intel_pt_log("Fast forward to next PSB timestamp " x64_fmt "\n", tsc);
			decoder->pos += decoder->len - len;
			decoder->buf = buf;
			decoder->len = len;
			intel_pt_reposition(decoder);
		} else {
			break;
		}
	} while (intel_pt_step_psb(&buf, &len));

	return 0;
}
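
/*
 * Illustrative usage sketch, not called anywhere: repositioning a decoder to
 * just before a timestamp of interest and then decoding from there. The
 * timestamp passed in is assumed to be in the same units as decoder
 * timestamps.
 */
static inline const struct intel_pt_state *
intel_pt_example_decode_from(struct intel_pt_decoder *decoder, uint64_t timestamp)
{
	if (intel_pt_fast_forward(decoder, timestamp))
		return NULL;

	/* Decoding now resumes at the last PSB before 'timestamp' */
	return intel_pt_decode(decoder);
}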