1 // SPDX-License-Identifier: GPL-2.0-only
3 * sigreturn.c - tests for x86 sigreturn(2) and exit-to-userspace
4 * Copyright (c) 2014-2015 Andrew Lutomirski
6 * This is a series of tests that exercises the sigreturn(2) syscall and
7 * the IRET / SYSRET paths in the kernel.
9 * For now, this focuses on the effects of unusual CS and SS values,
10 * and it has a bunch of tests to make sure that ESP/RSP is restored
13 * The basic idea behind these tests is to raise(SIGUSR1) to create a
14 * sigcontext frame, plug in the values to be tested, and then return,
15 * which implicitly invokes sigreturn(2) and programs the user context
18 * For tests for which we expect sigreturn and the subsequent return to
19 * user mode to succeed, we return to a short trampoline that generates
20 * SIGTRAP so that the meat of the tests can be ordinary C code in a
23 * The inner workings of each test is documented below.
25 * Do not run on outdated, unpatched kernels at risk of nasty crashes.
#define _GNU_SOURCE

#include <err.h>
#include <inttypes.h>
#include <setjmp.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/signal.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/ucontext.h>
#include <sys/user.h>
#include <asm/ldt.h>
49 /* Pull in AR_xyz defines. */
50 typedef unsigned int u32
;
51 typedef unsigned short u16
;
52 #include "../../../../arch/x86/include/asm/desc_defs.h"
55 * Copied from asm/ucontext.h, as asm/ucontext.h conflicts badly with the glibc
60 * UC_SIGCONTEXT_SS will be set when delivering 64-bit or x32 signals on
61 * kernels that save SS in the sigcontext. All kernels that set
62 * UC_SIGCONTEXT_SS will correctly restore at least the low 32 bits of esp
63 * regardless of SS (i.e. they implement espfix).
65 * Kernels that set UC_SIGCONTEXT_SS will also set UC_STRICT_RESTORE_SS
66 * when delivering a signal that came from 64-bit code.
68 * Sigreturn restores SS as follows:
70 * if (saved SS is valid || UC_STRICT_RESTORE_SS is set ||
71 * saved CS is not 64-bit)
72 * new SS = saved SS (will fail IRET and signal if invalid)
74 * new SS = a flat 32-bit data segment
76 #define UC_SIGCONTEXT_SS 0x2
77 #define UC_STRICT_RESTORE_SS 0x4
/*
 * In principle, this test can run on Linux emulation layers (e.g.
 * Illumos "LX branded zones").  Solaris-based kernels reserve LDT
 * entries 0-5 for their own internal purposes, so start our LDT
 * allocations above that reservation.  (The tests don't pass on LX
 * branded zones, but at least this lets them run.)
 */
#define LDT_OFFSET 6

/* An aligned stack accessible through some of our segments. */
static unsigned char stack16[65536] __attribute__((aligned(4096)));
93 * An aligned int3 instruction used as a trampoline. Some of the tests
94 * want to fish out their ss values, so this trampoline copies ss to eax
97 asm (".pushsection .text\n\t"
98 ".type int3, @function\n\t"
103 ".size int3, . - int3\n\t"
104 ".align 4096, 0xcc\n\t"
106 extern char int3
[4096];
 * At startup, we prepare:
111 * - ldt_nonexistent_sel: An LDT entry that doesn't exist (all-zero
112 * descriptor or out of bounds).
113 * - code16_sel: A 16-bit LDT code segment pointing to int3.
114 * - data16_sel: A 16-bit LDT data segment pointing to stack16.
115 * - npcode32_sel: A 32-bit not-present LDT code segment pointing to int3.
116 * - npdata32_sel: A 32-bit not-present LDT data segment pointing to stack16.
117 * - gdt_data16_idx: A 16-bit GDT data segment pointing to stack16.
118 * - gdt_npdata32_idx: A 32-bit not-present GDT data segment pointing to
121 * For no particularly good reason, xyz_sel is a selector value with the
122 * RPL and LDT bits filled in, whereas xyz_idx is just an index into the
123 * descriptor table. These variables will be zero if their respective
124 * segments could not be allocated.
/*
 * Selectors/indices set up at startup; each is zero if its segment
 * could not be allocated.  xyz_sel values carry the RPL and LDT bits,
 * xyz_idx values are bare descriptor-table indices.
 */
static unsigned short ldt_nonexistent_sel;
static unsigned short code16_sel, data16_sel, npcode32_sel, npdata32_sel;

static unsigned short gdt_data16_idx, gdt_npdata32_idx;
/* Turns a GDT index into a selector with RPL 3 (TI bit clear). */
static unsigned short GDT3(int idx)
{
	return (idx << 3) | 3;
}
/* Turns an LDT index into a selector with RPL 3 and the TI (LDT) bit set. */
static unsigned short LDT3(int idx)
{
	return (idx << 3) | 7;
}
141 /* Our sigaltstack scratch space. */
142 static char altstack_data
[SIGSTKSZ
];
144 static void sethandler(int sig
, void (*handler
)(int, siginfo_t
*, void *),
148 memset(&sa
, 0, sizeof(sa
));
149 sa
.sa_sigaction
= handler
;
150 sa
.sa_flags
= SA_SIGINFO
| flags
;
151 sigemptyset(&sa
.sa_mask
);
152 if (sigaction(sig
, &sa
, 0))
156 static void clearhandler(int sig
)
159 memset(&sa
, 0, sizeof(sa
));
160 sa
.sa_handler
= SIG_DFL
;
161 sigemptyset(&sa
.sa_mask
);
162 if (sigaction(sig
, &sa
, 0))
166 static void add_ldt(const struct user_desc
*desc
, unsigned short *var
,
169 if (syscall(SYS_modify_ldt
, 1, desc
, sizeof(*desc
)) == 0) {
170 *var
= LDT3(desc
->entry_number
);
172 printf("[NOTE]\tFailed to create %s segment\n", name
);
177 static void setup_ldt(void)
179 if ((unsigned long)stack16
> (1ULL << 32) - sizeof(stack16
))
180 errx(1, "stack16 is too high\n");
181 if ((unsigned long)int3
> (1ULL << 32) - sizeof(int3
))
182 errx(1, "int3 is too high\n");
184 ldt_nonexistent_sel
= LDT3(LDT_OFFSET
+ 2);
186 const struct user_desc code16_desc
= {
187 .entry_number
= LDT_OFFSET
+ 0,
188 .base_addr
= (unsigned long)int3
,
191 .contents
= 2, /* Code, not conforming */
194 .seg_not_present
= 0,
197 add_ldt(&code16_desc
, &code16_sel
, "code16");
199 const struct user_desc data16_desc
= {
200 .entry_number
= LDT_OFFSET
+ 1,
201 .base_addr
= (unsigned long)stack16
,
204 .contents
= 0, /* Data, grow-up */
207 .seg_not_present
= 0,
210 add_ldt(&data16_desc
, &data16_sel
, "data16");
212 const struct user_desc npcode32_desc
= {
213 .entry_number
= LDT_OFFSET
+ 3,
214 .base_addr
= (unsigned long)int3
,
217 .contents
= 2, /* Code, not conforming */
220 .seg_not_present
= 1,
223 add_ldt(&npcode32_desc
, &npcode32_sel
, "npcode32");
225 const struct user_desc npdata32_desc
= {
226 .entry_number
= LDT_OFFSET
+ 4,
227 .base_addr
= (unsigned long)stack16
,
230 .contents
= 0, /* Data, grow-up */
233 .seg_not_present
= 1,
236 add_ldt(&npdata32_desc
, &npdata32_sel
, "npdata32");
238 struct user_desc gdt_data16_desc
= {
240 .base_addr
= (unsigned long)stack16
,
243 .contents
= 0, /* Data, grow-up */
246 .seg_not_present
= 0,
250 if (syscall(SYS_set_thread_area
, &gdt_data16_desc
) == 0) {
252 * This probably indicates vulnerability to CVE-2014-8133.
253 * Merely getting here isn't definitive, though, and we'll
254 * diagnose the problem for real later on.
256 printf("[WARN]\tset_thread_area allocated data16 at index %d\n",
257 gdt_data16_desc
.entry_number
);
258 gdt_data16_idx
= gdt_data16_desc
.entry_number
;
260 printf("[OK]\tset_thread_area refused 16-bit data\n");
263 struct user_desc gdt_npdata32_desc
= {
265 .base_addr
= (unsigned long)stack16
,
268 .contents
= 0, /* Data, grow-up */
271 .seg_not_present
= 1,
275 if (syscall(SYS_set_thread_area
, &gdt_npdata32_desc
) == 0) {
277 * As a hardening measure, newer kernels don't allow this.
279 printf("[WARN]\tset_thread_area allocated npdata32 at index %d\n",
280 gdt_npdata32_desc
.entry_number
);
281 gdt_npdata32_idx
= gdt_npdata32_desc
.entry_number
;
283 printf("[OK]\tset_thread_area refused 16-bit data\n");
/* State used by our signal handlers. */
static gregset_t initial_regs, requested_regs, resulting_regs;

/* Instructions for the SIGUSR1 handler. */
static volatile unsigned short sig_cs, sig_ss;
static volatile sig_atomic_t sig_trapped, sig_err, sig_trapno;

static volatile sig_atomic_t sig_corrupt_final_ss;
297 /* Abstractions for some 32-bit vs 64-bit differences. */
299 # define REG_IP REG_RIP
300 # define REG_SP REG_RSP
301 # define REG_CX REG_RCX
304 unsigned short cs
, gs
, fs
, ss
;
307 static unsigned short *ssptr(ucontext_t
*ctx
)
309 struct selectors
*sels
= (void *)&ctx
->uc_mcontext
.gregs
[REG_CSGSFS
];
313 static unsigned short *csptr(ucontext_t
*ctx
)
315 struct selectors
*sels
= (void *)&ctx
->uc_mcontext
.gregs
[REG_CSGSFS
];
319 # define REG_IP REG_EIP
320 # define REG_SP REG_ESP
321 # define REG_CX REG_ECX
323 static greg_t
*ssptr(ucontext_t
*ctx
)
325 return &ctx
->uc_mcontext
.gregs
[REG_SS
];
328 static greg_t
*csptr(ucontext_t
*ctx
)
330 return &ctx
->uc_mcontext
.gregs
[REG_CS
];
335 * Checks a given selector for its code bitness or returns -1 if it's not
336 * a usable code segment selector.
338 int cs_bitness(unsigned short cs
)
340 uint32_t valid
= 0, ar
;
341 asm ("lar %[cs], %[ar]\n\t"
343 "mov $1, %[valid]\n\t"
345 : [ar
] "=r" (ar
), [valid
] "+rm" (valid
)
351 bool db
= (ar
& (1 << 22));
352 bool l
= (ar
& (1 << 21));
355 return -1; /* Not code. */
364 return -1; /* Unknown bitness. */
368 * Checks a given selector for its code bitness or returns -1 if it's not
369 * a usable code segment selector.
371 bool is_valid_ss(unsigned short cs
)
373 uint32_t valid
= 0, ar
;
374 asm ("lar %[cs], %[ar]\n\t"
376 "mov $1, %[valid]\n\t"
378 : [ar
] "=r" (ar
), [valid
] "+rm" (valid
)
384 if ((ar
& AR_TYPE_MASK
) != AR_TYPE_RWDATA
&&
385 (ar
& AR_TYPE_MASK
) != AR_TYPE_RWDATA_EXPDOWN
)
391 /* Number of errors in the current test case. */
392 static volatile sig_atomic_t nerrs
;
394 static void validate_signal_ss(int sig
, ucontext_t
*ctx
)
397 bool was_64bit
= (cs_bitness(*csptr(ctx
)) == 64);
399 if (!(ctx
->uc_flags
& UC_SIGCONTEXT_SS
)) {
400 printf("[FAIL]\tUC_SIGCONTEXT_SS was not set\n");
404 * This happens on Linux 4.1. The rest will fail, too, so
405 * return now to reduce the noise.
410 /* UC_STRICT_RESTORE_SS is set iff we came from 64-bit mode. */
411 if (!!(ctx
->uc_flags
& UC_STRICT_RESTORE_SS
) != was_64bit
) {
412 printf("[FAIL]\tUC_STRICT_RESTORE_SS was wrong in signal %d\n",
417 if (is_valid_ss(*ssptr(ctx
))) {
419 * DOSEMU was written before 64-bit sigcontext had SS, and
420 * it tries to figure out the signal source SS by looking at
421 * the physical register. Make sure that keeps working.
423 unsigned short hw_ss
;
424 asm ("mov %%ss, %0" : "=rm" (hw_ss
));
425 if (hw_ss
!= *ssptr(ctx
)) {
426 printf("[FAIL]\tHW SS didn't match saved SS\n");
434 * SIGUSR1 handler. Sets CS and SS as requested and points IP to the
435 * int3 trampoline. Sets SP to a large known value so that we can see
436 * whether the value round-trips back to user mode correctly.
438 static void sigusr1(int sig
, siginfo_t
*info
, void *ctx_void
)
440 ucontext_t
*ctx
= (ucontext_t
*)ctx_void
;
442 validate_signal_ss(sig
, ctx
);
444 memcpy(&initial_regs
, &ctx
->uc_mcontext
.gregs
, sizeof(gregset_t
));
446 *csptr(ctx
) = sig_cs
;
447 *ssptr(ctx
) = sig_ss
;
449 ctx
->uc_mcontext
.gregs
[REG_IP
] =
450 sig_cs
== code16_sel
? 0 : (unsigned long)&int3
;
451 ctx
->uc_mcontext
.gregs
[REG_SP
] = (unsigned long)0x8badf00d5aadc0deULL
;
452 ctx
->uc_mcontext
.gregs
[REG_CX
] = 0;
454 memcpy(&requested_regs
, &ctx
->uc_mcontext
.gregs
, sizeof(gregset_t
));
455 requested_regs
[REG_CX
] = *ssptr(ctx
); /* The asm code does this. */
461 * Called after a successful sigreturn (via int3) or from a failed
462 * sigreturn (directly by kernel). Restores our state so that the
463 * original raise(SIGUSR1) returns.
465 static void sigtrap(int sig
, siginfo_t
*info
, void *ctx_void
)
467 ucontext_t
*ctx
= (ucontext_t
*)ctx_void
;
469 validate_signal_ss(sig
, ctx
);
471 sig_err
= ctx
->uc_mcontext
.gregs
[REG_ERR
];
472 sig_trapno
= ctx
->uc_mcontext
.gregs
[REG_TRAPNO
];
475 asm ("mov %%ss,%0" : "=r" (ss
));
477 greg_t asm_ss
= ctx
->uc_mcontext
.gregs
[REG_CX
];
478 if (asm_ss
!= sig_ss
&& sig
== SIGTRAP
) {
479 /* Sanity check failure. */
480 printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n",
481 ss
, *ssptr(ctx
), (unsigned long long)asm_ss
);
485 memcpy(&resulting_regs
, &ctx
->uc_mcontext
.gregs
, sizeof(gregset_t
));
486 memcpy(&ctx
->uc_mcontext
.gregs
, &initial_regs
, sizeof(gregset_t
));
489 if (sig_corrupt_final_ss
) {
490 if (ctx
->uc_flags
& UC_STRICT_RESTORE_SS
) {
491 printf("[FAIL]\tUC_STRICT_RESTORE_SS was set inappropriately\n");
495 * DOSEMU transitions from 32-bit to 64-bit mode by
496 * adjusting sigcontext, and it requires that this work
497 * even if the saved SS is bogus.
499 printf("\tCorrupting SS on return to 64-bit mode\n");
509 /* Tests recovery if !UC_STRICT_RESTORE_SS */
510 static void sigusr2(int sig
, siginfo_t
*info
, void *ctx_void
)
512 ucontext_t
*ctx
= (ucontext_t
*)ctx_void
;
514 if (!(ctx
->uc_flags
& UC_STRICT_RESTORE_SS
)) {
515 printf("[FAIL]\traise(2) didn't set UC_STRICT_RESTORE_SS\n");
517 return; /* We can't do the rest. */
520 ctx
->uc_flags
&= ~UC_STRICT_RESTORE_SS
;
523 /* Return. The kernel should recover without sending another signal. */
526 static int test_nonstrict_ss(void)
528 clearhandler(SIGUSR1
);
529 clearhandler(SIGTRAP
);
530 clearhandler(SIGSEGV
);
531 clearhandler(SIGILL
);
532 sethandler(SIGUSR2
, sigusr2
, 0);
536 printf("[RUN]\tClear UC_STRICT_RESTORE_SS and corrupt SS\n");
539 printf("[OK]\tIt worked\n");
545 /* Finds a usable code segment of the requested bitness. */
546 int find_cs(int bitness
)
548 unsigned short my_cs
;
550 asm ("mov %%cs,%0" : "=r" (my_cs
));
552 if (cs_bitness(my_cs
) == bitness
)
554 if (cs_bitness(my_cs
+ (2 << 3)) == bitness
)
555 return my_cs
+ (2 << 3);
556 if (my_cs
> (2<<3) && cs_bitness(my_cs
- (2 << 3)) == bitness
)
557 return my_cs
- (2 << 3);
558 if (cs_bitness(code16_sel
) == bitness
)
561 printf("[WARN]\tCould not find %d-bit CS\n", bitness
);
565 static int test_valid_sigreturn(int cs_bits
, bool use_16bit_ss
, int force_ss
)
567 int cs
= find_cs(cs_bits
);
569 printf("[SKIP]\tCode segment unavailable for %d-bit CS, %d-bit SS\n",
570 cs_bits
, use_16bit_ss
? 16 : 32);
574 if (force_ss
!= -1) {
579 printf("[SKIP]\tData segment unavailable for %d-bit CS, 16-bit SS\n",
585 asm volatile ("mov %%ss,%0" : "=r" (sig_ss
));
591 printf("[RUN]\tValid sigreturn: %d-bit CS (%hx), %d-bit SS (%hx%s)\n",
592 cs_bits
, sig_cs
, use_16bit_ss
? 16 : 32, sig_ss
,
593 (sig_ss
& 4) ? "" : ", GDT");
600 * Check that each register had an acceptable value when the
601 * int3 trampoline was invoked.
603 for (int i
= 0; i
< NGREG
; i
++) {
604 greg_t req
= requested_regs
[i
], res
= resulting_regs
[i
];
606 if (i
== REG_TRAPNO
|| i
== REG_IP
)
607 continue; /* don't care */
611 * If we were using a 16-bit stack segment, then
612 * the kernel is a bit stuck: IRET only restores
613 * the low 16 bits of ESP/RSP if SS is 16-bit.
614 * The kernel uses a hack to restore bits 31:16,
615 * but that hack doesn't help with bits 63:32.
616 * On Intel CPUs, bits 63:32 end up zeroed, and, on
617 * AMD CPUs, they leak the high bits of the kernel
618 * espfix64 stack pointer. There's very little that
619 * the kernel can do about it.
621 * Similarly, if we are returning to a 32-bit context,
622 * the CPU will often lose the high 32 bits of RSP.
628 if (cs_bits
!= 64 && ((res
^ req
) & 0xFFFFFFFF) == 0) {
629 printf("[NOTE]\tSP: %llx -> %llx\n",
630 (unsigned long long)req
,
631 (unsigned long long)res
);
635 printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
636 (unsigned long long)requested_regs
[i
],
637 (unsigned long long)resulting_regs
[i
]);
642 bool ignore_reg
= false;
647 if (i
== REG_CSGSFS
) {
648 struct selectors
*req_sels
=
649 (void *)&requested_regs
[REG_CSGSFS
];
650 struct selectors
*res_sels
=
651 (void *)&resulting_regs
[REG_CSGSFS
];
652 if (req_sels
->cs
!= res_sels
->cs
) {
653 printf("[FAIL]\tCS mismatch: requested 0x%hx; got 0x%hx\n",
654 req_sels
->cs
, res_sels
->cs
);
658 if (req_sels
->ss
!= res_sels
->ss
) {
659 printf("[FAIL]\tSS mismatch: requested 0x%hx; got 0x%hx\n",
660 req_sels
->ss
, res_sels
->ss
);
668 /* Sanity check on the kernel */
669 if (i
== REG_CX
&& req
!= res
) {
670 printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
671 (unsigned long long)req
,
672 (unsigned long long)res
);
677 if (req
!= res
&& !ignore_reg
) {
678 printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
679 i
, (unsigned long long)req
,
680 (unsigned long long)res
);
686 printf("[OK]\tall registers okay\n");
691 static int test_bad_iret(int cs_bits
, unsigned short ss
, int force_cs
)
693 int cs
= force_cs
== -1 ? find_cs(cs_bits
) : force_cs
;
700 printf("[RUN]\t%d-bit CS (%hx), bogus SS (%hx)\n",
701 cs_bits
, sig_cs
, sig_ss
);
706 char errdesc
[32] = "";
708 const char *src
= (sig_err
& 1) ? " EXT" : "";
710 if ((sig_err
& 0x6) == 0x0)
712 else if ((sig_err
& 0x6) == 0x4)
714 else if ((sig_err
& 0x6) == 0x2)
719 sprintf(errdesc
, "%s%s index %d, ",
720 table
, src
, sig_err
>> 3);
724 if (sig_trapno
== 13)
725 strcpy(trapname
, "GP");
726 else if (sig_trapno
== 11)
727 strcpy(trapname
, "NP");
728 else if (sig_trapno
== 12)
729 strcpy(trapname
, "SS");
730 else if (sig_trapno
== 32)
731 strcpy(trapname
, "IRET"); /* X86_TRAP_IRET */
733 sprintf(trapname
, "%d", sig_trapno
);
735 printf("[OK]\tGot #%s(0x%lx) (i.e. %s%s)\n",
736 trapname
, (unsigned long)sig_err
,
737 errdesc
, strsignal(sig_trapped
));
741 * This also implicitly tests UC_STRICT_RESTORE_SS:
742 * We check that these signals set UC_STRICT_RESTORE_SS and,
743 * if UC_STRICT_RESTORE_SS doesn't cause strict behavior,
744 * then we won't get SIGSEGV.
746 printf("[FAIL]\tDid not get SIGSEGV\n");
754 unsigned short my_cs
, my_ss
;
756 asm volatile ("mov %%cs,%0" : "=r" (my_cs
));
757 asm volatile ("mov %%ss,%0" : "=r" (my_ss
));
761 .ss_sp
= altstack_data
,
764 if (sigaltstack(&stack
, NULL
) != 0)
765 err(1, "sigaltstack");
767 sethandler(SIGUSR1
, sigusr1
, 0);
768 sethandler(SIGTRAP
, sigtrap
, SA_ONSTACK
);
770 /* Easy cases: return to a 32-bit SS in each possible CS bitness. */
771 total_nerrs
+= test_valid_sigreturn(64, false, -1);
772 total_nerrs
+= test_valid_sigreturn(32, false, -1);
773 total_nerrs
+= test_valid_sigreturn(16, false, -1);
776 * Test easy espfix cases: return to a 16-bit LDT SS in each possible
777 * CS bitness. NB: with a long mode CS, the SS bitness is irrelevant.
779 * This catches the original missing-espfix-on-64-bit-kernels issue
780 * as well as CVE-2014-8134.
782 total_nerrs
+= test_valid_sigreturn(64, true, -1);
783 total_nerrs
+= test_valid_sigreturn(32, true, -1);
784 total_nerrs
+= test_valid_sigreturn(16, true, -1);
786 if (gdt_data16_idx
) {
788 * For performance reasons, Linux skips espfix if SS points
789 * to the GDT. If we were able to allocate a 16-bit SS in
790 * the GDT, see if it leaks parts of the kernel stack pointer.
792 * This tests for CVE-2014-8133.
794 total_nerrs
+= test_valid_sigreturn(64, true,
795 GDT3(gdt_data16_idx
));
796 total_nerrs
+= test_valid_sigreturn(32, true,
797 GDT3(gdt_data16_idx
));
798 total_nerrs
+= test_valid_sigreturn(16, true,
799 GDT3(gdt_data16_idx
));
803 /* Nasty ABI case: check SS corruption handling. */
804 sig_corrupt_final_ss
= 1;
805 total_nerrs
+= test_valid_sigreturn(32, false, -1);
806 total_nerrs
+= test_valid_sigreturn(32, true, -1);
807 sig_corrupt_final_ss
= 0;
811 * We're done testing valid sigreturn cases. Now we test states
812 * for which sigreturn itself will succeed but the subsequent
813 * entry to user mode will fail.
815 * Depending on the failure mode and the kernel bitness, these
816 * entry failures can generate SIGSEGV, SIGBUS, or SIGILL.
818 clearhandler(SIGTRAP
);
819 sethandler(SIGSEGV
, sigtrap
, SA_ONSTACK
);
820 sethandler(SIGBUS
, sigtrap
, SA_ONSTACK
);
821 sethandler(SIGILL
, sigtrap
, SA_ONSTACK
); /* 32-bit kernels do this */
823 /* Easy failures: invalid SS, resulting in #GP(0) */
824 test_bad_iret(64, ldt_nonexistent_sel
, -1);
825 test_bad_iret(32, ldt_nonexistent_sel
, -1);
826 test_bad_iret(16, ldt_nonexistent_sel
, -1);
828 /* These fail because SS isn't a data segment, resulting in #GP(SS) */
829 test_bad_iret(64, my_cs
, -1);
830 test_bad_iret(32, my_cs
, -1);
831 test_bad_iret(16, my_cs
, -1);
833 /* Try to return to a not-present code segment, triggering #NP(SS). */
834 test_bad_iret(32, my_ss
, npcode32_sel
);
837 * Try to return to a not-present but otherwise valid data segment.
838 * This will cause IRET to fail with #SS on the espfix stack. This
839 * exercises CVE-2014-9322.
841 * Note that, if espfix is enabled, 64-bit Linux will lose track
842 * of the actual cause of failure and report #GP(0) instead.
843 * This would be very difficult for Linux to avoid, because
844 * espfix64 causes IRET failures to be promoted to #DF, so the
845 * original exception frame is never pushed onto the stack.
847 test_bad_iret(32, npdata32_sel
, -1);
850 * Try to return to a not-present but otherwise valid data
851 * segment without invoking espfix. Newer kernels don't allow
852 * this to happen in the first place. On older kernels, though,
853 * this can trigger CVE-2014-9322.
855 if (gdt_npdata32_idx
)
856 test_bad_iret(32, GDT3(gdt_npdata32_idx
), -1);
859 total_nerrs
+= test_nonstrict_ss();
862 return total_nerrs
? 1 : 0;