/*
 * sigreturn.c - tests for x86 sigreturn(2) and exit-to-userspace
 * Copyright (c) 2014-2015 Andrew Lutomirski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * This is a series of tests that exercises the sigreturn(2) syscall and
 * the IRET / SYSRET paths in the kernel.
 *
 * For now, this focuses on the effects of unusual CS and SS values,
 * and it has a bunch of tests to make sure that ESP/RSP is restored
 * properly.
 *
 * The basic idea behind these tests is to raise(SIGUSR1) to create a
 * sigcontext frame, plug in the values to be tested, and then return,
 * which implicitly invokes sigreturn(2) and programs the user context
 * as requested.
 *
 * For tests for which we expect sigreturn and the subsequent return to
 * user mode to succeed, we return to a short trampoline that generates
 * SIGTRAP so that the meat of the tests can be ordinary C code in a
 * SIGTRAP handler.
 *
 * The inner workings of each test are documented below.
 *
 * Do not run this on outdated, unpatched kernels: it may crash them nastily.
 */
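/*
 * Rough control flow of one test case (a sketch; see the handlers below):
 *
 *   raise(SIGUSR1)
 *     -> sigusr1(): snapshot the registers, then plug the CS/SS under test
 *        and a sentinel SP/IP into the sigcontext
 *   handler returns, implicitly calling sigreturn(2)
 *     -> on success: the int3 trampoline fires SIGTRAP -> sigtrap() records
 *        the resulting registers for comparison
 *     -> on IRET failure: the kernel sends SIGSEGV/SIGBUS/SIGILL, which is
 *        also routed to sigtrap() to record the error code and trap number
 */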
#define _GNU_SOURCE	/* for the REG_* register indices in <sys/ucontext.h> */

#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <err.h>
#include <asm/ldt.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <sys/ptrace.h>
/* Pull in AR_xyz defines. */
typedef unsigned int u32;
typedef unsigned short u16;
#include "../../../../arch/x86/include/asm/desc_defs.h"
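/*
 * desc_defs.h is a kernel-internal header; the u32/u16 typedefs above exist
 * only to satisfy its type names.  We want its AR_* access-rights constants
 * (AR_TYPE_MASK, AR_TYPE_RWDATA, etc.), which is_valid_ss() below uses to
 * decode LAR results.
 */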
/*
 * Copied from asm/ucontext.h, as asm/ucontext.h conflicts badly with the
 * glibc headers.
 *
 * UC_SIGCONTEXT_SS will be set when delivering 64-bit or x32 signals on
 * kernels that save SS in the sigcontext.  All kernels that set
 * UC_SIGCONTEXT_SS will correctly restore at least the low 32 bits of esp
 * regardless of SS (i.e. they implement espfix).
 *
 * Kernels that set UC_SIGCONTEXT_SS will also set UC_STRICT_RESTORE_SS
 * when delivering a signal that came from 64-bit code.
 *
 * Sigreturn restores SS as follows:
 *
 * if (saved SS is valid || UC_STRICT_RESTORE_SS is set ||
 *     saved CS is not 64-bit)
 *         new SS = saved SS  (will fail IRET and signal if invalid)
 * else
 *         new SS = a flat 32-bit data segment
 */
#define UC_SIGCONTEXT_SS	0x2
#define UC_STRICT_RESTORE_SS	0x4
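/*
 * Sketch (not part of the tests): a handler that must work on both old and
 * new kernels can key off UC_SIGCONTEXT_SS before trusting the saved SS:
 *
 *	if (uc->uc_flags & UC_SIGCONTEXT_SS)
 *		use the SS slot saved in the sigcontext
 *	else
 *		read the live register: asm ("mov %%ss, %0" : "=rm" (ss))
 *
 * That is essentially what DOSEMU does; validate_signal_ss() below checks
 * that the second approach keeps working.
 */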
/*
 * In principle, this test can run on Linux emulation layers (e.g.
 * Illumos "LX branded zones").  Solaris-based kernels reserve LDT
 * entries 0-5 for their own internal purposes, so start our LDT
 * allocations above that reservation.  (The tests don't pass on LX
 * branded zones, but at least this lets them run.)
 */
#define LDT_OFFSET 6
/* An aligned stack accessible through some of our segments. */
static unsigned char stack16[65536] __attribute__((aligned(4096)));
/*
 * An aligned int3 instruction used as a trampoline.  Some of the tests
 * want to fish out their ss values, so this trampoline copies ss to eax
 * before the int3.
 */
asm (".pushsection .text\n\t"
     ".type int3, @function\n\t"
     ".align 4096\n\t"
     "int3:\n\t"
     "mov %ss,%eax\n\t"
     "int3\n\t"
     ".size int3, . - int3\n\t"
     ".align 4096, 0xcc\n\t"
     ".popsection");
extern char int3[4096];
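/*
 * The 4096-byte alignment and the 0xcc fill (0xcc is the int3 opcode) mean
 * that the page holding the trampoline contains nothing but int3
 * instructions, so a code segment based at int3 traps no matter where within
 * that page execution lands.
 */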
/*
 * At startup, we prepare:
 *
 * - ldt_nonexistent_sel: An LDT entry that doesn't exist (all-zero
 *   descriptor or out of bounds).
 * - code16_sel: A 16-bit LDT code segment pointing to int3.
 * - data16_sel: A 16-bit LDT data segment pointing to stack16.
 * - npcode32_sel: A 32-bit not-present LDT code segment pointing to int3.
 * - npdata32_sel: A 32-bit not-present LDT data segment pointing to stack16.
 * - gdt_data16_idx: A 16-bit GDT data segment pointing to stack16.
 * - gdt_npdata32_idx: A 32-bit not-present GDT data segment pointing to
 *   stack16.
 *
 * For no particularly good reason, xyz_sel is a selector value with the
 * RPL and LDT bits filled in, whereas xyz_idx is just an index into the
 * descriptor table.  These variables will be zero if their respective
 * segments could not be allocated.
 */
static unsigned short ldt_nonexistent_sel;
static unsigned short code16_sel, data16_sel, npcode32_sel, npdata32_sel;

static unsigned short gdt_data16_idx, gdt_npdata32_idx;
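/*
 * An x86 segment selector is (index << 3) | TI | RPL: bits 0-1 are the
 * requested privilege level, bit 2 selects the LDT (1) or GDT (0), and the
 * remaining bits index the descriptor table.  GDT3() and LDT3() build
 * ring-3 selectors for a given table index.
 */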
static unsigned short GDT3(int idx)
{
	return (idx << 3) | 3;
}

static unsigned short LDT3(int idx)
{
	return (idx << 3) | 7;
}
/* Our sigaltstack scratch space. */
static char altstack_data[SIGSTKSZ];
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
		       int flags)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | flags;
	sigemptyset(&sa.sa_mask);
	if (sigaction(sig, &sa, 0))
		err(1, "sigaction");
}
static void clearhandler(int sig)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sigemptyset(&sa.sa_mask);
	if (sigaction(sig, &sa, 0))
		err(1, "sigaction");
}
static void add_ldt(const struct user_desc *desc, unsigned short *var,
		    const char *name)
{
	if (syscall(SYS_modify_ldt, 1, desc, sizeof(*desc)) == 0) {
		*var = LDT3(desc->entry_number);
	} else {
		printf("[NOTE]\tFailed to create %s segment\n", name);
		*var = 0;
	}
}
static void setup_ldt(void)
{
	if ((unsigned long)stack16 > (1ULL << 32) - sizeof(stack16))
		errx(1, "stack16 is too high\n");
	if ((unsigned long)int3 > (1ULL << 32) - sizeof(int3))
		errx(1, "int3 is too high\n");

	ldt_nonexistent_sel = LDT3(LDT_OFFSET + 2);
	const struct user_desc code16_desc = {
		.entry_number    = LDT_OFFSET + 0,
		.base_addr       = (unsigned long)int3,
		.limit           = 4095,
		.seg_32bit       = 0,
		.contents        = 2, /* Code, not conforming */
		.seg_not_present = 0,
	};
	add_ldt(&code16_desc, &code16_sel, "code16");
	const struct user_desc data16_desc = {
		.entry_number    = LDT_OFFSET + 1,
		.base_addr       = (unsigned long)stack16,
		.limit           = 0xffff,
		.seg_32bit       = 0,
		.contents        = 0, /* Data, grow-up */
		.seg_not_present = 0,
	};
	add_ldt(&data16_desc, &data16_sel, "data16");
	const struct user_desc npcode32_desc = {
		.entry_number    = LDT_OFFSET + 3,
		.base_addr       = (unsigned long)int3,
		.limit           = 4095,
		.seg_32bit       = 1,
		.contents        = 2, /* Code, not conforming */
		.seg_not_present = 1,
	};
	add_ldt(&npcode32_desc, &npcode32_sel, "npcode32");
	const struct user_desc npdata32_desc = {
		.entry_number    = LDT_OFFSET + 4,
		.base_addr       = (unsigned long)stack16,
		.limit           = 0xffff,
		.seg_32bit       = 1,
		.contents        = 0, /* Data, grow-up */
		.seg_not_present = 1,
	};
	add_ldt(&npdata32_desc, &npdata32_sel, "npdata32");
	struct user_desc gdt_data16_desc = {
		.entry_number    = -1,
		.base_addr       = (unsigned long)stack16,
		.limit           = 0xffff,
		.seg_32bit       = 0,
		.contents        = 0, /* Data, grow-up */
		.seg_not_present = 0,
	};

	if (syscall(SYS_set_thread_area, &gdt_data16_desc) == 0) {
		/*
		 * This probably indicates vulnerability to CVE-2014-8133.
		 * Merely getting here isn't definitive, though, and we'll
		 * diagnose the problem for real later on.
		 */
		printf("[WARN]\tset_thread_area allocated data16 at index %d\n",
		       gdt_data16_desc.entry_number);
		gdt_data16_idx = gdt_data16_desc.entry_number;
	} else {
		printf("[OK]\tset_thread_area refused 16-bit data\n");
	}
	struct user_desc gdt_npdata32_desc = {
		.entry_number    = -1,
		.base_addr       = (unsigned long)stack16,
		.limit           = 0xffff,
		.seg_32bit       = 1,
		.contents        = 0, /* Data, grow-up */
		.seg_not_present = 1,
	};

	if (syscall(SYS_set_thread_area, &gdt_npdata32_desc) == 0) {
		/*
		 * As a hardening measure, newer kernels don't allow this.
		 */
		printf("[WARN]\tset_thread_area allocated npdata32 at index %d\n",
		       gdt_npdata32_desc.entry_number);
		gdt_npdata32_idx = gdt_npdata32_desc.entry_number;
	} else {
		printf("[OK]\tset_thread_area refused not-present data\n");
	}
}
/* State used by our signal handlers. */
static gregset_t initial_regs, requested_regs, resulting_regs;

/* Instructions for the SIGUSR1 handler. */
static volatile unsigned short sig_cs, sig_ss;
static volatile sig_atomic_t sig_trapped, sig_err, sig_trapno;

static volatile sig_atomic_t sig_corrupt_final_ss;
/* Abstractions for some 32-bit vs 64-bit differences. */
#ifdef __x86_64__
# define REG_IP REG_RIP
# define REG_SP REG_RSP
# define REG_AX REG_RAX

struct selectors {
	unsigned short cs, gs, fs, ss;
};
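/*
 * On x86_64 the kernel packs the CS, GS and FS selectors (and, on kernels
 * that save it, SS) into the single 64-bit REG_CSGSFS slot; ssptr() and
 * csptr() below pull the individual 16-bit fields back out of it.
 */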
static unsigned short *ssptr(ucontext_t *ctx)
{
	struct selectors *sels = (void *)&ctx->uc_mcontext.gregs[REG_CSGSFS];
	return &sels->ss;
}

static unsigned short *csptr(ucontext_t *ctx)
{
	struct selectors *sels = (void *)&ctx->uc_mcontext.gregs[REG_CSGSFS];
	return &sels->cs;
}
#else
# define REG_IP REG_EIP
# define REG_SP REG_ESP
# define REG_AX REG_EAX

static greg_t *ssptr(ucontext_t *ctx)
{
	return &ctx->uc_mcontext.gregs[REG_SS];
}

static greg_t *csptr(ucontext_t *ctx)
{
	return &ctx->uc_mcontext.gregs[REG_CS];
}
#endif
/*
 * Checks a given selector for its code bitness or returns -1 if it's not
 * a usable code segment selector.
 */
int cs_bitness(unsigned short cs)
{
	uint32_t valid = 0, ar;
	asm ("lar %[cs], %[ar]\n\t"
	     "jnz 1f\n\t"
	     "mov $1, %[valid]\n\t"
	     "1:"
	     : [ar] "=r" (ar), [valid] "+rm" (valid)
	     : [cs] "r" (cs));

	if (!valid)
		return -1;

	bool db = (ar & (1 << 22));
	bool l = (ar & (1 << 21));
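	/*
	 * Bits 22 (D/B) and 21 (L) of the access-rights word distinguish the
	 * bitness: L=1,D=0 is a 64-bit segment, L=0,D=1 is 32-bit, and
	 * L=0,D=0 is 16-bit.  L=1,D=1 is reserved.
	 */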
	if (!(ar & (1 << 11)))
		return -1;	/* Not code. */

	if (l && !db)
		return 64;
	else if (!l && db)
		return 32;
	else if (!l && !db)
		return 16;
	else
		return -1;	/* Unknown bitness. */
}
/*
 * Checks whether a given selector looks like a valid SS: a present,
 * read/write data segment (grow-up or grow-down).
 */
bool is_valid_ss(unsigned short cs)
{
	uint32_t valid = 0, ar;
	asm ("lar %[cs], %[ar]\n\t"
	     "jnz 1f\n\t"
	     "mov $1, %[valid]\n\t"
	     "1:"
	     : [ar] "=r" (ar), [valid] "+rm" (valid)
	     : [cs] "r" (cs));

	if (!valid)
		return false;

	if ((ar & AR_TYPE_MASK) != AR_TYPE_RWDATA &&
	    (ar & AR_TYPE_MASK) != AR_TYPE_RWDATA_EXPDOWN)
		return false;

	return (ar & AR_P);
}
400 static volatile sig_atomic_t nerrs
;
static void validate_signal_ss(int sig, ucontext_t *ctx)
{
#ifdef __x86_64__
	bool was_64bit = (cs_bitness(*csptr(ctx)) == 64);

	if (!(ctx->uc_flags & UC_SIGCONTEXT_SS)) {
		printf("[FAIL]\tUC_SIGCONTEXT_SS was not set\n");
		nerrs++;

		/*
		 * This happens on Linux 4.1.  The rest will fail, too, so
		 * return now to reduce the noise.
		 */
		return;
	}

	/* UC_STRICT_RESTORE_SS is set iff we came from 64-bit mode. */
	if (!!(ctx->uc_flags & UC_STRICT_RESTORE_SS) != was_64bit) {
		printf("[FAIL]\tUC_STRICT_RESTORE_SS was wrong in signal %d\n",
		       sig);
		nerrs++;
	}

	if (is_valid_ss(*ssptr(ctx))) {
		/*
		 * DOSEMU was written before 64-bit sigcontext had SS, and
		 * it tries to figure out the signal source SS by looking at
		 * the physical register.  Make sure that keeps working.
		 */
		unsigned short hw_ss;
		asm ("mov %%ss, %0" : "=rm" (hw_ss));
		if (hw_ss != *ssptr(ctx)) {
			printf("[FAIL]\tHW SS didn't match saved SS\n");
			nerrs++;
		}
	}
#endif
}
/*
 * SIGUSR1 handler.  Sets CS and SS as requested and points IP to the
 * int3 trampoline.  Sets SP to a large known value so that we can see
 * whether the value round-trips back to user mode correctly.
 */
static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;

	validate_signal_ss(sig, ctx);

	memcpy(&initial_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));

	*csptr(ctx) = sig_cs;
	*ssptr(ctx) = sig_ss;

	ctx->uc_mcontext.gregs[REG_IP] =
		sig_cs == code16_sel ? 0 : (unsigned long)&int3;
	ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL;
	ctx->uc_mcontext.gregs[REG_AX] = 0;

	memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
	requested_regs[REG_AX] = *ssptr(ctx);	/* The asm code does this. */

	return;
}
/*
 * Called after a successful sigreturn (via int3) or from a failed
 * sigreturn (directly by kernel).  Restores our state so that the
 * original raise(SIGUSR1) returns.
 */
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;

	validate_signal_ss(sig, ctx);

	sig_err = ctx->uc_mcontext.gregs[REG_ERR];
	sig_trapno = ctx->uc_mcontext.gregs[REG_TRAPNO];

	unsigned short ss;
	asm ("mov %%ss,%0" : "=r" (ss));

	greg_t asm_ss = ctx->uc_mcontext.gregs[REG_AX];
	if (asm_ss != sig_ss && sig == SIGTRAP) {
		/* Sanity check failure. */
		printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n",
		       ss, *ssptr(ctx), (unsigned long long)asm_ss);
		nerrs++;
	}

	memcpy(&resulting_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
	memcpy(&ctx->uc_mcontext.gregs, &initial_regs, sizeof(gregset_t));

#ifdef __x86_64__
	if (sig_corrupt_final_ss) {
		if (ctx->uc_flags & UC_STRICT_RESTORE_SS) {
			printf("[FAIL]\tUC_STRICT_RESTORE_SS was set inappropriately\n");
			nerrs++;
		} else {
			/*
			 * DOSEMU transitions from 32-bit to 64-bit mode by
			 * adjusting sigcontext, and it requires that this work
			 * even if the saved SS is bogus.
			 */
			printf("\tCorrupting SS on return to 64-bit mode\n");
			*ssptr(ctx) = 0;
		}
	}
#endif

	sig_trapped = sig;
}
/* Tests recovery if !UC_STRICT_RESTORE_SS */
static void sigusr2(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;

	if (!(ctx->uc_flags & UC_STRICT_RESTORE_SS)) {
		printf("[FAIL]\traise(2) didn't set UC_STRICT_RESTORE_SS\n");
		nerrs++;
		return;	/* We can't do the rest. */
	}

	ctx->uc_flags &= ~UC_STRICT_RESTORE_SS;
	*ssptr(ctx) = 0;

	/* Return.  The kernel should recover without sending another signal. */
}
static int test_nonstrict_ss(void)
{
	clearhandler(SIGUSR1);
	clearhandler(SIGTRAP);
	clearhandler(SIGSEGV);
	clearhandler(SIGILL);
	sethandler(SIGUSR2, sigusr2, 0);

	nerrs = 0;

	printf("[RUN]\tClear UC_STRICT_RESTORE_SS and corrupt SS\n");
	raise(SIGUSR2);
	if (!nerrs)
		printf("[OK]\tIt worked\n");

	return nerrs;
}
/* Finds a usable code segment of the requested bitness. */
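/*
 * The probes below rely on the usual Linux GDT layout, in which the 32-bit
 * and 64-bit user code segments sit two descriptors (2 << 3 selector units)
 * apart; if neither the current CS nor its neighbors match, fall back to the
 * 16-bit LDT code segment created in setup_ldt().
 */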
int find_cs(int bitness)
{
	unsigned short my_cs;

	asm ("mov %%cs,%0" : "=r" (my_cs));

	if (cs_bitness(my_cs) == bitness)
		return my_cs;
	if (cs_bitness(my_cs + (2 << 3)) == bitness)
		return my_cs + (2 << 3);
	if (my_cs > (2 << 3) && cs_bitness(my_cs - (2 << 3)) == bitness)
		return my_cs - (2 << 3);
	if (cs_bitness(code16_sel) == bitness)
		return code16_sel;

	printf("[WARN]\tCould not find %d-bit CS\n", bitness);
	return -1;
}
static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
{
	int cs = find_cs(cs_bits);
	if (cs == -1) {
		printf("[SKIP]\tCode segment unavailable for %d-bit CS, %d-bit SS\n",
		       cs_bits, use_16bit_ss ? 16 : 32);
		return 0;
	}

	if (force_ss != -1) {
		sig_ss = force_ss;
	} else {
		if (use_16bit_ss) {
			if (!data16_sel) {
				printf("[SKIP]\tData segment unavailable for %d-bit CS, 16-bit SS\n",
				       cs_bits);
				return 0;
			}
			sig_ss = data16_sel;
		} else {
			asm volatile ("mov %%ss,%0" : "=r" (sig_ss));
		}
	}

	sig_cs = cs;

	printf("[RUN]\tValid sigreturn: %d-bit CS (%hx), %d-bit SS (%hx%s)\n",
	       cs_bits, sig_cs, use_16bit_ss ? 16 : 32, sig_ss,
	       (sig_ss & 4) ? "" : ", GDT");

	raise(SIGUSR1);

	nerrs = 0;
	/*
	 * Check that each register had an acceptable value when the
	 * int3 trampoline was invoked.
	 */
	for (int i = 0; i < NGREG; i++) {
		greg_t req = requested_regs[i], res = resulting_regs[i];

		if (i == REG_TRAPNO || i == REG_IP)
			continue;	/* don't care */

		if (i == REG_SP) {
			printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
			       (unsigned long long)res);

			/*
			 * In many circumstances, the high 32 bits of rsp
			 * are zeroed.  For example, we could be a real
			 * 32-bit program, or we could hit any of a number
			 * of poorly-documented IRET or segmented ESP
			 * oddities.  If this happens, it's okay.
			 */
			if (res == (req & 0xFFFFFFFF))
				continue;	/* OK; not expected to work */
		}
		bool ignore_reg = false;

#ifdef __x86_64__
		if (i == REG_CSGSFS) {
			struct selectors *req_sels =
				(void *)&requested_regs[REG_CSGSFS];
			struct selectors *res_sels =
				(void *)&resulting_regs[REG_CSGSFS];
			if (req_sels->cs != res_sels->cs) {
				printf("[FAIL]\tCS mismatch: requested 0x%hx; got 0x%hx\n",
				       req_sels->cs, res_sels->cs);
				nerrs++;
			}

			if (req_sels->ss != res_sels->ss) {
				printf("[FAIL]\tSS mismatch: requested 0x%hx; got 0x%hx\n",
				       req_sels->ss, res_sels->ss);
				nerrs++;
			}

			continue;
		}
#endif
		/* Sanity check on the kernel */
		if (i == REG_AX && requested_regs[i] != resulting_regs[i]) {
			printf("[FAIL]\tAX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
			       (unsigned long long)requested_regs[i],
			       (unsigned long long)resulting_regs[i]);
			nerrs++;
			continue;
		}
		if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
			/*
			 * SP is particularly interesting here.  The
			 * usual cause of failures is that we hit the
			 * nasty IRET case of returning to a 16-bit SS,
			 * in which case bits 16:31 of the *kernel*
			 * stack pointer persist in ESP.
			 */
			printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
			       i, (unsigned long long)requested_regs[i],
			       (unsigned long long)resulting_regs[i]);
			nerrs++;
		}
	}

	if (nerrs == 0)
		printf("[OK]\tall registers okay\n");

	return nerrs;
}
static int test_bad_iret(int cs_bits, unsigned short ss, int force_cs)
{
	int cs = force_cs == -1 ? find_cs(cs_bits) : force_cs;
	if (cs == -1)
		return 0;

	sig_cs = cs;
	sig_ss = ss;

	printf("[RUN]\t%d-bit CS (%hx), bogus SS (%hx)\n",
	       cs_bits, sig_cs, sig_ss);

	sig_trapped = 0;
	raise(SIGUSR1);
	if (sig_trapped) {
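		/*
		 * Decode the hardware error code that the kernel reported:
		 * bit 0 (EXT) means the fault was triggered by an external
		 * event, bit 1 set means the selector references the IDT,
		 * otherwise bit 2 selects the LDT (1) or GDT (0), and the
		 * remaining bits are the selector index.
		 */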
		char errdesc[32] = "";
		if (sig_err) {
			const char *src = (sig_err & 1) ? " EXT" : "";
			const char *table;
			if ((sig_err & 0x6) == 0x0)
				table = "GDT";
			else if ((sig_err & 0x6) == 0x4)
				table = "LDT";
			else if ((sig_err & 0x6) == 0x2)
				table = "IDT";
			else
				table = "???";

			sprintf(errdesc, "%s%s index %d, ",
				table, src, sig_err >> 3);
		}
		char trapname[32];
		if (sig_trapno == 13)
			strcpy(trapname, "GP");
		else if (sig_trapno == 11)
			strcpy(trapname, "NP");
		else if (sig_trapno == 12)
			strcpy(trapname, "SS");
		else if (sig_trapno == 32)
			strcpy(trapname, "IRET");  /* X86_TRAP_IRET */
		else
			sprintf(trapname, "%d", sig_trapno);

		printf("[OK]\tGot #%s(0x%lx) (i.e. %s%s)\n",
		       trapname, (unsigned long)sig_err,
		       errdesc, strsignal(sig_trapped));
		return 0;
	} else {
		/*
		 * This also implicitly tests UC_STRICT_RESTORE_SS:
		 * We check that these signals set UC_STRICT_RESTORE_SS and,
		 * if UC_STRICT_RESTORE_SS doesn't cause strict behavior,
		 * then we won't get SIGSEGV.
		 */
		printf("[FAIL]\tDid not get SIGSEGV\n");
		return 1;
	}
}
int main(void)
{
	int total_nerrs = 0;
	unsigned short my_cs, my_ss;

	asm volatile ("mov %%cs,%0" : "=r" (my_cs));
	asm volatile ("mov %%ss,%0" : "=r" (my_ss));
	setup_ldt();

	stack_t stack = {
		.ss_sp = altstack_data,
		.ss_size = SIGSTKSZ,
	};
	if (sigaltstack(&stack, NULL) != 0)
		err(1, "sigaltstack");
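	/*
	 * The SIGUSR1 handler runs on the normal stack, but the trampoline's
	 * SIGTRAP (and the fault signals registered later) arrive while SP
	 * still holds the bogus test value, so those handlers must run on
	 * the alternate stack (SA_ONSTACK).
	 */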
	sethandler(SIGUSR1, sigusr1, 0);
	sethandler(SIGTRAP, sigtrap, SA_ONSTACK);
	/* Easy cases: return to a 32-bit SS in each possible CS bitness. */
	total_nerrs += test_valid_sigreturn(64, false, -1);
	total_nerrs += test_valid_sigreturn(32, false, -1);
	total_nerrs += test_valid_sigreturn(16, false, -1);
	/*
	 * Test easy espfix cases: return to a 16-bit LDT SS in each possible
	 * CS bitness.  NB: with a long mode CS, the SS bitness is irrelevant.
	 *
	 * This catches the original missing-espfix-on-64-bit-kernels issue
	 * as well as CVE-2014-8134.
	 */
	total_nerrs += test_valid_sigreturn(64, true, -1);
	total_nerrs += test_valid_sigreturn(32, true, -1);
	total_nerrs += test_valid_sigreturn(16, true, -1);
	if (gdt_data16_idx) {
		/*
		 * For performance reasons, Linux skips espfix if SS points
		 * to the GDT.  If we were able to allocate a 16-bit SS in
		 * the GDT, see if it leaks parts of the kernel stack pointer.
		 *
		 * This tests for CVE-2014-8133.
		 */
		total_nerrs += test_valid_sigreturn(64, true,
						    GDT3(gdt_data16_idx));
		total_nerrs += test_valid_sigreturn(32, true,
						    GDT3(gdt_data16_idx));
		total_nerrs += test_valid_sigreturn(16, true,
						    GDT3(gdt_data16_idx));
	}
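	/*
	 * With sig_corrupt_final_ss set, the SIGTRAP handler zeroes the saved
	 * SS before returning (see sigtrap() above), mimicking how DOSEMU
	 * switches from 32-bit to 64-bit mode by editing the sigcontext.  The
	 * kernel must tolerate this when UC_STRICT_RESTORE_SS is clear.
	 */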
	/* Nasty ABI case: check SS corruption handling. */
	sig_corrupt_final_ss = 1;
	total_nerrs += test_valid_sigreturn(32, false, -1);
	total_nerrs += test_valid_sigreturn(32, true, -1);
	sig_corrupt_final_ss = 0;
	/*
	 * We're done testing valid sigreturn cases.  Now we test states
	 * for which sigreturn itself will succeed but the subsequent
	 * entry to user mode will fail.
	 *
	 * Depending on the failure mode and the kernel bitness, these
	 * entry failures can generate SIGSEGV, SIGBUS, or SIGILL.
	 */
	clearhandler(SIGTRAP);
	sethandler(SIGSEGV, sigtrap, SA_ONSTACK);
	sethandler(SIGBUS, sigtrap, SA_ONSTACK);
	sethandler(SIGILL, sigtrap, SA_ONSTACK);  /* 32-bit kernels do this */
	/* Easy failures: invalid SS, resulting in #GP(0) */
	test_bad_iret(64, ldt_nonexistent_sel, -1);
	test_bad_iret(32, ldt_nonexistent_sel, -1);
	test_bad_iret(16, ldt_nonexistent_sel, -1);
	/* These fail because SS isn't a data segment, resulting in #GP(SS) */
	test_bad_iret(64, my_cs, -1);
	test_bad_iret(32, my_cs, -1);
	test_bad_iret(16, my_cs, -1);
	/* Try to return to a not-present code segment, triggering #NP(CS). */
	test_bad_iret(32, my_ss, npcode32_sel);
	/*
	 * Try to return to a not-present but otherwise valid data segment.
	 * This will cause IRET to fail with #SS on the espfix stack.  This
	 * exercises CVE-2014-9322.
	 *
	 * Note that, if espfix is enabled, 64-bit Linux will lose track
	 * of the actual cause of failure and report #GP(0) instead.
	 * This would be very difficult for Linux to avoid, because
	 * espfix64 causes IRET failures to be promoted to #DF, so the
	 * original exception frame is never pushed onto the stack.
	 */
	test_bad_iret(32, npdata32_sel, -1);
	/*
	 * Try to return to a not-present but otherwise valid data
	 * segment without invoking espfix.  Newer kernels don't allow
	 * this to happen in the first place.  On older kernels, though,
	 * this can trigger CVE-2014-9322.
	 */
	if (gdt_npdata32_idx)
		test_bad_iret(32, GDT3(gdt_npdata32_idx), -1);
	total_nerrs += test_nonstrict_ss();

	return total_nerrs ? 1 : 0;
}