// SPDX-License-Identifier: GPL-2.0-only
/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2014 Intel Corporation; author: H. Peter Anvin
 *
 * ----------------------------------------------------------------------- */

/*
 * The IRET instruction, when returning to a 16-bit segment, only
 * restores the bottom 16 bits of the user space stack pointer.  This
 * causes some 16-bit software to break, but it also leaks kernel state
 * to user space.
 *
 * We work around this by creating percpu "ministacks", each of which
 * is mapped 2^16 times 64K apart.  When we detect that the return SS is
 * on the LDT, we copy the IRET frame to the ministack and use the
 * relevant alias to return to userspace.  The ministacks are mapped
 * readonly, so if the IRET faults we promote #GP to #DF, which is an
 * IST vector and thus has its own stack; we then do the fixup in the
 * #DF handler.
 *
 * This file sets up the ministacks and the related page tables.  The
 * actual ministack invocation is in entry_64.S.
 */
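
/*
 * Why 2^16 aliases?  The invocation code in entry_64.S builds the
 * alias address by combining the per-cpu espfix_stack base with bits
 * 16..31 of the user's RSP, so an alias exists for every value those
 * bits can take.  When IRET then truncates the stack pointer to 16
 * bits, the register is left holding exactly the user's original low
 * 32 bits and none of the kernel's.  (Summary of the design; the
 * details live in entry_64.S.)
 */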

#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/espfix.h>

/*
 * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
 * it up to a cache line to avoid unnecessary sharing.
 */
#define ESPFIX_STACK_SIZE	(8*8UL)
#define ESPFIX_STACKS_PER_PAGE	(PAGE_SIZE/ESPFIX_STACK_SIZE)

/* How many espfix pages do we have address space for? */
#define ESPFIX_PAGE_SPACE	(1UL << (P4D_SHIFT-PAGE_SHIFT-16))

#define ESPFIX_MAX_CPUS		(ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
# error "Need more virtual address space for the ESPFIX hack"
#endif
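
/*
 * For scale (assuming 4K pages and the usual x86-64 P4D_SHIFT of 39):
 * ESPFIX_PAGE_SPACE = 1 << (39-12-16) = 2048 pages, and each page holds
 * PAGE_SIZE/ESPFIX_STACK_SIZE = 4096/64 = 64 stacks, so the hack has
 * room for up to 131072 CPUs.
 */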

#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)

/* This contains the *bottom* address of the espfix stack */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);

/* Initialization mutex - should this be a spinlock? */
static DEFINE_MUTEX(espfix_init_mutex);

/* Page allocation array - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
#define ESPFIX_MAX_PAGES	DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
static void *espfix_pages[ESPFIX_MAX_PAGES];

static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
	__aligned(PAGE_SIZE);

static unsigned int page_random, slot_random;

/*
 * This returns the bottom address of the espfix stack for a specific CPU.
 * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
 * we have to account for some amount of padding at the end of each page.
 */
static inline unsigned long espfix_base_addr(unsigned int cpu)
{
	unsigned long page, slot;
	unsigned long addr;

	page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
	slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
	addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
	addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
	addr += ESPFIX_BASE_ADDR;
	return addr;
}
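
/*
 * Worked example of the math above (illustrative; assumes 4K pages, so
 * ESPFIX_STACKS_PER_PAGE == 64, and page_random == slot_random == 0):
 *
 *	cpu = 1155:	page = 1155 / 64 = 0x12, slot = 1155 % 64 = 3
 *	linear offset	= (0x12 << 12) + 3*64 = 0x120c0
 *	after splicing	= (0x120c0 & 0xffff) | ((0x120c0 & ~0xffff) << 16)
 *			= 0x1000020c0
 *
 * The splice always leaves bits 16..31 of the returned address zero;
 * the clone loops in init_espfix_ap() map the same physical page at
 * every value of those bits, which is what gives each ministack its
 * 2^16 aliases spaced 64K apart.
 */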

#define PTE_STRIDE        (65536/PAGE_SIZE)
#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
#define ESPFIX_PMD_CLONES PTRS_PER_PMD
#define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
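
/*
 * Clone counts for the common 4K-page configuration (PTRS_PER_PTE ==
 * PTRS_PER_PMD == 512), shown as a sanity check:
 *
 *	PTE_STRIDE        = 65536/4096     = 16	(pages per 64K unit)
 *	ESPFIX_PTE_CLONES = 512/16         = 32	(64K units per PTE page)
 *	ESPFIX_PMD_CLONES = 512
 *	ESPFIX_PUD_CLONES = 65536/(32*512) = 4	(4 x 1G = one 4G window)
 *
 * 32 * 512 * 4 = 65536 mappings of each espfix page, one per 64K alias.
 */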

#define PGTABLE_PROT	  ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)

static void init_espfix_random(void)
{
	unsigned long rand;

	/*
	 * This is run before the entropy pools are initialized,
	 * but this is hopefully better than nothing.
	 */
	if (!arch_get_random_long(&rand)) {
		/* The constant is an arbitrary large prime */
		rand = rdtsc();
		rand *= 0xc345c6b72fd16123UL;
	}

	slot_random = rand % ESPFIX_STACKS_PER_PAGE;
	page_random = (rand / ESPFIX_STACKS_PER_PAGE)
		& (ESPFIX_PAGE_SPACE - 1);
}

void __init init_espfix_bsp(void)
{
	pgd_t *pgd;
	p4d_t *p4d;

	/* Install the espfix pud into the kernel page directory */
	pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
	p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
	p4d_populate(&init_mm, p4d, espfix_pud_page);

	/* Randomize the locations */
	init_espfix_random();

	/* The rest is the same as for any other processor */
	init_espfix_ap(0);
}

void init_espfix_ap(int cpu)
{
	unsigned int page;
	unsigned long addr;
	pud_t pud, *pud_p;
	pmd_t pmd, *pmd_p;
	pte_t pte, *pte_p;
	int n, node;
	void *stack_page;
	pteval_t ptemask;

	/* We only have to do this once... */
	if (likely(per_cpu(espfix_stack, cpu)))
		return;		/* Already initialized */

	addr = espfix_base_addr(cpu);
	page = cpu/ESPFIX_STACKS_PER_PAGE;

	/* Did another CPU already set this up? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Did we race on the lock? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock_done;

	node = cpu_to_node(cpu);
	ptemask = __supported_pte_mask;

	pud_p = &espfix_pud_page[pud_index(addr)];
	pud = *pud_p;
	if (!pud_present(pud)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pmd_p = (pmd_t *)page_address(page);
		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
			set_pud(&pud_p[n], pud);
	}

	pmd_p = pmd_offset(&pud, addr);
	pmd = *pmd_p;
	if (!pmd_present(pmd)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pte_p = (pte_t *)page_address(page);
		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
			set_pmd(&pmd_p[n], pmd);
	}

	pte_p = pte_offset_kernel(&pmd, addr);
	stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
	/*
	 * __PAGE_KERNEL_* includes _PAGE_GLOBAL, which we want since
	 * this is mapped to userspace.
	 */
	pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));
	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
		set_pte(&pte_p[n*PTE_STRIDE], pte);

	/* Job is done for this CPU and any CPU which shares this page */
	WRITE_ONCE(espfix_pages[page], stack_page);

unlock_done:
	mutex_unlock(&espfix_init_mutex);
done:
	per_cpu(espfix_stack, cpu) = addr;
	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
				     + (addr & ~PAGE_MASK);
}