// SPDX-License-Identifier: GPL-2.0
/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>
static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BD_SIZE_BYTES_64;
	else
		return MPX_BD_SIZE_BYTES_32;
}

static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_SIZE_BYTES_64;
	else
		return MPX_BT_SIZE_BYTES_32;
}
/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 */
static unsigned long mpx_mmap(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, populate;

	/* Only bounds table can be allocated here */
	if (len != mpx_bt_size_bytes(mm))
		return -EINVAL;

	down_write(&mm->mmap_sem);
	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate, NULL);
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

	return addr;
}
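/*
 * Note that the table is mapped with VM_MPX set.  That flag is what
 * lets the rest of the MPX code (and do_munmap()) tell these
 * kernel-managed bounds tables apart from ordinary anonymous memory;
 * see the recursion checks in mpx_notify_unmap() and the comment
 * above the do_munmap() call in unmap_entire_bt() below.
 */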
static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail.  If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction.  If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read.  This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction.  Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu.
	 * Error out on anything else.
	 */
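	/*
	 * 0x0f 0x1a and 0x0f 0x1b are the opcode bytes shared by the
	 * entire MPX instruction family (bndcl/bndcu/bndcn as well as
	 * bndmk/bndmov/bndldx/bndstx, distinguished by prefixes), so
	 * anything outside those two opcodes can not be a bounds
	 * check and is rejected below.
	 */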
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}
/*
 * If a bounds overflow occurs then a #BR is generated. This
 * function decodes MPX instructions to get the violation address
 * and store it in the extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants in to the instructions.  We can not
 * trust anything about it.  They might not be valid
 * instructions or might encode invalid registers, etc...
 */
int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
{
	const struct mpx_bndreg_state *bndregs;
	const struct mpx_bndreg *bndreg;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get bndregs field from current task's xsave area */
	bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs->bndreg[bndregno];
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode.  Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
	info->lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->upper = (void __user *)(unsigned long)~bndreg->upper_bound;
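	/*
	 * For instance, a bound register in its INIT state has zero in
	 * both the lower_bound and upper_bound fields, which decodes
	 * here to lower == 0 and upper == ~0: bounds that allow access
	 * to the entire address space.
	 */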
	info->addr  = insn_get_addr_ref(&insn, regs);

	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->addr == (void __user *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	trace_mpx_bounds_register_exception(info->addr, bndreg);
	return 0;
err_out:
	/* info might be NULL, but kfree() handles that */
	return err;
}
static __user void *mpx_get_bounds_dir(void)
{
	const struct mpx_bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}
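/*
 * For reference, BNDCFGU is laid out (per the Intel SDM) with the
 * enable bit in bit 0, BNDPRESERVE in bit 1, and the 4k-aligned
 * bounds directory base address in the upper bits, which is why the
 * enable check and MPX_BNDCFG_ADDR_MASK above can both be applied to
 * the same register value.
 */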
int mpx_enable_management(void)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = current->mm;
	int ret = 0;

	/*
	 * The runtime in userspace will be responsible for allocation of
	 * the bounds directory. Then, it will save the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enable MPX through
	 * the XRSTOR instruction.
	 *
	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
	 * expected to be relatively expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the
	 * unmap path; we can just use mm->context.bd_addr instead.
	 */
	bd_base = mpx_get_bounds_dir();
	down_write(&mm->mmap_sem);

	/* MPX doesn't support addresses above 47 bits yet. */
	if (find_vma(mm, DEFAULT_MAP_WINDOW)) {
		pr_warn_once("%s (%d): MPX cannot handle addresses "
				"above 47-bits. Disabling.",
				current->comm, current->pid);
		ret = -ENXIO;
		goto out;
	}
	mm->context.bd_addr = bd_base;
	if (mm->context.bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;
out:
	up_write(&mm->mmap_sem);
	return ret;
}
int mpx_disable_management(void)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}
static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
		unsigned long *curval,
		unsigned long __user *addr,
		unsigned long old_val, unsigned long new_val)
{
	int ret;
	/*
	 * user_atomic_cmpxchg_inatomic() actually uses sizeof()
	 * the pointer that we pass to it to figure out how much
	 * data to cmpxchg.  We have to be careful here not to
	 * pass a pointer to a 64-bit data type when we only want
	 * a 32-bit copy.
	 */
	if (is_64bit_mm(mm)) {
		ret = user_atomic_cmpxchg_inatomic(curval,
				addr, old_val, new_val);
	} else {
		u32 uninitialized_var(curval_32);
		u32 old_val_32 = old_val;
		u32 new_val_32 = new_val;
		u32 __user *addr_32 = (u32 __user *)addr;

		ret = user_atomic_cmpxchg_inatomic(&curval_32,
				addr_32, old_val_32, new_val_32);
		*curval = curval_32;
	}
	return ret;
}
/*
 * With 32-bit mode, a bounds directory is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, a bounds directory is 2GB,
 * and the size of each bounds table is 4MB.
 */
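/*
 * In terms of the constants from asm/mpx.h, that works out to
 * 2GB/8B = 2^28 eight-byte entries per 64-bit bounds directory and
 * 4MB/32B = 2^17 thirty-two-byte entries per 64-bit bounds table;
 * on 32-bit, 4MB/4B = 2^20 directory entries and 16KB/16B = 2^10
 * table entries.
 */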
static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	unsigned long bd_new_entry;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory.  Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
				   expected_old_val, bd_new_entry);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails.  Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */

	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry.  Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}

	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set.  Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	trace_mpx_new_bounds_table(bt_addr);
	return 0;
out_unmap:
	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
	return ret;
}
/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory.  If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * With 32-bit mode, the size of BD is 4MB, and the size of each
 * bound table is 16KB. With 64-bit mode, the size of BD is 2GB,
 * and the size of each bound table is 4MB.
 */
static int do_mpx_bt_fault(void)
{
	unsigned long bd_entry, bd_base;
	const struct mpx_bndcsr *bndcsr;
	struct mm_struct *mm = current->mm;

	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
		return -EINVAL;

	return allocate_bt(mm, (long __user *)bd_entry);
}
int mpx_handle_bd_fault(void)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	return do_mpx_bt_fault();
}
/*
 * A thin wrapper around get_user_pages().  Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;

	gup_ret = get_user_pages((unsigned long)addr, nr_pages,
			write ? FOLL_WRITE : 0, NULL, NULL);
	/*
	 * get_user_pages() returns number of pages gotten.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret>0, success */
	return 0;
}
static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
						unsigned long bd_entry)
{
	unsigned long bt_addr = bd_entry;
	int align_to_bytes;
	/*
	 * Bit 0 in a bt_entry is always the valid bit.
	 */
	bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
	/*
	 * Tables are naturally aligned at 8-byte boundaries
	 * on 64-bit and 4-byte boundaries on 32-bit.  The
	 * documentation makes it appear that the low bits
	 * are ignored by the hardware, so we do the same.
	 */
	if (is_64bit_mm(mm))
		align_to_bytes = 8;
	else
		align_to_bytes = 4;
	bt_addr &= ~(align_to_bytes-1);
	return bt_addr;
}
/*
 * We only want to do a 4-byte get_user() on 32-bit.  Otherwise,
 * we might run off the end of the bounds table if we are on
 * a 64-bit kernel and try to get 8 bytes.
 */
static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
		long __user *bd_entry_ptr)
{
	u32 bd_entry_32;
	int ret;

	if (is_64bit_mm(mm))
		return get_user(*bd_entry_ret, bd_entry_ptr);

	/*
	 * Note that get_user() uses the type of the *pointer* to
	 * establish the size of the get, not the destination.
	 */
	ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
	*bd_entry_ret = bd_entry_32;
	return ret;
}
/*
 * Get the base of the bounds table pointed to by a specific bounds
 * directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
			long __user *bd_entry_ptr,
			unsigned long *bt_addr_result)
{
	int ret;
	int valid_bit;
	unsigned long bd_entry;
	unsigned long bt_addr;

	if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
		if (!ret)
			break;
		if (ret != -EFAULT)
			return ret;
		ret = mpx_resolve_fault(bd_entry_ptr, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bt entry?  That is OK.  It
	 * just means there was no bounds table for this memory.  Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	*bt_addr_result = bt_addr;
	return 0;
}
static inline int bt_entry_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_ENTRY_BYTES_64;
	else
		return MPX_BT_ENTRY_BYTES_32;
}
/*
 * Takes a virtual address and turns it in to the offset in bytes
 * inside of the bounds table where the bounds table entry
 * controlling 'addr' can be found.
 */
static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
		unsigned long addr)
{
	unsigned long bt_table_nr_entries;
	unsigned long offset = addr;

	if (is_64bit_mm(mm)) {
		/* Bottom 3 bits are ignored on 64-bit */
		offset >>= 3;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_64;
	} else {
		/* Bottom 2 bits are ignored on 32-bit */
		offset >>= 2;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_32;
	}
	/*
	 * We know the size of the table in to which we are
	 * indexing, and we have eliminated all the low bits
	 * which are ignored for indexing.
	 *
	 * Mask out all the high bits which we do not need
	 * to index in to the table.  Note that the tables
	 * are always powers of two so this gives us a proper
	 * mask.
	 */
	offset &= (bt_table_nr_entries-1);
	/*
	 * We now have an entry offset in terms of *entries* in
	 * the table.  We need to scale it back up to bytes.
	 */
	offset *= bt_entry_size_bytes(mm);
	return offset;
}
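/*
 * In other words, on 64-bit this computes
 * ((addr >> 3) % MPX_BT_NR_ENTRIES_64) * MPX_BT_ENTRY_BYTES_64,
 * and the 32-bit case is the same with a 2-bit shift and the _32
 * constants; the modulo reduces to a simple mask because the entry
 * counts are powers of two.
 */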
/*
 * How much virtual address space does a single bounds
 * directory entry cover?
 *
 * Note, we need a long long because 4GB doesn't fit in
 * to a long on 32-bit.
 */
static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
{
	unsigned long long virt_space;
	unsigned long long GB = (1ULL << 30);

	/*
	 * This covers 32-bit emulation as well as 32-bit kernels
	 * running on 64-bit hardware.
	 */
	if (!is_64bit_mm(mm))
		return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;

	/*
	 * 'x86_virt_bits' returns what the hardware is capable
	 * of, and returns the full >32-bit address space when
	 * running 32-bit kernels on 64-bit hardware.
	 */
	virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
	return virt_space / MPX_BD_NR_ENTRIES_64;
}
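/*
 * With the usual 48 bits of virtual address space and 2^28
 * directory entries on 64-bit, this works out to 2^48 / 2^28 = 1MB
 * of virtual address space covered per directory entry (and thus
 * per bounds table); the 32-bit case is 4GB / 2^20 = 4KB per entry.
 */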
/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start_mapping, unsigned long end_mapping)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;
	unsigned long start;
	unsigned long end;

	/*
	 * if we 'end' on a boundary, the offset will be 0 which
	 * is not what we want.  Back it up a byte to get the
	 * last bt entry.  Then once we have the entry itself,
	 * move 'end' back up by the table entry size.
	 */
	start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping);
	end   = bt_addr + mpx_get_bt_entry_offset_bytes(mm, end_mapping - 1);
	/*
	 * Move end back up by one entry.  Among other things
	 * this ensures that it remains page-aligned and does
	 * not screw up zap_page_range()
	 */
	end += bt_entry_size_bytes(mm);

	/*
	 * Find the first overlapping vma. If vma->vm_start > start, there
	 * will be a hole in the bounds table. This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here.  If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error.  This
		 * probably results in a SIGSEGV.
		 */
		if (!(vma->vm_flags & VM_MPX))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len);
		trace_mpx_unmap_zap(addr, addr+len);

		vma = vma->vm_next;
		addr = vma->vm_start;
	}
	return 0;
}
static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
		unsigned long addr)
{
	/*
	 * There are several ways to derive the bd offsets.  We
	 * use the following approach here:
	 * 1. We know the size of the virtual address space
	 * 2. We know the number of entries in a bounds table
	 * 3. We know that each entry covers a fixed amount of
	 *    virtual address space.
	 * So, we can just divide the virtual address by the
	 * virtual space used by one entry to determine which
	 * entry "controls" the given virtual address.
	 */
	if (is_64bit_mm(mm)) {
		int bd_entry_size = 8; /* 64-bit pointer */
		/*
		 * Take the 64-bit addressing hole in to account.
		 */
		addr &= ((1UL << boot_cpu_data.x86_virt_bits) - 1);
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	} else {
		int bd_entry_size = 4; /* 32-bit pointer */
		/*
		 * 32-bit has no hole so this case needs no mask
		 */
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	}
	/*
	 * The two return calls above are exact copies.  If we
	 * pull out a single copy and put it in here, gcc won't
	 * realize that we're doing a power-of-2 divide and use
	 * shifts.  It uses a real divide.  If we put them up
	 * there, it manages to figure it out (gcc 4.8.3).
	 */
}
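/*
 * As a worked example: on 64-bit, with 1MB of virtual space per
 * directory entry, addr == 0x10000000 (256MB) yields
 * (0x10000000 / 0x100000) * 8 == 256 * 8 == 2048, the byte offset
 * of the controlling entry within the bounds directory.
 */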
static int unmap_entire_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long uninitialized_var(actual_old_val);
	int ret;

	while (1) {
		int need_write = 1;
		unsigned long cleared_bd_entry = 0;

		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
				bd_entry, expected_old_val, cleared_bd_entry);
		if (!ret)
			break;
		if (ret != -EFAULT)
			return ret;
		ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * That is OK, since we were both trying to do
		 * the same thing.  Declare success.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry.  We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated.  Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}
	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() will check whether it comes
	 * from one bounds table through VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm), NULL);
}
static int try_unmap_single_bt(struct mm_struct *mm,
	       unsigned long start, unsigned long end)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	/*
	 * "bta" == Bounds Table Area: the area controlled by the
	 * bounds table that we are unmapping.
	 */
	unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1);
	unsigned long bta_end_vaddr = bta_start_vaddr + bd_entry_virt_space(mm);
	unsigned long uninitialized_var(bt_addr);
	void __user *bde_vaddr;
	int ret;
	/*
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	/*
	 * Do not count other MPX bounds table VMAs as neighbors.
	 * Although theoretically possible, we do not allow bounds
	 * tables for bounds tables so our heads do not explode.
	 * If we count them as neighbors here, we may end up with
	 * lots of tables even though we have no actual table
	 * entries in use.
	 */
	while (next && (next->vm_flags & VM_MPX))
		next = next->vm_next;
	while (prev && (prev->vm_flags & VM_MPX))
		prev = prev->vm_prev;
	/*
	 * We know 'start' and 'end' lie within an area controlled
	 * by a single bounds table.  See if there are any other
	 * VMAs controlled by that bounds table.  If there are not
	 * then we can "expand" the area we are unmapping to possibly
	 * cover the entire table.
	 */
	next = find_vma_prev(mm, start, &prev);
	if ((!prev || prev->vm_end <= bta_start_vaddr) &&
	    (!next || next->vm_start >= bta_end_vaddr)) {
		/*
		 * No neighbor VMAs controlled by same bounds
		 * table.  Try to unmap the whole thing
		 */
		start = bta_start_vaddr;
		end = bta_end_vaddr;
	}

	bde_vaddr = mm->context.bd_addr + mpx_get_bd_entry_offset(mm, start);
	ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
	/*
	 * No bounds table there, so nothing to unmap.
	 */
	if (ret == -ENOENT) {
		ret = 0;
		return 0;
	}
	if (ret)
		return ret;
	/*
	 * We are unmapping an entire table.  Either because the
	 * unmap that started this whole process was large enough
	 * to cover an entire table, or that the unmap was small
	 * but was the area covered by a bounds table.
	 */
	if ((start == bta_start_vaddr) &&
	    (end == bta_end_vaddr))
		return unmap_entire_bt(mm, bde_vaddr, bt_addr);
	return zap_bt_entries_mapping(mm, bt_addr, start, end);
}
static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned long one_unmap_start;
	trace_mpx_unmap_search(start, end);

	one_unmap_start = start;
	while (one_unmap_start < end) {
		int ret;
		unsigned long next_unmap_start = ALIGN(one_unmap_start+1,
						       bd_entry_virt_space(mm));
		unsigned long one_unmap_end = end;
		/*
		 * if the end is beyond the current bounds table,
		 * move it back so we only deal with a single one
		 * at a time
		 */
		if (one_unmap_end > next_unmap_start)
			one_unmap_end = next_unmap_start;
		ret = try_unmap_single_bt(mm, one_unmap_start, one_unmap_end);
		if (ret)
			return ret;

		one_unmap_start = next_unmap_start;
	}
	return 0;
}
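/*
 * For example, on 64-bit (1MB covered per bounds table), an unmap
 * of 0x180000...0x280000 becomes two try_unmap_single_bt() calls:
 * one for 0x180000...0x200000 and one for 0x200000...0x280000, so
 * that each call deals with a single bounds table.
 */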
/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed. Assume end > start.
 *
 * This function will be called by do_munmap(), and the VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and the 'vma' is the first vma in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start->end), we will not continue follow-up work. This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}
/* MPX cannot handle addresses above 47 bits yet. */
unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
		unsigned long flags)
{
	if (!kernel_managing_mpx_tables(current->mm))
		return addr;
	if (addr + len <= DEFAULT_MAP_WINDOW)
		return addr;
	if (flags & MAP_FIXED)
		return -ENOMEM;

	/*
	 * Requested len is larger than the whole area we're allowed to map in.
	 * Resetting hinting address wouldn't do much good -- fail early.
	 */
	if (len > DEFAULT_MAP_WINDOW)
		return -ENOMEM;

	/* Look for unmap area within DEFAULT_MAP_WINDOW */
	return 0;
}