/* coreboot src/include/cpu/x86/mtrr.h — x86 MTRR MSR definitions and setup API */
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #ifndef CPU_X86_MTRR_H
4 #define CPU_X86_MTRR_H
6 #ifndef __ASSEMBLER__
7 #include <cpu/x86/msr.h>
8 #include <arch/cpu.h>
9 #endif
#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity.  Physical addresses are converted to
 * "range" units (4KiB pages) so range math fits in fewer bits. */
#define RANGE_SHIFT 12
/* Convert an address-bit count to a range-bit count, clamping at the
 * 4KiB granularity (never fewer than RANGE_SHIFT bits). */
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)

/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1ULL << 20)
#define RANGE_4GB (1ULL << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))

#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
/* These are the region types (memory type encodings used in MTRR MSRs). */
#define MTRR_TYPE_UNCACHEABLE 0
#define MTRR_TYPE_WRCOMB 1
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7

/* MTRRcap MSR: capability bits and variable-MTRR count. */
#define MTRR_CAP_MSR 0x0fe

#define MTRR_CAP_PRMRR (1 << 12)
#define MTRR_CAP_SMRR (1 << 11)
#define MTRR_CAP_WC (1 << 10)
#define MTRR_CAP_FIX (1 << 8)
#define MTRR_CAP_VCNT 0xff
/* MTRRdefType MSR: default memory type plus global/fixed enable bits. */
#define MTRR_DEF_TYPE_MSR 0x2ff
#define MTRR_DEF_TYPE_MASK 0xff
#define MTRR_DEF_TYPE_EN (1 << 11)
#define MTRR_DEF_TYPE_FIX_EN (1 << 10)

/* SMRR (System Management Range Register) MSR pair. */
#define IA32_SMRR_PHYS_BASE 0x1f2
#define IA32_SMRR_PHYS_MASK 0x1f3
#define SMRR_PHYS_MASK_LOCK (1 << 10)

/* Specific to model_6fx and model_1067x.
   These are named MSR_SMRR_PHYSBASE in the SDM. */
#define CORE2_SMRR_PHYS_BASE 0xa0
#define CORE2_SMRR_PHYS_MASK 0xa1

/* Variable MTRRs: base/mask MSR pairs starting at 0x200. */
#define MTRR_PHYS_BASE(reg) (0x200 + 2 * (reg))
#define MTRR_PHYS_MASK(reg) (MTRR_PHYS_BASE(reg) + 1)
#define MTRR_PHYS_MASK_VALID (1 << 11)
/* Fixed-range MTRRs: 11 MSRs, each describing 8 sub-ranges, covering the
 * low 1MiB (64KiB, 16KiB and 4KiB granularity regions). */
#define NUM_FIXED_RANGES 88
#define RANGES_PER_FIXED_MTRR 8
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)
#define MTRR_FIX_64K_00000 0x250
#define MTRR_FIX_16K_80000 0x258
#define MTRR_FIX_16K_A0000 0x259
#define MTRR_FIX_4K_C0000 0x268
#define MTRR_FIX_4K_C8000 0x269
#define MTRR_FIX_4K_D0000 0x26a
#define MTRR_FIX_4K_D8000 0x26b
#define MTRR_FIX_4K_E0000 0x26c
#define MTRR_FIX_4K_E8000 0x26d
#define MTRR_FIX_4K_F0000 0x26e
#define MTRR_FIX_4K_F8000 0x26f
76 #if !defined(__ASSEMBLER__)
78 #include <stdint.h>
79 #include <stddef.h>
80 #include <lib.h>
83 * The MTRR code has some side effects that the callers should be aware for.
84 * 1. The call sequence matters. x86_setup_mtrrs() calls
85 * x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrrs() (equivalent
86 * of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs(). If the callers
87 * want to call the components of x86_setup_mtrrs() because of other
88 * requirements the ordering should still preserved.
89 * 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because
90 * of the nature of the global MTRR enable flag. Therefore, all direct
91 * or indirect callers of enable_fixed_mtrr() should ensure that the
92 * variable MTRR MSRs do not contain bad ranges.
94 * Note that this function sets up MTRRs for addresses above 4GiB.
96 void x86_setup_mtrrs(void);
98 * x86_setup_mtrrs_with_detect() does the same thing as x86_setup_mtrrs(), but
99 * it always dynamically detects the number of variable MTRRs available.
101 void x86_setup_mtrrs_with_detect(void);
102 void x86_setup_mtrrs_with_detect_no_above_4gb(void);
104 * x86_setup_var_mtrrs() parameters:
105 * address_bits - number of physical address bits supported by cpu
106 * above4gb - if set setup MTRRs for addresses above 4GiB else ignore
107 * memory ranges above 4GiB
109 void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb);
110 void enable_fixed_mtrr(void);
111 /* Unhide Rd/WrDram bits and allow modification for AMD. */
112 void fixed_mtrrs_expose_amd_rwdram(void);
113 /* Hide Rd/WrDram bits and allow modification for AMD. */
114 void fixed_mtrrs_hide_amd_rwdram(void);
116 void x86_mtrr_check(void);
118 /* Insert a temporary MTRR range for the duration of coreboot's runtime.
119 * This function needs to be called after the first MTRR solution is derived. */
120 void mtrr_use_temp_range(uintptr_t begin, size_t size, int type);
122 static inline int get_var_mtrr_count(void)
124 return rdmsr(MTRR_CAP_MSR).lo & MTRR_CAP_VCNT;
127 void set_var_mtrr(unsigned int reg, unsigned int base, unsigned int size,
128 unsigned int type);
129 int get_free_var_mtrr(void);
130 void clear_all_var_mtrr(void);
132 asmlinkage void display_mtrrs(void);
134 struct var_mtrr_context {
135 uint32_t max_var_mtrrs;
136 uint32_t used_var_mtrrs;
137 struct {
138 msr_t base;
139 msr_t mask;
140 } mtrr[];
void var_mtrr_context_init(struct var_mtrr_context *ctx);
int var_mtrr_set(struct var_mtrr_context *ctx, uintptr_t addr, size_t size, int type);
void commit_mtrr_setup(const struct var_mtrr_context *ctx);
void postcar_mtrr_setup(void);
148 static inline uint64_t calculate_var_mtrr_size(uint64_t mask)
150 return 1 << (__ffs64(mask >> RANGE_SHIFT) + RANGE_SHIFT);
153 #endif /* !defined(__ASSEMBLER__) */
155 /* Align up/down to next power of 2, suitable for assembler
156 too. Range of result 256kB to 128MB is good enough here. */
157 #define _POW2_MASK(x) ((x>>1)|(x>>2)|(x>>3)|(x>>4)|(x>>5)| \
158 (x>>6)|(x>>7)|(x>>8)|((1<<18)-1))
159 #define _ALIGN_UP_POW2(x) ((x + _POW2_MASK(x)) & ~_POW2_MASK(x))
160 #define _ALIGN_DOWN_POW2(x) ((x) & ~_POW2_MASK(x))
162 /* Calculate `4GiB - x` (e.g. absolute address for offset from 4GiB) */
163 #define _FROM_4G_TOP(x) ((0xffffffff - (x)) + 1)
/* At the end of romstage, low RAM 0..CACHE_TMP_RAMTOP may be set
 * as write-back cacheable to speed up ramstage decompression.
 * Note MTRR boundaries, must be power of two.
 */
#define CACHE_TMP_RAMTOP (16<<20)
171 /* For ROM caching, generally, try to use the next power of 2. */
172 #define OPTIMAL_CACHE_ROM_SIZE _ALIGN_UP_POW2(CONFIG_ROM_SIZE)
173 #define OPTIMAL_CACHE_ROM_BASE _FROM_4G_TOP(OPTIMAL_CACHE_ROM_SIZE)
174 #if (OPTIMAL_CACHE_ROM_SIZE < CONFIG_ROM_SIZE) || \
175 (OPTIMAL_CACHE_ROM_SIZE >= (2 * CONFIG_ROM_SIZE))
176 # error "Optimal CACHE_ROM_SIZE can't be derived, _POW2_MASK needs refinement."
177 #endif
/* Make sure it doesn't overlap CAR, though. If the gap between
   CAR and 4GiB is too small, make it at most the size of this
   gap. As we can't align up (might overlap again), align down
   to get a power of 2 again, for a single MTRR. */
#define CAR_END (CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE)
#if CAR_END > OPTIMAL_CACHE_ROM_BASE
# define CAR_CACHE_ROM_SIZE _ALIGN_DOWN_POW2(_FROM_4G_TOP(CAR_END))
#else
# define CAR_CACHE_ROM_SIZE OPTIMAL_CACHE_ROM_SIZE
#endif
#if ((CAR_CACHE_ROM_SIZE & (CAR_CACHE_ROM_SIZE - 1)) != 0)
# error "CAR CACHE_ROM_SIZE is not a power of 2, _POW2_MASK needs refinement."
#endif
193 /* Last but not least, most (if not all) chipsets have MMIO
194 between 0xfe000000 and 0xff000000, so limit to 16MiB. */
195 #if CAR_CACHE_ROM_SIZE >= 16 << 20
196 # define CACHE_ROM_SIZE (16 << 20)
197 #else
198 # define CACHE_ROM_SIZE CAR_CACHE_ROM_SIZE
199 #endif
201 #define CACHE_ROM_BASE _FROM_4G_TOP(CACHE_ROM_SIZE)
203 #endif /* CPU_X86_MTRR_H */