/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#ifndef __MIPS_ASM_MIPS_CPS_H__
# error Please include asm/mips-cps.h rather than asm/mips-cm.h
#endif

#ifndef __MIPS_ASM_MIPS_CM_H__
#define __MIPS_ASM_MIPS_CM_H__
#include <linux/bitops.h>
#include <linux/errno.h>
/* The base address of the CM GCR block */
extern void __iomem *mips_gcr_base;

/* The base address of the CM L2-only sync region */
extern void __iomem *mips_cm_l2sync_base;
/**
 * __mips_cm_phys_base - retrieve the physical base address of the CM
 *
 * This function returns the physical base address of the Coherence Manager
 * global control block, or 0 if no Coherence Manager is present. It provides
 * a default implementation which reads the CMGCRBase register where available,
 * and may be overridden by platforms which determine this address in a
 * different way by defining a function with the same prototype except for the
 * name mips_cm_phys_base (without underscores).
 */
extern phys_addr_t __mips_cm_phys_base(void);
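
/*
 * For illustration only (not part of the original header): a platform which
 * determines the GCR base some other way could provide its own
 * mips_cm_phys_base() with the same prototype. The address below is a
 * made-up placeholder, not a real board value.
 *
 *	phys_addr_t mips_cm_phys_base(void)
 *	{
 *		return 0x1fbf8000;
 *	}
 */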
/*
 * mips_cm_is64 - determine CM register width
 *
 * The CM register width is determined by the version of the CM, with CM3
 * introducing 64 bit GCRs and all prior CM versions having 32 bit GCRs.
 * However we may run a kernel built for MIPS32 on a system with 64 bit GCRs,
 * or vice-versa. This variable indicates the width of the memory accesses
 * that the kernel will perform to GCRs, which may differ from the actual
 * width of the GCRs.
 *
 * It's set to 0 for 32-bit accesses and 1 for 64-bit accesses.
 */
extern int mips_cm_is64;
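
/*
 * Illustrative sketch, not taken from this header: the CPS accessors in
 * asm/mips-cps.h are expected to pick the MMIO access width from this
 * variable, roughly along the lines of:
 *
 *	if (mips_cm_is64)
 *		val = readq(addr);
 *	else
 *		val = readl(addr);
 */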
/**
 * mips_cm_error_report - Report CM cache errors
 */
#ifdef CONFIG_MIPS_CM
extern void mips_cm_error_report(void);
#else
static inline void mips_cm_error_report(void) {}
#endif
/**
 * mips_cm_probe - probe for a Coherence Manager
 *
 * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM
 * is successfully detected, else -errno.
 */
#ifdef CONFIG_MIPS_CM
extern int mips_cm_probe(void);
#else
static inline int mips_cm_probe(void)
{
	return -ENODEV;
}
#endif
/**
 * mips_cm_present - determine whether a Coherence Manager is present
 *
 * Returns true if a CM is present in the system, else false.
 */
static inline bool mips_cm_present(void)
{
#ifdef CONFIG_MIPS_CM
	return mips_gcr_base != NULL;
#else
	return false;
#endif
}
/**
 * mips_cm_has_l2sync - determine whether an L2-only sync region is present
 *
 * Returns true if the system implements an L2-only sync region, else false.
 */
static inline bool mips_cm_has_l2sync(void)
{
#ifdef CONFIG_MIPS_CM
	return mips_cm_l2sync_base != NULL;
#else
	return false;
#endif
}
/* Offsets to register blocks from the CM base address */
#define MIPS_CM_GCB_OFS		0x0000 /* Global Control Block */
#define MIPS_CM_CLCB_OFS	0x2000 /* Core Local Control Block */
#define MIPS_CM_COCB_OFS	0x4000 /* Core Other Control Block */
#define MIPS_CM_GDB_OFS		0x6000 /* Global Debug Block */

/* Total size of the CM memory mapped registers */
#define MIPS_CM_GCR_SIZE	0x8000

/* Size of the L2-only sync region */
#define MIPS_CM_L2SYNC_SIZE	0x1000
#define GCR_ACCESSOR_RO(sz, off, name)					\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_GCB_OFS + off, name)		\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)

#define GCR_ACCESSOR_RW(sz, off, name)					\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_GCB_OFS + off, name)		\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)

#define GCR_CX_ACCESSOR_RO(sz, off, name)				\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name)	\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)

#define GCR_CX_ACCESSOR_RW(sz, off, name)				\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name)	\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)
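
/*
 * Editorial note, not in the original header: together with the
 * CPS_ACCESSOR_* helpers from asm/mips-cps.h these macros generate inline
 * accessors named after each register. For example GCR_ACCESSOR_RO(64,
 * 0x000, config) below is expected to provide read_gcr_config() and
 * read_gcr_redir_config() (the latter targeting the redirect block), while
 * the GCR_CX_ variants provide core-local (read_gcr_cl_*()) and core-other
 * (read_gcr_co_*()) accessors, as used by the inline functions later in
 * this file.
 */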
/* GCR_CONFIG - Information about the system */
GCR_ACCESSOR_RO(64, 0x000, config)
#define CM_GCR_CONFIG_CLUSTER_COH_CAPABLE	BIT_ULL(43)
#define CM_GCR_CONFIG_CLUSTER_ID		GENMASK_ULL(39, 32)
#define CM_GCR_CONFIG_NUM_CLUSTERS		GENMASK(29, 23)
#define CM_GCR_CONFIG_NUMIOCU			GENMASK(15, 8)
#define CM_GCR_CONFIG_PCORES			GENMASK(7, 0)

/* GCR_BASE - Base address of the Global Configuration Registers (GCRs) */
GCR_ACCESSOR_RW(64, 0x008, base)
#define CM_GCR_BASE_GCRBASE			GENMASK_ULL(47, 15)
#define CM_GCR_BASE_CMDEFTGT			GENMASK(1, 0)
#define CM_GCR_BASE_CMDEFTGT_MEM		0
#define CM_GCR_BASE_CMDEFTGT_RESERVED		1
#define CM_GCR_BASE_CMDEFTGT_IOCU0		2
#define CM_GCR_BASE_CMDEFTGT_IOCU1		3

/* GCR_ACCESS - Controls core/IOCU access to GCRs */
GCR_ACCESSOR_RW(32, 0x020, access)
#define CM_GCR_ACCESS_ACCESSEN			GENMASK(7, 0)
/* GCR_REV - Indicates the Coherence Manager revision */
GCR_ACCESSOR_RO(32, 0x030, rev)
#define CM_GCR_REV_MAJOR			GENMASK(15, 8)
#define CM_GCR_REV_MINOR			GENMASK(7, 0)

#define CM_ENCODE_REV(major, minor)				\
		(((major) << __ffs(CM_GCR_REV_MAJOR)) |		\
		 ((minor) << __ffs(CM_GCR_REV_MINOR)))

#define CM_REV_CM2				CM_ENCODE_REV(6, 0)
#define CM_REV_CM2_5				CM_ENCODE_REV(7, 0)
#define CM_REV_CM3				CM_ENCODE_REV(8, 0)
#define CM_REV_CM3_5				CM_ENCODE_REV(9, 0)
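
/*
 * Usage sketch (for illustration, not from the original header): a value read
 * from GCR_REV encodes the major/minor revision in the fields above, so
 * revision checks compare directly against the CM_REV_* constants, e.g.
 *
 *	if (mips_cm_revision() >= CM_REV_CM3)
 *		// CM3 or later, i.e. 64 bit GCRs
 */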
/* GCR_ERR_CONTROL - Control error checking logic */
GCR_ACCESSOR_RW(32, 0x038, err_control)
#define CM_GCR_ERR_CONTROL_L2_ECC_EN		BIT(1)
#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT	BIT(0)

/* GCR_ERR_MASK - Control which errors are reported as interrupts */
GCR_ACCESSOR_RW(64, 0x040, error_mask)

/* GCR_ERR_CAUSE - Indicates the type of error that occurred */
GCR_ACCESSOR_RW(64, 0x048, error_cause)
#define CM_GCR_ERROR_CAUSE_ERRTYPE		GENMASK(31, 27)
#define CM3_GCR_ERROR_CAUSE_ERRTYPE		GENMASK_ULL(63, 58)
#define CM_GCR_ERROR_CAUSE_ERRINFO		GENMASK(26, 0)

/* GCR_ERR_ADDR - Indicates the address associated with an error */
GCR_ACCESSOR_RW(64, 0x050, error_addr)

/* GCR_ERR_MULT - Indicates when multiple errors have occurred */
GCR_ACCESSOR_RW(64, 0x058, error_mult)
#define CM_GCR_ERROR_MULT_ERR2ND		GENMASK(4, 0)

/* GCR_L2_ONLY_SYNC_BASE - Base address of the L2 cache-only sync region */
GCR_ACCESSOR_RW(64, 0x070, l2_only_sync_base)
#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE	GENMASK(31, 12)
#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN		BIT(0)
/* GCR_GIC_BASE - Base address of the Global Interrupt Controller (GIC) */
GCR_ACCESSOR_RW(64, 0x080, gic_base)
#define CM_GCR_GIC_BASE_GICBASE			GENMASK(31, 17)
#define CM_GCR_GIC_BASE_GICEN			BIT(0)

/* GCR_CPC_BASE - Base address of the Cluster Power Controller (CPC) */
GCR_ACCESSOR_RW(64, 0x088, cpc_base)
#define CM_GCR_CPC_BASE_CPCBASE			GENMASK(31, 15)
#define CM_GCR_CPC_BASE_CPCEN			BIT(0)
/* GCR_REGn_BASE - Base addresses of CM address regions */
GCR_ACCESSOR_RW(64, 0x090, reg0_base)
GCR_ACCESSOR_RW(64, 0x0a0, reg1_base)
GCR_ACCESSOR_RW(64, 0x0b0, reg2_base)
GCR_ACCESSOR_RW(64, 0x0c0, reg3_base)
#define CM_GCR_REGn_BASE_BASEADDR		GENMASK(31, 16)

/* GCR_REGn_MASK - Size & destination of CM address regions */
GCR_ACCESSOR_RW(64, 0x098, reg0_mask)
GCR_ACCESSOR_RW(64, 0x0a8, reg1_mask)
GCR_ACCESSOR_RW(64, 0x0b8, reg2_mask)
GCR_ACCESSOR_RW(64, 0x0c8, reg3_mask)
#define CM_GCR_REGn_MASK_ADDRMASK		GENMASK(31, 16)
#define CM_GCR_REGn_MASK_CCAOVR		GENMASK(7, 5)
#define CM_GCR_REGn_MASK_CCAOVREN		BIT(4)
#define CM_GCR_REGn_MASK_DROPL2			BIT(2)
#define CM_GCR_REGn_MASK_CMTGT			GENMASK(1, 0)
#define CM_GCR_REGn_MASK_CMTGT_DISABLED	0x0
#define CM_GCR_REGn_MASK_CMTGT_MEM		0x1
#define CM_GCR_REGn_MASK_CMTGT_IOCU0		0x2
#define CM_GCR_REGn_MASK_CMTGT_IOCU1		0x3
/* GCR_GIC_STATUS - Indicates presence of a Global Interrupt Controller (GIC) */
GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
#define CM_GCR_GIC_STATUS_EX			BIT(0)

/* GCR_CPC_STATUS - Indicates presence of a Cluster Power Controller (CPC) */
GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
#define CM_GCR_CPC_STATUS_EX			BIT(0)
/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
GCR_ACCESSOR_RW(32, 0x130, l2_config)
#define CM_GCR_L2_CONFIG_BYPASS			BIT(20)
#define CM_GCR_L2_CONFIG_SET_SIZE		GENMASK(15, 12)
#define CM_GCR_L2_CONFIG_LINE_SIZE		GENMASK(11, 8)
#define CM_GCR_L2_CONFIG_ASSOC			GENMASK(7, 0)

/* GCR_SYS_CONFIG2 - Further information about the system */
GCR_ACCESSOR_RO(32, 0x150, sys_config2)
#define CM_GCR_SYS_CONFIG2_MAXVPW		GENMASK(3, 0)

/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */
GCR_ACCESSOR_RW(32, 0x300, l2_pft_control)
#define CM_GCR_L2_PFT_CONTROL_PAGEMASK		GENMASK(31, 12)
#define CM_GCR_L2_PFT_CONTROL_PFTEN		BIT(8)
#define CM_GCR_L2_PFT_CONTROL_NPFT		GENMASK(7, 0)

/* GCR_L2_PFT_CONTROL_B - Controls hardware L2 prefetching */
GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b)
#define CM_GCR_L2_PFT_CONTROL_B_CEN		BIT(8)
#define CM_GCR_L2_PFT_CONTROL_B_PORTID		GENMASK(7, 0)
/* GCR_L2SM_COP - L2 cache op state machine control */
GCR_ACCESSOR_RW(32, 0x620, l2sm_cop)
#define CM_GCR_L2SM_COP_PRESENT			BIT(31)
#define CM_GCR_L2SM_COP_RESULT			GENMASK(8, 6)
#define CM_GCR_L2SM_COP_RESULT_DONTCARE		0
#define CM_GCR_L2SM_COP_RESULT_DONE_OK		1
#define CM_GCR_L2SM_COP_RESULT_DONE_ERROR	2
#define CM_GCR_L2SM_COP_RESULT_ABORT_OK		3
#define CM_GCR_L2SM_COP_RESULT_ABORT_ERROR	4
#define CM_GCR_L2SM_COP_RUNNING			BIT(5)
#define CM_GCR_L2SM_COP_TYPE			GENMASK(4, 2)
#define CM_GCR_L2SM_COP_TYPE_IDX_WBINV		0
#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAG	1
#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAGDATA	2
#define CM_GCR_L2SM_COP_TYPE_HIT_INV		4
#define CM_GCR_L2SM_COP_TYPE_HIT_WBINV		5
#define CM_GCR_L2SM_COP_TYPE_HIT_WB		6
#define CM_GCR_L2SM_COP_TYPE_FETCHLOCK		7
#define CM_GCR_L2SM_COP_CMD			GENMASK(1, 0)
#define CM_GCR_L2SM_COP_CMD_START		1	/* only when idle */
#define CM_GCR_L2SM_COP_CMD_ABORT		3	/* only when running */

/* GCR_L2SM_TAG_ADDR_COP - L2 cache op state machine address control */
GCR_ACCESSOR_RW(64, 0x628, l2sm_tag_addr_cop)
#define CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES	GENMASK_ULL(63, 48)
#define CM_GCR_L2SM_TAG_ADDR_COP_START_TAG	GENMASK_ULL(47, 6)
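
/*
 * Hypothetical sketch inferred from the field names above, not taken from
 * kernel code: software might start an L2 index writeback-invalidate by
 * programming the address/line-count register, issuing a start command and
 * polling until the state machine goes idle, roughly:
 *
 *	write_gcr_l2sm_tag_addr_cop(
 *		FIELD_PREP(CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES, nlines) |
 *		FIELD_PREP(CM_GCR_L2SM_TAG_ADDR_COP_START_TAG, start_tag));
 *	write_gcr_l2sm_cop(
 *		FIELD_PREP(CM_GCR_L2SM_COP_TYPE, CM_GCR_L2SM_COP_TYPE_IDX_WBINV) |
 *		FIELD_PREP(CM_GCR_L2SM_COP_CMD, CM_GCR_L2SM_COP_CMD_START));
 *	while (read_gcr_l2sm_cop() & CM_GCR_L2SM_COP_RUNNING)
 *		;
 */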
/* GCR_BEV_BASE - Controls the location of the BEV for powered up cores */
GCR_ACCESSOR_RW(64, 0x680, bev_base)

/* GCR_Cx_RESET_RELEASE - Controls core reset for CM 1.x */
GCR_CX_ACCESSOR_RW(32, 0x000, reset_release)

/* GCR_Cx_COHERENCE - Controls core coherence */
GCR_CX_ACCESSOR_RW(32, 0x008, coherence)
#define CM_GCR_Cx_COHERENCE_COHDOMAINEN		GENMASK(7, 0)
#define CM3_GCR_Cx_COHERENCE_COHEN		BIT(0)

/* GCR_Cx_CONFIG - Information about a core's configuration */
GCR_CX_ACCESSOR_RO(32, 0x010, config)
#define CM_GCR_Cx_CONFIG_IOCUTYPE		GENMASK(11, 10)
#define CM_GCR_Cx_CONFIG_PVPE			GENMASK(9, 0)

/* GCR_Cx_OTHER - Configure the core-other/redirect GCR block */
GCR_CX_ACCESSOR_RW(32, 0x018, other)
#define CM_GCR_Cx_OTHER_CORENUM			GENMASK(31, 16)	/* CM < 3 */
#define CM_GCR_Cx_OTHER_CLUSTER_EN		BIT(31)		/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_GIC_EN			BIT(30)		/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_BLOCK			GENMASK(25, 24)	/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_BLOCK_LOCAL		0
#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL		1
#define CM_GCR_Cx_OTHER_BLOCK_USER		2
#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL_HIGH	3
#define CM_GCR_Cx_OTHER_CLUSTER			GENMASK(21, 16)	/* CM >= 3.5 */
#define CM3_GCR_Cx_OTHER_CORE			GENMASK(13, 8)	/* CM >= 3 */
#define CM_GCR_Cx_OTHER_CORE_CM			32
#define CM3_GCR_Cx_OTHER_VP			GENMASK(2, 0)	/* CM >= 3 */
/* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */
GCR_CX_ACCESSOR_RW(32, 0x020, reset_base)
#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE		GENMASK(31, 12)

/* GCR_Cx_ID - Identify the current core */
GCR_CX_ACCESSOR_RO(32, 0x028, id)
#define CM_GCR_Cx_ID_CLUSTER			GENMASK(15, 8)
#define CM_GCR_Cx_ID_CORE			GENMASK(7, 0)

/* GCR_Cx_RESET_EXT_BASE - Configure behaviour when cores reset or power up */
GCR_CX_ACCESSOR_RW(32, 0x030, reset_ext_base)
#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET	BIT(31)
#define CM_GCR_Cx_RESET_EXT_BASE_UEB		BIT(30)
#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK	GENMASK(27, 20)
#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA	GENMASK(7, 1)
#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT	BIT(0)
/**
 * mips_cm_l2sync - perform an L2-only sync operation
 *
 * If an L2-only sync region is present in the system then this function
 * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV.
 */
static inline int mips_cm_l2sync(void)
{
	if (!mips_cm_has_l2sync())
		return -ENODEV;

	writel(0, mips_cm_l2sync_base);
	return 0;
}
/**
 * mips_cm_revision() - return CM revision
 *
 * Return: The revision of the CM, from GCR_REV, or 0 if no CM is present. The
 * return value should be checked against the CM_REV_* macros.
 */
static inline int mips_cm_revision(void)
{
	if (!mips_cm_present())
		return 0;

	return read_gcr_rev();
}
/**
 * mips_cm_max_vp_width() - return the width in bits of VP indices
 *
 * Return: the width, in bits, of VP indices in fields that combine core & VP
 * indices.
 */
static inline unsigned int mips_cm_max_vp_width(void)
{
	extern int smp_num_siblings;
	uint32_t cfg;

	if (mips_cm_revision() >= CM_REV_CM3)
		return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW;

	if (mips_cm_present()) {
		/*
		 * We presume that all cores in the system will have the same
		 * number of VP(E)s, and if that ever changes then this will
		 * need revisiting.
		 */
		cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE;
		return (cfg >> __ffs(CM_GCR_Cx_CONFIG_PVPE)) + 1;
	}

	if (IS_ENABLED(CONFIG_SMP))
		return smp_num_siblings;

	return 1;
}
/**
 * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
 * @cpu: the CPU whose VP ID to calculate
 *
 * Hardware such as the GIC uses identifiers for VPs which may not match the
 * CPU numbers used by Linux. This function calculates the hardware VP
 * identifier corresponding to a given CPU.
 *
 * Return: the VP ID for the CPU.
 */
static inline unsigned int mips_cm_vp_id(unsigned int cpu)
{
	unsigned int core = cpu_core(&cpu_data[cpu]);
	unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);

	return (core * mips_cm_max_vp_width()) + vp;
}
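
/*
 * Worked example (illustrative, not from the original header): if
 * mips_cm_max_vp_width() returns 4, the CPU on core 1, VP 2 gets hardware
 * VP ID (1 * 4) + 2 = 6.
 */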
#ifdef CONFIG_MIPS_CM

/**
 * mips_cm_lock_other - lock access to redirect/other region
 * @cluster: the other cluster to be accessed
 * @core: the other core to be accessed
 * @vp: the VP within the other core to be accessed
 * @block: the register block to be accessed
 *
 * Configure the redirect/other region for the local core/VP (depending upon
 * the CM revision) to target the specified @cluster, @core, @vp & register
 * @block. Must be called before using the redirect/other region, and followed
 * by a call to mips_cm_unlock_other() when access to the redirect/other region
 * is complete.
 *
 * This function acquires a spinlock such that code between it &
 * mips_cm_unlock_other() calls cannot be pre-empted by anything which may
 * reconfigure the redirect/other region, and cannot be interfered with by
 * another VP in the core. As such calls to this function should not be nested.
 */
extern void mips_cm_lock_other(unsigned int cluster, unsigned int core,
			       unsigned int vp, unsigned int block);

/**
 * mips_cm_unlock_other - unlock access to redirect/other region
 *
 * Must be called after mips_cm_lock_other() once all required access to the
 * redirect/other region has been completed.
 */
extern void mips_cm_unlock_other(void);
#else /* !CONFIG_MIPS_CM */

static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core,
				      unsigned int vp, unsigned int block) { }
static inline void mips_cm_unlock_other(void) { }

#endif /* !CONFIG_MIPS_CM */
/**
 * mips_cm_lock_other_cpu - lock access to redirect/other region
 * @cpu: the other CPU whose register we want to access
 *
 * Configure the redirect/other region for the local core/VP (depending upon
 * the CM revision) to target the specified @cpu & register @block. This is
 * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number
 * for convenience.
 */
static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
{
	struct cpuinfo_mips *d = &cpu_data[cpu];

	mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block);
}
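
/*
 * Usage sketch (assumed, for illustration): the redirect/other accessors are
 * only meaningful while the region is locked onto a target, so reading
 * another CPU's GCR_Cx_CONFIG might look like:
 *
 *	mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 *	cfg = read_gcr_co_config();
 *	mips_cm_unlock_other();
 */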
#endif /* __MIPS_ASM_MIPS_CM_H__ */