arch/riscv/include/asm/sbi.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#ifndef _ASM_RISCV_SBI_H
#define _ASM_RISCV_SBI_H

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/jump_label.h>

#ifdef CONFIG_RISCV_SBI

enum sbi_ext_id {
#ifdef CONFIG_RISCV_SBI_V01
	SBI_EXT_0_1_SET_TIMER = 0x0,
	SBI_EXT_0_1_CONSOLE_PUTCHAR = 0x1,
	SBI_EXT_0_1_CONSOLE_GETCHAR = 0x2,
	SBI_EXT_0_1_CLEAR_IPI = 0x3,
	SBI_EXT_0_1_SEND_IPI = 0x4,
	SBI_EXT_0_1_REMOTE_FENCE_I = 0x5,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA = 0x6,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID = 0x7,
	SBI_EXT_0_1_SHUTDOWN = 0x8,
#endif
	SBI_EXT_BASE = 0x10,
	SBI_EXT_TIME = 0x54494D45,
	SBI_EXT_IPI = 0x735049,
	SBI_EXT_RFENCE = 0x52464E43,
	SBI_EXT_HSM = 0x48534D,
	SBI_EXT_SRST = 0x53525354,
	SBI_EXT_SUSP = 0x53555350,
	SBI_EXT_PMU = 0x504D55,
	SBI_EXT_DBCN = 0x4442434E,
	SBI_EXT_STA = 0x535441,
	SBI_EXT_NACL = 0x4E41434C,

	/* Experimental extensions must lie within this range */
	SBI_EXT_EXPERIMENTAL_START = 0x08000000,
	SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF,

	/* Vendor extensions must lie within this range */
	SBI_EXT_VENDOR_START = 0x09000000,
	SBI_EXT_VENDOR_END = 0x09FFFFFF,
};

enum sbi_ext_base_fid {
	SBI_EXT_BASE_GET_SPEC_VERSION = 0,
	SBI_EXT_BASE_GET_IMP_ID,
	SBI_EXT_BASE_GET_IMP_VERSION,
	SBI_EXT_BASE_PROBE_EXT,
	SBI_EXT_BASE_GET_MVENDORID,
	SBI_EXT_BASE_GET_MARCHID,
	SBI_EXT_BASE_GET_MIMPID,
};

enum sbi_ext_time_fid {
	SBI_EXT_TIME_SET_TIMER = 0,
};

enum sbi_ext_ipi_fid {
	SBI_EXT_IPI_SEND_IPI = 0,
};

enum sbi_ext_rfence_fid {
	SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
};

enum sbi_ext_hsm_fid {
	SBI_EXT_HSM_HART_START = 0,
	SBI_EXT_HSM_HART_STOP,
	SBI_EXT_HSM_HART_STATUS,
	SBI_EXT_HSM_HART_SUSPEND,
};

enum sbi_hsm_hart_state {
	SBI_HSM_STATE_STARTED = 0,
	SBI_HSM_STATE_STOPPED,
	SBI_HSM_STATE_START_PENDING,
	SBI_HSM_STATE_STOP_PENDING,
	SBI_HSM_STATE_SUSPENDED,
	SBI_HSM_STATE_SUSPEND_PENDING,
	SBI_HSM_STATE_RESUME_PENDING,
};

#define SBI_HSM_SUSP_BASE_MASK			0x7fffffff
#define SBI_HSM_SUSP_NON_RET_BIT		0x80000000
#define SBI_HSM_SUSP_PLAT_BASE			0x10000000

#define SBI_HSM_SUSPEND_RET_DEFAULT		0x00000000
#define SBI_HSM_SUSPEND_RET_PLATFORM		SBI_HSM_SUSP_PLAT_BASE
#define SBI_HSM_SUSPEND_RET_LAST		SBI_HSM_SUSP_BASE_MASK
#define SBI_HSM_SUSPEND_NON_RET_DEFAULT		SBI_HSM_SUSP_NON_RET_BIT
#define SBI_HSM_SUSPEND_NON_RET_PLATFORM	(SBI_HSM_SUSP_NON_RET_BIT | \
						 SBI_HSM_SUSP_PLAT_BASE)
#define SBI_HSM_SUSPEND_NON_RET_LAST		(SBI_HSM_SUSP_NON_RET_BIT | \
						 SBI_HSM_SUSP_BASE_MASK)
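
/*
 * Editorial note (not part of the original header): the suspend_type
 * argument of SBI_EXT_HSM_HART_SUSPEND is composed from the bits above.
 * For example, the first platform-specific non-retentive type works out as:
 *
 *	SBI_HSM_SUSPEND_NON_RET_PLATFORM
 *		= SBI_HSM_SUSP_NON_RET_BIT | SBI_HSM_SUSP_PLAT_BASE
 *		= 0x80000000 | 0x10000000 = 0x90000000
 */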

enum sbi_ext_srst_fid {
	SBI_EXT_SRST_RESET = 0,
};

enum sbi_srst_reset_type {
	SBI_SRST_RESET_TYPE_SHUTDOWN = 0,
	SBI_SRST_RESET_TYPE_COLD_REBOOT,
	SBI_SRST_RESET_TYPE_WARM_REBOOT,
};

enum sbi_srst_reset_reason {
	SBI_SRST_RESET_REASON_NONE = 0,
	SBI_SRST_RESET_REASON_SYS_FAILURE,
};

enum sbi_ext_susp_fid {
	SBI_EXT_SUSP_SYSTEM_SUSPEND = 0,
};

enum sbi_ext_susp_sleep_type {
	SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0,
};

enum sbi_ext_pmu_fid {
	SBI_EXT_PMU_NUM_COUNTERS = 0,
	SBI_EXT_PMU_COUNTER_GET_INFO,
	SBI_EXT_PMU_COUNTER_CFG_MATCH,
	SBI_EXT_PMU_COUNTER_START,
	SBI_EXT_PMU_COUNTER_STOP,
	SBI_EXT_PMU_COUNTER_FW_READ,
	SBI_EXT_PMU_COUNTER_FW_READ_HI,
	SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
};

union sbi_pmu_ctr_info {
	unsigned long value;
	struct {
		unsigned long csr:12;
		unsigned long width:6;
#if __riscv_xlen == 32
		unsigned long reserved:13;
#else
		unsigned long reserved:45;
#endif
		unsigned long type:1;
	};
};
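
/*
 * Editorial sketch: the counter_info word returned (in sbiret.value) by
 * SBI_EXT_PMU_COUNTER_GET_INFO is decoded through the union above, e.g.:
 *
 *	union sbi_pmu_ctr_info info = { .value = ret.value };
 *
 *	if (info.type == SBI_PMU_CTR_TYPE_HW)
 *		... hardware counter backed by CSR number info.csr ...
 *
 * Here "ret" stands for the struct sbiret returned by the ecall.
 */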

/* Data structure to contain the pmu snapshot data */
struct riscv_pmu_snapshot_data {
	u64 ctr_overflow_mask;
	u64 ctr_values[64];
	u64 reserved[447];
};
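
/*
 * Editorial note: the layout above fills exactly one 4 KiB page:
 * 8 + 64 * 8 + 447 * 8 = 4096 bytes.
 */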

#define RISCV_PMU_RAW_EVENT_MASK	GENMASK_ULL(47, 0)
#define RISCV_PMU_RAW_EVENT_IDX		0x20000
#define RISCV_PLAT_FW_EVENT		0xFFFF

/** General pmu event codes specified in SBI PMU extension */
enum sbi_pmu_hw_generic_events_t {
	SBI_PMU_HW_NO_EVENT = 0,
	SBI_PMU_HW_CPU_CYCLES = 1,
	SBI_PMU_HW_INSTRUCTIONS = 2,
	SBI_PMU_HW_CACHE_REFERENCES = 3,
	SBI_PMU_HW_CACHE_MISSES = 4,
	SBI_PMU_HW_BRANCH_INSTRUCTIONS = 5,
	SBI_PMU_HW_BRANCH_MISSES = 6,
	SBI_PMU_HW_BUS_CYCLES = 7,
	SBI_PMU_HW_STALLED_CYCLES_FRONTEND = 8,
	SBI_PMU_HW_STALLED_CYCLES_BACKEND = 9,
	SBI_PMU_HW_REF_CPU_CYCLES = 10,

	SBI_PMU_HW_GENERAL_MAX,
};

/**
 * Special "firmware" events provided by the firmware, even if the hardware
 * does not support performance events. These events are encoded as a raw
 * event type in Linux kernel perf framework.
 */
enum sbi_pmu_fw_generic_events_t {
	SBI_PMU_FW_MISALIGNED_LOAD = 0,
	SBI_PMU_FW_MISALIGNED_STORE = 1,
	SBI_PMU_FW_ACCESS_LOAD = 2,
	SBI_PMU_FW_ACCESS_STORE = 3,
	SBI_PMU_FW_ILLEGAL_INSN = 4,
	SBI_PMU_FW_SET_TIMER = 5,
	SBI_PMU_FW_IPI_SENT = 6,
	SBI_PMU_FW_IPI_RCVD = 7,
	SBI_PMU_FW_FENCE_I_SENT = 8,
	SBI_PMU_FW_FENCE_I_RCVD = 9,
	SBI_PMU_FW_SFENCE_VMA_SENT = 10,
	SBI_PMU_FW_SFENCE_VMA_RCVD = 11,
	SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12,
	SBI_PMU_FW_SFENCE_VMA_ASID_RCVD = 13,

	SBI_PMU_FW_HFENCE_GVMA_SENT = 14,
	SBI_PMU_FW_HFENCE_GVMA_RCVD = 15,
	SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16,
	SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17,

	SBI_PMU_FW_HFENCE_VVMA_SENT = 18,
	SBI_PMU_FW_HFENCE_VVMA_RCVD = 19,
	SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20,
	SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21,
	SBI_PMU_FW_MAX,
};

/* SBI PMU event types */
enum sbi_pmu_event_type {
	SBI_PMU_EVENT_TYPE_HW = 0x0,
	SBI_PMU_EVENT_TYPE_CACHE = 0x1,
	SBI_PMU_EVENT_TYPE_RAW = 0x2,
	SBI_PMU_EVENT_TYPE_FW = 0xf,
};

/* SBI PMU counter types */
enum sbi_pmu_ctr_type {
	SBI_PMU_CTR_TYPE_HW = 0x0,
	SBI_PMU_CTR_TYPE_FW,
};

/* Helper macros to decode event idx */
#define SBI_PMU_EVENT_IDX_OFFSET		20
#define SBI_PMU_EVENT_IDX_MASK			0xFFFFF
#define SBI_PMU_EVENT_IDX_CODE_MASK		0xFFFF
#define SBI_PMU_EVENT_IDX_TYPE_MASK		0xF0000
#define SBI_PMU_EVENT_RAW_IDX			0x20000
#define SBI_PMU_FIXED_CTR_MASK			0x07

#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK	0xFFF8
#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK	0x06
#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK	0x01

#define SBI_PMU_EVENT_CACHE_ID_SHIFT		3
#define SBI_PMU_EVENT_CACHE_OP_SHIFT		1

#define SBI_PMU_EVENT_IDX_INVALID		0xFFFFFFFF
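
/*
 * Editorial note: event_idx is a 20-bit value with the event type in bits
 * [19:16] and the event code in bits [15:0]. For example, the hardware
 * event SBI_PMU_HW_CPU_CYCLES encodes as 0x00001 (type
 * SBI_PMU_EVENT_TYPE_HW, code 1), while SBI_PMU_EVENT_RAW_IDX == 0x20000
 * is simply SBI_PMU_EVENT_TYPE_RAW << 16 with a zero code.
 */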

/* Flags defined for config matching function */
#define SBI_PMU_CFG_FLAG_SKIP_MATCH	BIT(0)
#define SBI_PMU_CFG_FLAG_CLEAR_VALUE	BIT(1)
#define SBI_PMU_CFG_FLAG_AUTO_START	BIT(2)
#define SBI_PMU_CFG_FLAG_SET_VUINH	BIT(3)
#define SBI_PMU_CFG_FLAG_SET_VSINH	BIT(4)
#define SBI_PMU_CFG_FLAG_SET_UINH	BIT(5)
#define SBI_PMU_CFG_FLAG_SET_SINH	BIT(6)
#define SBI_PMU_CFG_FLAG_SET_MINH	BIT(7)

/* Flags defined for counter start function */
#define SBI_PMU_START_FLAG_SET_INIT_VALUE	BIT(0)
#define SBI_PMU_START_FLAG_INIT_SNAPSHOT	BIT(1)

/* Flags defined for counter stop function */
#define SBI_PMU_STOP_FLAG_RESET			BIT(0)
#define SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT		BIT(1)
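
/*
 * Illustrative sketch (editorial, not part of this header): starting one
 * counter with an initial value. The argument order follows the SBI PMU
 * counter_start call (counter index base, counter index mask, flags,
 * initial value); counter_idx and initial_value are hypothetical variables.
 *
 *	struct sbiret ret;
 *
 *	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START,
 *			counter_idx, 1, SBI_PMU_START_FLAG_SET_INIT_VALUE,
 *			initial_value, 0, 0);
 *	if (ret.error)
 *		return sbi_err_map_linux_errno(ret.error);
 */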

enum sbi_ext_dbcn_fid {
	SBI_EXT_DBCN_CONSOLE_WRITE = 0,
	SBI_EXT_DBCN_CONSOLE_READ = 1,
	SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2,
};

/* SBI STA (steal-time accounting) extension */
enum sbi_ext_sta_fid {
	SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0,
};

struct sbi_sta_struct {
	__le32 sequence;
	__le32 flags;
	__le64 steal;
	u8 preempted;
	u8 pad[47];
} __packed;
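
/*
 * Editorial note: with the padding the record is 4 + 4 + 8 + 1 + 47 = 64
 * bytes, matching the 64-byte per-hart steal-time record laid out in the
 * STA shared memory.
 */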

#define SBI_SHMEM_DISABLE		-1

enum sbi_ext_nacl_fid {
	SBI_EXT_NACL_PROBE_FEATURE = 0x0,
	SBI_EXT_NACL_SET_SHMEM = 0x1,
	SBI_EXT_NACL_SYNC_CSR = 0x2,
	SBI_EXT_NACL_SYNC_HFENCE = 0x3,
	SBI_EXT_NACL_SYNC_SRET = 0x4,
};

enum sbi_ext_nacl_feature {
	SBI_NACL_FEAT_SYNC_CSR = 0x0,
	SBI_NACL_FEAT_SYNC_HFENCE = 0x1,
	SBI_NACL_FEAT_SYNC_SRET = 0x2,
	SBI_NACL_FEAT_AUTOSWAP_CSR = 0x3,
};

#define SBI_NACL_SHMEM_ADDR_SHIFT	12
#define SBI_NACL_SHMEM_SCRATCH_OFFSET	0x0000
#define SBI_NACL_SHMEM_SCRATCH_SIZE	0x1000
#define SBI_NACL_SHMEM_SRET_OFFSET	0x0000
#define SBI_NACL_SHMEM_SRET_SIZE	0x0200
#define SBI_NACL_SHMEM_AUTOSWAP_OFFSET	(SBI_NACL_SHMEM_SRET_OFFSET + \
					 SBI_NACL_SHMEM_SRET_SIZE)
#define SBI_NACL_SHMEM_AUTOSWAP_SIZE	0x0080
#define SBI_NACL_SHMEM_UNUSED_OFFSET	(SBI_NACL_SHMEM_AUTOSWAP_OFFSET + \
					 SBI_NACL_SHMEM_AUTOSWAP_SIZE)
#define SBI_NACL_SHMEM_UNUSED_SIZE	0x0580
#define SBI_NACL_SHMEM_HFENCE_OFFSET	(SBI_NACL_SHMEM_UNUSED_OFFSET + \
					 SBI_NACL_SHMEM_UNUSED_SIZE)
#define SBI_NACL_SHMEM_HFENCE_SIZE	0x0780
#define SBI_NACL_SHMEM_DBITMAP_OFFSET	(SBI_NACL_SHMEM_HFENCE_OFFSET + \
					 SBI_NACL_SHMEM_HFENCE_SIZE)
#define SBI_NACL_SHMEM_DBITMAP_SIZE	0x0080
#define SBI_NACL_SHMEM_CSR_OFFSET	(SBI_NACL_SHMEM_DBITMAP_OFFSET + \
					 SBI_NACL_SHMEM_DBITMAP_SIZE)
#define SBI_NACL_SHMEM_CSR_SIZE		((__riscv_xlen / 8) * 1024)
#define SBI_NACL_SHMEM_SIZE		(SBI_NACL_SHMEM_CSR_OFFSET + \
					 SBI_NACL_SHMEM_CSR_SIZE)

#define SBI_NACL_SHMEM_CSR_INDEX(__csr_num)	\
	((((__csr_num) & 0xc00) >> 2) | ((__csr_num) & 0xff))
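
/*
 * Editorial worked example: for the hstatus CSR (number 0x600),
 * SBI_NACL_SHMEM_CSR_INDEX(0x600) = ((0x600 & 0xc00) >> 2) | (0x600 & 0xff)
 * = 0x100, i.e. hstatus occupies slot 0x100 of the CSR area in the
 * nested-acceleration shared memory.
 */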

#define SBI_NACL_SHMEM_HFENCE_ENTRY_SZ	((__riscv_xlen / 8) * 4)
#define SBI_NACL_SHMEM_HFENCE_ENTRY_MAX		\
	(SBI_NACL_SHMEM_HFENCE_SIZE /		\
	 SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
#define SBI_NACL_SHMEM_HFENCE_ENTRY(__num)	\
	(SBI_NACL_SHMEM_HFENCE_OFFSET +		\
	 (__num) * SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
#define SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(__num)	\
	SBI_NACL_SHMEM_HFENCE_ENTRY(__num)
#define SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(__num)	\
	(SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + (__riscv_xlen / 8))
#define SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(__num)	\
	(SBI_NACL_SHMEM_HFENCE_ENTRY(__num) +		\
	 ((__riscv_xlen / 8) * 3))

#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS	1
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT	\
	(__riscv_xlen - SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK	\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS) - 1)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND	\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK << \
	 SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT)

#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS	3
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT	\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT -	\
	 SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS)

#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS	4
#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT	\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT -	\
	 SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK	\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS) - 1)

#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA		0x0
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL	0x1
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID	0x2
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL	0x3
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA		0x4
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL	0x5
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID	0x6
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL	0x7

#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS	1
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT	\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT -	\
	 SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS)

#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS	7
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT	\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT -	\
	 SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK	\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS) - 1)
#define SBI_NACL_SHMEM_HFENCE_ORDER_BASE	12

#if __riscv_xlen == 32
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS	9
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS	7
#else
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS	16
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS	14
#endif
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT	\
	SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK	\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS) - 1)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK	\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS) - 1)

#define SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS	BIT(0)
#define SBI_NACL_SHMEM_AUTOSWAP_HSTATUS		((__riscv_xlen / 8) * 1)

#define SBI_NACL_SHMEM_SRET_X(__i)		((__riscv_xlen / 8) * (__i))
#define SBI_NACL_SHMEM_SRET_X_LAST		31

/* SBI spec version fields */
#define SBI_SPEC_VERSION_DEFAULT	0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT	24
#define SBI_SPEC_VERSION_MAJOR_MASK	0x7f
#define SBI_SPEC_VERSION_MINOR_MASK	0xffffff

/* SBI return error codes */
#define SBI_SUCCESS			0
#define SBI_ERR_FAILURE			-1
#define SBI_ERR_NOT_SUPPORTED		-2
#define SBI_ERR_INVALID_PARAM		-3
#define SBI_ERR_DENIED			-4
#define SBI_ERR_INVALID_ADDRESS		-5
#define SBI_ERR_ALREADY_AVAILABLE	-6
#define SBI_ERR_ALREADY_STARTED		-7
#define SBI_ERR_ALREADY_STOPPED		-8
#define SBI_ERR_NO_SHMEM		-9

extern unsigned long sbi_spec_version;
struct sbiret {
	long error;
	long value;
};

void sbi_init(void);
long __sbi_base_ecall(int fid);
struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
			  unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5,
			  int fid, int ext);
#define sbi_ecall(e, f, a0, a1, a2, a3, a4, a5)	\
		__sbi_ecall(a0, a1, a2, a3, a4, a5, f, e)
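
/*
 * Illustrative sketch (editorial): the wrapper keeps the historical
 * sbi_ecall(extension, function, a0..a5) argument order while __sbi_ecall()
 * takes the arguments in register order. Probing for the DBCN extension
 * through the Base extension would look like:
 *
 *	struct sbiret ret;
 *
 *	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT,
 *			SBI_EXT_DBCN, 0, 0, 0, 0, 0);
 *	if (!ret.error && ret.value)
 *		... the extension is implemented ...
 *
 * In-kernel users would normally call sbi_probe_extension(), declared
 * below, rather than open-coding this.
 */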

#ifdef CONFIG_RISCV_SBI_V01
void sbi_console_putchar(int ch);
int sbi_console_getchar(void);
#else
static inline void sbi_console_putchar(int ch) { }
static inline int sbi_console_getchar(void) { return -ENOENT; }
#endif
long sbi_get_mvendorid(void);
long sbi_get_marchid(void);
long sbi_get_mimpid(void);
void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_send_ipi(unsigned int cpu);
int sbi_remote_fence_i(const struct cpumask *cpu_mask);

int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
			       unsigned long start,
			       unsigned long size,
			       unsigned long asid);
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size);
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid);
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size);
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid);
long sbi_probe_extension(int ext);

/* Check if current SBI specification version is 0.1 or not */
static inline int sbi_spec_is_0_1(void)
{
	return (sbi_spec_version == SBI_SPEC_VERSION_DEFAULT) ? 1 : 0;
}

/* Get the major version of SBI */
static inline unsigned long sbi_major_version(void)
{
	return (sbi_spec_version >> SBI_SPEC_VERSION_MAJOR_SHIFT) &
		SBI_SPEC_VERSION_MAJOR_MASK;
}

/* Get the minor version of SBI */
static inline unsigned long sbi_minor_version(void)
{
	return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
}

/* Make SBI version */
static inline unsigned long sbi_mk_version(unsigned long major,
					   unsigned long minor)
{
	return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << SBI_SPEC_VERSION_MAJOR_SHIFT)
		| (minor & SBI_SPEC_VERSION_MINOR_MASK);
}
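
/*
 * Editorial worked example: the major version sits in bits [30:24] and the
 * minor version in bits [23:0], so sbi_mk_version(2, 0) yields 0x2000000,
 * and sbi_major_version()/sbi_minor_version() recover 2 and 0 from it.
 */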

static inline int sbi_err_map_linux_errno(int err)
{
	switch (err) {
	case SBI_SUCCESS:
		return 0;
	case SBI_ERR_DENIED:
		return -EPERM;
	case SBI_ERR_INVALID_PARAM:
		return -EINVAL;
	case SBI_ERR_INVALID_ADDRESS:
		return -EFAULT;
	case SBI_ERR_NOT_SUPPORTED:
	case SBI_ERR_FAILURE:
	default:
		return -ENOTSUPP;
	}
}

extern bool sbi_debug_console_available;
int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);
int sbi_debug_console_read(char *bytes, unsigned int num_bytes);

#else /* CONFIG_RISCV_SBI */
static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */

unsigned long riscv_get_mvendorid(void);
unsigned long riscv_get_marchid(void);
unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
unsigned long riscv_cached_marchid(unsigned int cpu_id);
unsigned long riscv_cached_mimpid(unsigned int cpu_id);

#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
DECLARE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
#define riscv_use_sbi_for_rfence() \
	static_branch_unlikely(&riscv_sbi_for_rfence)
void sbi_ipi_init(void);
#else
static inline bool riscv_use_sbi_for_rfence(void) { return false; }
static inline void sbi_ipi_init(void) { }
#endif

#endif /* _ASM_RISCV_SBI_H */