/*
 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
 * reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the NetLogic
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ASM_NLM_MIPS_EXTS_H
#define _ASM_NLM_MIPS_EXTS_H

/*
 * XLR and XLP interrupt request (EIRR) and interrupt mask (EIMR)
 * registers
 */

/*
 * NOTE: Do not save/restore flags around write_c0_eimr().
 * On non-R2 platforms, the saved flags contain the part of EIMR that is
 * shadowed in the STATUS register; restoring the flags would overwrite
 * the lower 8 bits of EIMR.
 *
 * Call with interrupts disabled.
 */
#define write_c0_eimr(val)						\
do {									\
	if (sizeof(unsigned long) == 4) {				\
		/* 32-bit: merge %M0:%L0 into one 64-bit register,	\
		   then write EIMR (CP0 register 9, select 7) */	\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dsll\t%L0, %L0, 32\n\t"			\
			"dsrl\t%L0, %L0, 32\n\t"			\
			"dsll\t%M0, %M0, 32\n\t"			\
			"or\t%L0, %L0, %M0\n\t"				\
			"dmtc0\t%L0, $9, 7\n\t"				\
			".set\tmips0"					\
			: : "r" (val));					\
	} else								\
		__write_64bit_c0_register($9, 7, (val));		\
} while (0)

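/*
 * Example (illustrative sketch, not part of the original header): write
 * a complete interrupt mask. Per the note above, this uses a plain
 * disable/enable pair rather than local_irq_save()/local_irq_restore(),
 * which would clobber the EIMR bits shadowed in STATUS; it assumes the
 * caller is in a context where re-enabling interrupts is safe. The
 * function name is hypothetical.
 */
static inline void nlm_write_irq_mask_example(uint64_t mask)
{
	local_irq_disable();
	write_c0_eimr(mask);
	local_irq_enable();
}
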
/*
 * Handling the 64-bit EIMR and EIRR registers in 32-bit mode with the
 * standard functions would be very inefficient, so optimized versions
 * of the common operations on these registers are provided below.
 *
 * Call with interrupts disabled.
 */
static inline void ack_c0_eirr(int irq)
{
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips64\n\t"
		".set	noat\n\t"
		"li	$1, 1\n\t"
		"dsllv	$1, $1, %0\n\t"
		"dmtc0	$1, $9, 6\n\t"	/* write 1 << irq to EIRR to ack */
		".set	pop"
		: : "r" (irq));
}

static inline void set_c0_eimr(int irq)
{
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips64\n\t"
		".set	noat\n\t"
		"li	$1, 1\n\t"
		"dsllv	%0, $1, %0\n\t"
		"dmfc0	$1, $9, 7\n\t"
		"or	$1, %0\n\t"	/* set bit irq in EIMR */
		"dmtc0	$1, $9, 7\n\t"
		".set	pop"
		: "+r" (irq));
}

static inline void clear_c0_eimr(int irq)
{
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips64\n\t"
		".set	noat\n\t"
		"li	$1, 1\n\t"
		"dsllv	%0, $1, %0\n\t"
		"dmfc0	$1, $9, 7\n\t"
		"or	$1, %0\n\t"
		"xor	$1, %0\n\t"	/* or + xor clears bit irq in EIMR */
		"dmtc0	$1, $9, 7\n\t"
		".set	pop"
		: "+r" (irq));
}

/*
 * Read c0 EIRR and c0 EIMR and AND the two values; the result is the
 * set of interrupts which are raised and not masked.
 */
static inline uint64_t read_c0_eirr_and_eimr(void)
{
	uint64_t val;

#ifdef CONFIG_64BIT
	val = __read_64bit_c0_register($9, 6) &
		__read_64bit_c0_register($9, 7);
#else
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips64\n\t"
		".set	noat\n\t"
		"dmfc0	%M0, $9, 6\n\t"	/* EIRR */
		"dmfc0	%L0, $9, 7\n\t"	/* EIMR */
		"and	%M0, %L0\n\t"	/* raised and not masked */
		"dsll	%L0, %M0, 32\n\t"
		"dsra	%M0, %M0, 32\n\t"
		"dsra	%L0, %L0, 32\n\t"
		".set	pop"
		: "=r" (val));
#endif
	return val;
}

static inline int hard_smp_processor_id(void)
{
	return __read_32bit_c0_register($15, 1) & 0x3ff;	/* EBASE CPUNum */
}

static inline int nlm_nodeid(void)
{
	uint32_t prid = read_c0_prid() & PRID_IMP_MASK;

	if (prid == PRID_IMP_NETLOGIC_XLP9XX ||
	    prid == PRID_IMP_NETLOGIC_XLP5XX)
		return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
	else
		return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
}

static inline unsigned int nlm_core_id(void)
{
	uint32_t prid = read_c0_prid() & PRID_IMP_MASK;

	if (prid == PRID_IMP_NETLOGIC_XLP9XX ||
	    prid == PRID_IMP_NETLOGIC_XLP5XX)
		return (read_c0_ebase() & 0x7c) >> 2;	/* 5-bit core field */
	else
		return (read_c0_ebase() & 0x1c) >> 2;	/* 3-bit core field */
}

static inline unsigned int nlm_thread_id(void)
{
	return read_c0_ebase() & 0x3;	/* thread within the core */
}

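/*
 * Example (illustrative sketch): decompose the current hardware thread
 * into node/core/thread coordinates. All of the helpers above read
 * per-CPU registers (PRId, EBASE), so the result describes only the
 * CPU this runs on; the function name is hypothetical.
 */
static inline int nlm_cpu_coords_example(unsigned int *node,
		unsigned int *core, unsigned int *thread)
{
	*node = nlm_nodeid();
	*core = nlm_core_id();
	*thread = nlm_thread_id();
	return hard_smp_processor_id();	/* flat hardware CPU number */
}
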
/* COP2 register accessors */
#define __read_64bit_c2_split(source, sel)				\
({									\
	unsigned long long __val;					\
	unsigned long __flags;						\
									\
	local_irq_save(__flags);					\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%M0, " #source "\n\t"			\
			"dsll\t%L0, %M0, 32\n\t"			\
			"dsra\t%M0, %M0, 32\n\t"			\
			"dsra\t%L0, %L0, 32\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__val));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%M0, " #source ", " #sel "\n\t"		\
			"dsll\t%L0, %M0, 32\n\t"			\
			"dsra\t%M0, %M0, 32\n\t"			\
			"dsra\t%L0, %L0, 32\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__val));				\
	local_irq_restore(__flags);					\
									\
	__val;								\
})

#define __write_64bit_c2_split(source, sel, val)			\
do {									\
	unsigned long __flags;						\
									\
	local_irq_save(__flags);					\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dsll\t%L0, %L0, 32\n\t"			\
			"dsrl\t%L0, %L0, 32\n\t"			\
			"dsll\t%M0, %M0, 32\n\t"			\
			"or\t%L0, %L0, %M0\n\t"				\
			"dmtc2\t%L0, " #source "\n\t"			\
			".set\tmips0\n\t"				\
			: : "r" (val));					\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dsll\t%L0, %L0, 32\n\t"			\
			"dsrl\t%L0, %L0, 32\n\t"			\
			"dsll\t%M0, %M0, 32\n\t"			\
			"or\t%L0, %L0, %M0\n\t"				\
			"dmtc2\t%L0, " #source ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: : "r" (val));					\
	local_irq_restore(__flags);					\
} while (0)

#define __read_32bit_c2_register(source, sel)				\
({	uint32_t __res;							\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mfc2\t%0, " #source "\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mfc2\t%0, " #source ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	__res;								\
})

#define __read_64bit_c2_register(source, sel)				\
({	unsigned long long __res;					\
	if (sizeof(unsigned long) == 4)					\
		__res = __read_64bit_c2_split(source, sel);		\
	else if (sel == 0)						\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%0, " #source "\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%0, " #source ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	__res;								\
})

#define __write_64bit_c2_register(register, sel, value)		\
do {									\
	if (sizeof(unsigned long) == 4)					\
		__write_64bit_c2_split(register, sel, value);		\
	else if (sel == 0)						\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmtc2\t%z0, " #register "\n\t"			\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmtc2\t%z0, " #register ", " #sel "\n\t"	\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
} while (0)

#define __write_32bit_c2_register(reg, sel, value)			\
({									\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mtc2\t%z0, " #reg "\n\t"			\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mtc2\t%z0, " #reg ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
})

#endif /* _ASM_NLM_MIPS_EXTS_H */