Expand PMF_FN_* macros.
[netbsd-mini2440.git] / sys / arch / hppa / include / cpufunc.h
blob75d47b05fd7cd0d1fba06029f8e6446e2e3cef69
1 /* $NetBSD: cpufunc.h,v 1.13 2009/11/29 10:08:10 skrll Exp $ */
3 /* $OpenBSD: cpufunc.h,v 1.17 2000/05/15 17:22:40 mickey Exp $ */
5 /*
6 * Copyright (c) 1998-2004 Michael Shalayeff
7 * All rights reserved.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
22 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 * (c) Copyright 1988 HEWLETT-PACKARD COMPANY
33 * To anyone who acknowledges that this file is provided "AS IS"
34 * without any express or implied warranty:
35 * permission to use, copy, modify, and distribute this file
36 * for any purpose is hereby granted without fee, provided that
37 * the above copyright notice and this notice appears in all
38 * copies, and that the name of Hewlett-Packard Company not be
39 * used in advertising or publicity pertaining to distribution
40 * of the software without specific, written prior permission.
41 * Hewlett-Packard Company makes no representations about the
42 * suitability of this software for any purpose.
45 * Copyright (c) 1990,1994 The University of Utah and
46 * the Computer Systems Laboratory (CSL). All rights reserved.
48 * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
50 * WHATSOEVER RESULTING FROM ITS USE.
52 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
53 * improvements that they make and grant CSL redistribution rights.
55 * Utah $Hdr: c_support.s 1.8 94/12/14$
56 * Author: Bob Wheeler, University of Utah CSL
59 #ifndef _HPPA_CPUFUNC_H_
60 #define _HPPA_CPUFUNC_H_
62 #include <machine/psl.h>
63 #include <machine/pte.h>
/*
 * Convert between a byte address and the page-frame form used when
 * inserting TLB entries (tlbbtop), and back (tlbptob).
 */
#define tlbbtop(b) ((b) >> (PGSHIFT - 5))
#define tlbptob(p) ((p) << (PGSHIFT - 5))

/* Convert a byte address to a hashed-page-table (HPT) index. */
#define hptbtop(b) ((b) >> 17)
70 /* Get space register for an address */
71 static __inline register_t
72 ldsid(vaddr_t p) {
73 register_t ret;
74 __asm volatile("ldsid (%1),%0" : "=r" (ret) : "r" (p));
75 return ret;
/*
 * Move to / move from a control register.  The register number r must
 * be a compile-time constant (it is encoded into the instruction).
 */
#define mtctl(v,r) __asm volatile("mtctl %0,%1":: "r" (v), "i" (r))
#define mfctl(r,v) __asm volatile("mfctl %1,%0": "=r" (v): "i" (r))
/*
 * Read CPU diagnose register r into v.  The assembler has no mnemonic
 * for this, so the DIAG opcode operand is encoded by hand; the result
 * is staged through %r22, hence the clobber.
 */
#define mfcpu(r,v) /* XXX for the lack of the mnemonics */ \
	__asm volatile("diag %1\n\t" \
	    "copy %%r22, %0" \
	    : "=r" (v) : "i" ((0x1400 | ((r) << 21) | (22))) : "r22")
/*
 * Move to / move from a space register.  The space register number r
 * must be a compile-time constant.
 */
#define mtsp(v,r) __asm volatile("mtsp %0,%1":: "r" (v), "i" (r))
#define mfsp(r,v) __asm volatile("mfsp %1,%0": "=r" (v): "i" (r))
/*
 * Set (ssm) or reset (rsm) bits v in the system mask; the previous
 * system-mask value is stored into r.  v must be a compile-time
 * constant.
 */
#define ssm(v,r) __asm volatile("ssm %1,%0": "=r" (r): "i" (v))
#define rsm(v,r) __asm volatile("rsm %1,%0": "=r" (r): "i" (v))
93 /* Get coherence index for an address */
94 static __inline register_t
95 lci(pa_space_t sp, vaddr_t va) {
96 register_t ret;
98 mtsp((sp), 1); \
99 __asm volatile("lci 0(%%sr1, %1), %0" : "=r" (ret) : "r" (va));
101 return ret;
105 /* Move to system mask. Old value of system mask is returned. */
106 static __inline register_t mtsm(register_t mask) {
107 register_t ret;
108 __asm volatile(
109 "ssm 0,%0\n\t"
110 "mtsm %1": "=&r" (ret) : "r" (mask));
111 return ret;
/* Flush a single data (fdce) or instruction (fice) cache entry. */
#define fdce(sp,off) __asm volatile("fdce 0(%0,%1)":: "i" (sp), "r" (off))
#define fice(sp,off) __asm volatile("fice 0(%0,%1)":: "i" (sp), "r" (off))
/*
 * Synchronize the caches: SYNC followed by seven NOPs so that
 * subsequent instructions see the completed cache operations.
 */
#define sync_caches() \
    __asm volatile("sync\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop":::"memory")
119 static __inline void
120 iitlba(u_int pg, pa_space_t sp, vaddr_t va)
122 mtsp(sp, 1);
123 __asm volatile("iitlba %0,(%%sr1, %1)":: "r" (pg), "r" (va));
126 static __inline void
127 idtlba(u_int pg, pa_space_t sp, vaddr_t va)
129 mtsp(sp, 1);
130 __asm volatile("idtlba %0,(%%sr1, %1)":: "r" (pg), "r" (va));
133 static __inline void
134 iitlbp(u_int prot, pa_space_t sp, vaddr_t va)
136 mtsp(sp, 1);
137 __asm volatile("iitlbp %0,(%%sr1, %1)":: "r" (prot), "r" (va));
140 static __inline void
141 idtlbp(u_int prot, pa_space_t sp, vaddr_t va)
143 mtsp(sp, 1);
144 __asm volatile("idtlbp %0,(%%sr1, %1)":: "r" (prot), "r" (va));
147 static __inline void
148 pitlb(pa_space_t sp, vaddr_t va)
150 mtsp(sp, 1);
151 __asm volatile("pitlb %%r0(%%sr1, %0)":: "r" (va));
154 static __inline void
155 pdtlb(pa_space_t sp, vaddr_t va)
157 mtsp(sp, 1);
158 __asm volatile("pdtlb %%r0(%%sr1, %0)":: "r" (va));
161 static __inline void
162 pitlbe(pa_space_t sp, vaddr_t va)
164 mtsp(sp, 1);
165 __asm volatile("pitlbe %%r0(%%sr1, %0)":: "r" (va));
168 static __inline void
169 pdtlbe(pa_space_t sp, vaddr_t va)
171 mtsp(sp, 1);
172 __asm volatile("pdtlbe %%r0(%%sr1, %0)":: "r" (va));
#ifdef _KERNEL
/*
 * Per-CPU-type hook for hashed page table setup.
 * NOTE(review): presumably installed by CPU identification code —
 * confirm against machdep.
 */
extern int (*cpu_hpt_init)(vaddr_t, vsize_t);

/* Cache and TLB maintenance over a (space, va, size) range. */
void ficache(pa_space_t, vaddr_t, vsize_t);
void fdcache(pa_space_t, vaddr_t, vsize_t);
void pdcache(pa_space_t, vaddr_t, vsize_t);
void fcacheall(void);
void ptlball(void);
hppa_hpa_t cpu_gethpa(int);

/*
 * PCX-L2 accelerated I/O window and the mask derived from an address
 * within it.
 */
#define PCXL2_ACCEL_IO_START 0xf4000000
#define PCXL2_ACCEL_IO_END (0xfc000000 - 1)
#define PCXL2_ACCEL_IO_ADDR2MASK(a) (0x8 >> ((((a) >> 25) - 2) & 3))
void eaio_l2(int);
191 * These flush or purge the data cache for a item whose total
192 * size is <= the size of a data cache line, however they don't
193 * check this constraint.
195 static __inline void
196 fdcache_small(pa_space_t sp, vaddr_t va, vsize_t size)
198 __asm volatile(
199 " mtsp %0,%%sr1 \n"
200 " fdc %%r0(%%sr1, %1) \n"
201 " fdc %2(%%sr1, %1) \n"
202 " sync \n"
203 " syncdma \n"
205 : "r" (sp), "r" (va), "r" (size - 1));
207 static __inline void
208 pdcache_small(pa_space_t sp, vaddr_t va, vsize_t size)
210 __asm volatile(
211 " mtsp %0,%%sr1 \n"
212 " pdc %%r0(%%sr1, %1) \n"
213 " pdc %2(%%sr1, %1) \n"
214 " sync \n"
215 " syncdma \n"
217 : "r" (sp), "r" (va), "r" (size - 1));
220 #endif /* _KERNEL */
222 #endif /* _HPPA_CPUFUNC_H_ */