6 #include <linux/kernel.h>
8 #include <asm/compiler.h>
9 #include <asm/system.h>
10 #include <asm/pgtable.h>
11 #include <asm/machvec.h>
12 #include <asm/hwrpb.h>
14 /* The generic header contains only prototypes. Including it ensures that
15 the implementation we have here matches that interface. */
16 #include <asm-generic/iomap.h>
18 /* We don't use IO slowdowns on the Alpha, but.. */
19 #define __SLOW_DOWN_IO do { } while (0)
20 #define SLOW_DOWN_IO do { } while (0)
/*
 * Virtual -> physical identity mapping starts at this offset.
 */
/* Base of the identity-mapped kernel segment; the value depends on
   whether the 48-bit KSEG layout is in use.  */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR	0xffff800000000000UL
#else
#define IDENT_ADDR	0xfffffc0000000000UL
#endif
/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that any interrupts wouldn't get confused with the hae
 * register not being up-to-date with respect to the hardware
 * value.
 */
38 static inline void __set_hae(unsigned long new_hae
)
41 local_irq_save(flags
);
43 alpha_mv
.hae_cache
= new_hae
;
44 *alpha_mv
.hae_register
= new_hae
;
46 /* Re-read to make sure it was written. */
47 new_hae
= *alpha_mv
.hae_register
;
49 local_irq_restore(flags
);
52 static inline void set_hae(unsigned long new_hae
)
54 if (new_hae
!= alpha_mv
.hae_cache
)
/*
 * Change virtual addresses to physical addresses and vv.
 */
61 #ifdef USE_48_BIT_KSEG
62 static inline unsigned long virt_to_phys(void *address
)
64 return (unsigned long)address
- IDENT_ADDR
;
67 static inline void * phys_to_virt(unsigned long address
)
69 return (void *) (address
+ IDENT_ADDR
);
72 static inline unsigned long virt_to_phys(void *address
)
74 unsigned long phys
= (unsigned long)address
;
76 /* Sign-extend from bit 41. */
78 phys
= (long)phys
>> (64 - 41);
80 /* Crop to the physical address width of the processor. */
81 phys
&= (1ul << hwrpb
->pa_bits
) - 1;
86 static inline void * phys_to_virt(unsigned long address
)
88 return (void *)(IDENT_ADDR
+ (address
& ((1ul << 41) - 1)));
92 #define page_to_phys(page) page_to_pa(page)
94 static inline dma_addr_t __deprecated
isa_page_to_bus(struct page
*page
)
96 return page_to_phys(page
);
99 /* This depends on working iommu. */
100 #define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
102 /* Maximum PIO space address supported? */
103 #define IO_SPACE_LIMIT 0xffff
/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
/* Direct-map window parameters, defined in platform setup code.  */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;
116 static inline unsigned long __deprecated
virt_to_bus(void *address
)
118 unsigned long phys
= virt_to_phys(address
);
119 unsigned long bus
= phys
+ __direct_map_base
;
120 return phys
<= __direct_map_size
? bus
: 0;
122 #define isa_virt_to_bus virt_to_bus
124 static inline void * __deprecated
bus_to_virt(unsigned long address
)
128 /* This check is a sanity check but also ensures that bus address 0
129 maps to virtual address 0 which is useful to detect null pointers
130 (the NCR driver is much simpler if NULL pointers are preserved). */
131 address
-= __direct_map_base
;
132 virt
= phys_to_virt(address
);
133 return (long)address
<= 0 ? NULL
: virt
;
135 #define isa_bus_to_virt bus_to_virt
/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */
141 #define IO_CONCAT(a,b) _IO_CONCAT(a,b)
142 #define _IO_CONCAT(a,b) a ## _ ## b
144 #ifdef CONFIG_ALPHA_GENERIC
146 /* In a generic kernel, we always go through the machine vector. */
/* Build a one-argument (read-side) wrapper that dispatches through
   the machine vector.  */
#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}
/* Build a two-argument (write-side) wrapper that dispatches through
   the machine vector.  */
#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}
160 REMAP1(unsigned int, ioread8
, /**/)
161 REMAP1(unsigned int, ioread16
, /**/)
162 REMAP1(unsigned int, ioread32
, /**/)
163 REMAP1(u8
, readb
, const volatile)
164 REMAP1(u16
, readw
, const volatile)
165 REMAP1(u32
, readl
, const volatile)
166 REMAP1(u64
, readq
, const volatile)
168 REMAP2(u8
, iowrite8
, /**/)
169 REMAP2(u16
, iowrite16
, /**/)
170 REMAP2(u32
, iowrite32
, /**/)
171 REMAP2(u8
, writeb
, volatile)
172 REMAP2(u16
, writew
, volatile)
173 REMAP2(u32
, writel
, volatile)
174 REMAP2(u64
, writeq
, volatile)
179 static inline void __iomem
*generic_ioportmap(unsigned long a
)
181 return alpha_mv
.mv_ioportmap(a
);
184 static inline void __iomem
*generic_ioremap(unsigned long a
, unsigned long s
)
186 return alpha_mv
.mv_ioremap(a
, s
);
189 static inline void generic_iounmap(volatile void __iomem
*a
)
191 return alpha_mv
.mv_iounmap(a
);
194 static inline int generic_is_ioaddr(unsigned long a
)
196 return alpha_mv
.mv_is_ioaddr(a
);
199 static inline int generic_is_mmio(const volatile void __iomem
*a
)
201 return alpha_mv
.mv_is_mmio(a
);
204 #define __IO_PREFIX generic
205 #define generic_trivial_rw_bw 0
206 #define generic_trivial_rw_lq 0
207 #define generic_trivial_io_bw 0
208 #define generic_trivial_io_lq 0
209 #define generic_trivial_iounmap 0
213 #if defined(CONFIG_ALPHA_APECS)
214 # include <asm/core_apecs.h>
215 #elif defined(CONFIG_ALPHA_CIA)
216 # include <asm/core_cia.h>
217 #elif defined(CONFIG_ALPHA_IRONGATE)
218 # include <asm/core_irongate.h>
219 #elif defined(CONFIG_ALPHA_JENSEN)
220 # include <asm/jensen.h>
221 #elif defined(CONFIG_ALPHA_LCA)
222 # include <asm/core_lca.h>
223 #elif defined(CONFIG_ALPHA_MARVEL)
224 # include <asm/core_marvel.h>
225 #elif defined(CONFIG_ALPHA_MCPCIA)
226 # include <asm/core_mcpcia.h>
227 #elif defined(CONFIG_ALPHA_POLARIS)
228 # include <asm/core_polaris.h>
229 #elif defined(CONFIG_ALPHA_T2)
230 # include <asm/core_t2.h>
231 #elif defined(CONFIG_ALPHA_TSUNAMI)
232 # include <asm/core_tsunami.h>
233 #elif defined(CONFIG_ALPHA_TITAN)
234 # include <asm/core_titan.h>
235 #elif defined(CONFIG_ALPHA_WILDFIRE)
236 # include <asm/core_wildfire.h>
238 #error "What system is this?"
/*
 * We always have external versions of these routines.
 */
246 extern u8
inb(unsigned long port
);
247 extern u16
inw(unsigned long port
);
248 extern u32
inl(unsigned long port
);
249 extern void outb(u8 b
, unsigned long port
);
250 extern void outw(u16 b
, unsigned long port
);
251 extern void outl(u32 b
, unsigned long port
);
253 extern u8
readb(const volatile void __iomem
*addr
);
254 extern u16
readw(const volatile void __iomem
*addr
);
255 extern u32
readl(const volatile void __iomem
*addr
);
256 extern u64
readq(const volatile void __iomem
*addr
);
257 extern void writeb(u8 b
, volatile void __iomem
*addr
);
258 extern void writew(u16 b
, volatile void __iomem
*addr
);
259 extern void writel(u32 b
, volatile void __iomem
*addr
);
260 extern void writeq(u64 b
, volatile void __iomem
*addr
);
262 extern u8
__raw_readb(const volatile void __iomem
*addr
);
263 extern u16
__raw_readw(const volatile void __iomem
*addr
);
264 extern u32
__raw_readl(const volatile void __iomem
*addr
);
265 extern u64
__raw_readq(const volatile void __iomem
*addr
);
266 extern void __raw_writeb(u8 b
, volatile void __iomem
*addr
);
267 extern void __raw_writew(u16 b
, volatile void __iomem
*addr
);
268 extern void __raw_writel(u32 b
, volatile void __iomem
*addr
);
269 extern void __raw_writeq(u64 b
, volatile void __iomem
*addr
);
/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */
275 /* These two have to be extern inline because of the extern prototype from
276 <asm-generic/iomap.h>. It is not legal to mix "extern" and "static" for
277 the same declaration. */
278 extern inline void __iomem
*ioport_map(unsigned long port
, unsigned int size
)
280 return IO_CONCAT(__IO_PREFIX
,ioportmap
) (port
);
283 extern inline void ioport_unmap(void __iomem
*addr
)
287 static inline void __iomem
*ioremap(unsigned long port
, unsigned long size
)
289 return IO_CONCAT(__IO_PREFIX
,ioremap
) (port
, size
);
292 static inline void __iomem
*__ioremap(unsigned long port
, unsigned long size
,
295 return ioremap(port
, size
);
298 static inline void __iomem
* ioremap_nocache(unsigned long offset
,
301 return ioremap(offset
, size
);
304 static inline void iounmap(volatile void __iomem
*addr
)
306 IO_CONCAT(__IO_PREFIX
,iounmap
)(addr
);
309 static inline int __is_ioaddr(unsigned long addr
)
311 return IO_CONCAT(__IO_PREFIX
,is_ioaddr
)(addr
);
313 #define __is_ioaddr(a) __is_ioaddr((unsigned long)(a))
315 static inline int __is_mmio(const volatile void __iomem
*addr
)
317 return IO_CONCAT(__IO_PREFIX
,is_mmio
)(addr
);
/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */
325 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
326 extern inline unsigned int ioread8(void __iomem
*addr
)
328 unsigned int ret
= IO_CONCAT(__IO_PREFIX
,ioread8
)(addr
);
333 extern inline unsigned int ioread16(void __iomem
*addr
)
335 unsigned int ret
= IO_CONCAT(__IO_PREFIX
,ioread16
)(addr
);
340 extern inline void iowrite8(u8 b
, void __iomem
*addr
)
342 IO_CONCAT(__IO_PREFIX
,iowrite8
)(b
, addr
);
346 extern inline void iowrite16(u16 b
, void __iomem
*addr
)
348 IO_CONCAT(__IO_PREFIX
,iowrite16
)(b
, addr
);
352 extern inline u8
inb(unsigned long port
)
354 return ioread8(ioport_map(port
, 1));
357 extern inline u16
inw(unsigned long port
)
359 return ioread16(ioport_map(port
, 2));
362 extern inline void outb(u8 b
, unsigned long port
)
364 iowrite8(b
, ioport_map(port
, 1));
367 extern inline void outw(u16 b
, unsigned long port
)
369 iowrite16(b
, ioport_map(port
, 2));
373 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
374 extern inline unsigned int ioread32(void __iomem
*addr
)
376 unsigned int ret
= IO_CONCAT(__IO_PREFIX
,ioread32
)(addr
);
381 extern inline void iowrite32(u32 b
, void __iomem
*addr
)
383 IO_CONCAT(__IO_PREFIX
,iowrite32
)(b
, addr
);
387 extern inline u32
inl(unsigned long port
)
389 return ioread32(ioport_map(port
, 4));
392 extern inline void outl(u32 b
, unsigned long port
)
394 iowrite32(b
, ioport_map(port
, 4));
398 #if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
399 extern inline u8
__raw_readb(const volatile void __iomem
*addr
)
401 return IO_CONCAT(__IO_PREFIX
,readb
)(addr
);
404 extern inline u16
__raw_readw(const volatile void __iomem
*addr
)
406 return IO_CONCAT(__IO_PREFIX
,readw
)(addr
);
409 extern inline void __raw_writeb(u8 b
, volatile void __iomem
*addr
)
411 IO_CONCAT(__IO_PREFIX
,writeb
)(b
, addr
);
414 extern inline void __raw_writew(u16 b
, volatile void __iomem
*addr
)
416 IO_CONCAT(__IO_PREFIX
,writew
)(b
, addr
);
419 extern inline u8
readb(const volatile void __iomem
*addr
)
421 u8 ret
= __raw_readb(addr
);
426 extern inline u16
readw(const volatile void __iomem
*addr
)
428 u16 ret
= __raw_readw(addr
);
433 extern inline void writeb(u8 b
, volatile void __iomem
*addr
)
435 __raw_writeb(b
, addr
);
439 extern inline void writew(u16 b
, volatile void __iomem
*addr
)
441 __raw_writew(b
, addr
);
446 #if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
447 extern inline u32
__raw_readl(const volatile void __iomem
*addr
)
449 return IO_CONCAT(__IO_PREFIX
,readl
)(addr
);
452 extern inline u64
__raw_readq(const volatile void __iomem
*addr
)
454 return IO_CONCAT(__IO_PREFIX
,readq
)(addr
);
457 extern inline void __raw_writel(u32 b
, volatile void __iomem
*addr
)
459 IO_CONCAT(__IO_PREFIX
,writel
)(b
, addr
);
462 extern inline void __raw_writeq(u64 b
, volatile void __iomem
*addr
)
464 IO_CONCAT(__IO_PREFIX
,writeq
)(b
, addr
);
467 extern inline u32
readl(const volatile void __iomem
*addr
)
469 u32 ret
= __raw_readl(addr
);
474 extern inline u64
readq(const volatile void __iomem
*addr
)
476 u64 ret
= __raw_readq(addr
);
481 extern inline void writel(u32 b
, volatile void __iomem
*addr
)
483 __raw_writel(b
, addr
);
487 extern inline void writeq(u64 b
, volatile void __iomem
*addr
)
489 __raw_writeq(b
, addr
);
500 #define readb_relaxed(addr) __raw_readb(addr)
501 #define readw_relaxed(addr) __raw_readw(addr)
502 #define readl_relaxed(addr) __raw_readl(addr)
503 #define readq_relaxed(addr) __raw_readq(addr)
/*
 * String version of IO memory access ops:
 */
510 extern void memcpy_fromio(void *, const volatile void __iomem
*, long);
511 extern void memcpy_toio(volatile void __iomem
*, const void *, long);
512 extern void _memset_c_io(volatile void __iomem
*, unsigned long, long);
514 static inline void memset_io(volatile void __iomem
*addr
, u8 c
, long len
)
516 _memset_c_io(addr
, 0x0101010101010101UL
* c
, len
);
519 #define __HAVE_ARCH_MEMSETW_IO
520 static inline void memsetw_io(volatile void __iomem
*addr
, u16 c
, long len
)
522 _memset_c_io(addr
, 0x0001000100010001UL
* c
, len
);
/*
 * String versions of in/out ops:
 */
/* Repeated port reads into DST / writes from SRC, COUNT items each.  */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70. Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries.
 */
#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170+(x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0
556 #define dma_cache_inv(_start,_size) do { } while (0)
557 #define dma_cache_wback(_start,_size) do { } while (0)
558 #define dma_cache_wback_inv(_start,_size) do { } while (0)
/*
 * Some mucking forons use if[n]def writeq to check if platform has it.
 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 * to play with; for now just use cpp anti-recursion logics and make sure
 * that damn thing is defined and expands to itself.
 */
567 #define writeq writeq
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
574 #define xlate_dev_mem_ptr(p) __va(p)
/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
579 #define xlate_dev_kmem_ptr(p) p
581 #endif /* __KERNEL__ */
583 #endif /* __ALPHA_IO_H */