/*
 * This file contains the definitions for the emulated IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl).  You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated to
 * (a) handle it all in a way that makes gcc able to optimize it as
 * well as possible and (b) avoid writing the same thing over and over
 * again with slight variations, possibly making a mistake somewhere.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)
#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)
/*
 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
 * large machines may have multiple other I/O spaces, so we can't place an a priori limit
 * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
 */
#define IO_SPACE_LIMIT		0xffffffffffffffffUL

#define MAX_IO_SPACES_BITS	4
#define MAX_IO_SPACES		(1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS		24
#define IO_SPACE_SIZE		(1UL << IO_SPACE_BITS)

#define IO_SPACE_NR(port)	((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))

#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
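
/*
 * Worked example (editor's illustration, not in the original header):
 * under sparse encoding, each naturally aligned 4-port group lands in
 * its own 4KB page.  E.g. for port 0x1f7:
 *
 *	IO_SPACE_SPARSE_ENCODING(0x1f7)
 *		= ((0x1f7 >> 2) << 12) | (0x1f7 & 0xfff)
 *		= (0x7d << 12) | 0x1f7
 *		= 0x7d1f7
 */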
struct io_space {
	unsigned long mmio_base;	/* base in MMIO space */
	int sparse;
};

extern struct io_space io_space[];
extern unsigned int num_io_spaces;
/*
 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
 *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
 *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
 *
 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
 * code that uses bare port numbers without the prerequisite pci_iomap().
 */
#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK		(PIO_OFFSET - 1)
#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE
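
/*
 * Editor's sketch (not part of the original header) of how an ioreadX()
 * routine can classify a cookie using the constants above; "my_ioread8"
 * is a hypothetical name:
 *
 *	unsigned int my_ioread8(void __iomem *addr)
 *	{
 *		unsigned long cookie = (unsigned long) addr;
 *
 *		if (cookie < PIO_OFFSET)
 *			BUG();	-- bare port number, no pci_iomap()
 *		if (cookie < PIO_RESERVED)
 *			return inb(cookie & PIO_MASK);	-- PIO cookie
 *		return readb(addr);			-- MMIO cookie
 *	}
 */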
# ifdef __KERNEL__

#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm-generic/iomap.h>
/*
 * Change virtual addresses to physical addresses and vice versa.
 */
static inline unsigned long
virt_to_phys (volatile void *address)
{
	return (unsigned long) address - PAGE_OFFSET;
}

static inline void*
phys_to_virt (unsigned long address)
{
	return (void *) (address + PAGE_OFFSET);
}
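
/*
 * Editor's note (illustration, not in the original header): these are
 * plain offset conversions within the kernel's identity-mapped region.
 * E.g., assuming ia64's PAGE_OFFSET of 0xe000000000000000,
 * virt_to_phys((void *) 0xe000000000001000) yields 0x1000.
 */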
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
/*
 * The following macros are deprecated and scheduled for removal.
 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
 */
#define bus_to_virt	phys_to_virt
#define virt_to_bus	virt_to_phys
#define page_to_bus	page_to_phys
/*
 * Memory fence w/accept.  This should never be used in code that is
 * not IA-64 specific.
 */
#define __ia64_mf_a()	ia64_mfa()
/**
 * ___ia64_mmiowb - I/O write barrier
 *
 * Ensure ordering of I/O space writes.  This will make sure that writes
 * following the barrier will arrive after all previous writes.  For most
 * ia64 platforms, this is a simple 'mf.a' instruction.
 *
 * See Documentation/DocBook/deviceiobook.tmpl for more information.
 */
static inline void ___ia64_mmiowb(void)
{
	ia64_mfa();
}
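
/*
 * Editor's illustration (not part of the original header): the typical
 * driver pattern is to order a posted MMIO write against a subsequent
 * spinlock release; "dev_lock", "val" and "addr" are hypothetical:
 *
 *	spin_lock(&dev_lock);
 *	writel(val, addr);
 *	mmiowb();	-- make the write visible before another CPU
 *			-- acquires the lock and writes to the device
 *	spin_unlock(&dev_lock);
 */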
static inline void*
__ia64_mk_io_addr (unsigned long port)
{
	struct io_space *space;
	unsigned long offset;

	space = &io_space[IO_SPACE_NR(port)];
	port = IO_SPACE_PORT(port);
	if (space->sparse)
		offset = IO_SPACE_SPARSE_ENCODING(port);
	else
		offset = port;

	return (void *) (space->mmio_base | offset);
}
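
/*
 * Worked example (editor's illustration, not in the original header):
 * for port 0x1000042, IO_SPACE_NR() selects io_space[1] and
 * IO_SPACE_PORT() leaves 0x42.  If that space is sparse, the offset
 * becomes ((0x42 >> 2) << 12) | 0x42 = 0x10042, so the returned
 * uncacheable address is io_space[1].mmio_base | 0x10042.
 */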
#define __ia64_inb	___ia64_inb
#define __ia64_inw	___ia64_inw
#define __ia64_inl	___ia64_inl
#define __ia64_outb	___ia64_outb
#define __ia64_outw	___ia64_outw
#define __ia64_outl	___ia64_outl
#define __ia64_readb	___ia64_readb
#define __ia64_readw	___ia64_readw
#define __ia64_readl	___ia64_readl
#define __ia64_readq	___ia64_readq
#define __ia64_readb_relaxed	___ia64_readb
#define __ia64_readw_relaxed	___ia64_readw
#define __ia64_readl_relaxed	___ia64_readl
#define __ia64_readq_relaxed	___ia64_readq
#define __ia64_writeb	___ia64_writeb
#define __ia64_writew	___ia64_writew
#define __ia64_writel	___ia64_writel
#define __ia64_writeq	___ia64_writeq
#define __ia64_mmiowb	___ia64_mmiowb
/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncacheable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */
static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}
static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		*dp++ = platform_inw(port);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		*dp++ = platform_inl(port);
}

static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		platform_outw(*sp++, port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		platform_outl(*sp++, port);
}
/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform dependent...
 */
#define __inb		platform_inb
#define __inw		platform_inw
#define __inl		platform_inl
#define __outb		platform_outb
#define __outw		platform_outw
#define __outl		platform_outl
#define __mmiowb	platform_mmiowb

#define inb(p)		__inb(p)
#define inw(p)		__inw(p)
#define inl(p)		__inl(p)
#define insb(p,d,c)	__insb(p,d,c)
#define insw(p,d,c)	__insw(p,d,c)
#define insl(p,d,c)	__insl(p,d,c)
#define outb(v,p)	__outb(v,p)
#define outw(v,p)	__outw(v,p)
#define outl(v,p)	__outl(v,p)
#define outsb(p,s,c)	__outsb(p,s,c)
#define outsw(p,s,c)	__outsw(p,s,c)
#define outsl(p,s,c)	__outsl(p,s,c)
#define mmiowb()	__mmiowb()
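
/*
 * Editor's illustration (not part of the original header): a driver for
 * a legacy IDE-style device might drain a 512-byte data register with
 * the string ops above; the port number and buffer are hypothetical:
 *
 *	u16 buf[256];
 *	insw(0x1f0, buf, 256);		-- read 256 16-bit words
 *	outsw(0x1f0, buf, 256);		-- write them back
 */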
/*
 * The addresses passed to these functions are ioremap()ped already.
 *
 * We need these to be machine vectors since some platforms don't provide
 * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
 * a good idea).  Writes are ok though for all existing ia64 platforms (and
 * hopefully it'll stay that way).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *) addr;
}

static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *) addr;
}

static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}

static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}

static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = val;
}

static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = val;
}

static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = val;
}

static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *) addr = val;
}
#define __readb		platform_readb
#define __readw		platform_readw
#define __readl		platform_readl
#define __readq		platform_readq
#define __readb_relaxed	platform_readb_relaxed
#define __readw_relaxed	platform_readw_relaxed
#define __readl_relaxed	platform_readl_relaxed
#define __readq_relaxed	platform_readq_relaxed

#define readb(a)	__readb((a))
#define readw(a)	__readw((a))
#define readl(a)	__readl((a))
#define readq(a)	__readq((a))
#define readb_relaxed(a)	__readb_relaxed((a))
#define readw_relaxed(a)	__readw_relaxed((a))
#define readl_relaxed(a)	__readl_relaxed((a))
#define readq_relaxed(a)	__readq_relaxed((a))
#define __raw_readb	readb
#define __raw_readw	readw
#define __raw_readl	readl
#define __raw_readq	readq
#define __raw_readb_relaxed	readb_relaxed
#define __raw_readw_relaxed	readw_relaxed
#define __raw_readl_relaxed	readl_relaxed
#define __raw_readq_relaxed	readq_relaxed
#define writeb(v,a)	__writeb((v), (a))
#define writew(v,a)	__writew((v), (a))
#define writel(v,a)	__writel((v), (a))
#define writeq(v,a)	__writeq((v), (a))
#define __raw_writeb	writeb
#define __raw_writew	writew
#define __raw_writel	writel
#define __raw_writeq	writeq
extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);

static inline void
iounmap (volatile void __iomem *addr)
{
}
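
/*
 * Editor's illustration (not part of the original header): the usual
 * MMIO access cycle through the interfaces above; the physical base,
 * size, and register offsets are hypothetical:
 *
 *	void __iomem *regs = ioremap(0xf8000000UL, 0x100);
 *
 *	if (regs) {
 *		writel(1, regs + 0x04);		-- hypothetical control reg
 *		status = readl(regs + 0x08);	-- hypothetical status reg
 *		iounmap(regs);
 *	}
 */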
/* Use normal IO mappings for DMI */
#define dmi_ioremap ioremap
#define dmi_iounmap(x,l) iounmap(x)
#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
/*
 * String versions of I/O memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
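
/*
 * Editor's illustration (not part of the original header): copying a
 * device buffer into ordinary memory, e.g. to snapshot a card's ROM;
 * "rom" is a hypothetical ioremap()ped pointer:
 *
 *	char buf[256];
 *	memcpy_fromio(buf, rom, sizeof(buf));
 */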
#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

# endif /* __KERNEL__ */
/*
 * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
 * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
 * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
 * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
 * over BIO-level virtual merging.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#if 1
#define BIO_VMERGE_BOUNDARY	0
#else
/*
 * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
 * replaced by dma_merge_mask() or something of that sort.  Note: the only way
 * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
 * expanded into:
 *
 *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
 *
 * which is precisely what we want.
 */
#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
#endif
#endif /* _ASM_IA64_IO_H */