// SPDX-License-Identifier: GPL-2.0
/*
 * Alpha IO and memory functions.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>

#include <asm/io.h>
/* Out-of-line versions of the i/o routines that redirect into the
   platform-specific version.  Note that "platform-specific" may mean
   "generic", which bumps through the machine vector.  */
17 ioread8(const void __iomem
*addr
)
21 ret
= IO_CONCAT(__IO_PREFIX
,ioread8
)(addr
);
26 unsigned int ioread16(const void __iomem
*addr
)
30 ret
= IO_CONCAT(__IO_PREFIX
,ioread16
)(addr
);
35 unsigned int ioread32(const void __iomem
*addr
)
39 ret
= IO_CONCAT(__IO_PREFIX
,ioread32
)(addr
);
44 u64
ioread64(const void __iomem
*addr
)
48 ret
= IO_CONCAT(__IO_PREFIX
,ioread64
)(addr
);
53 void iowrite8(u8 b
, void __iomem
*addr
)
56 IO_CONCAT(__IO_PREFIX
,iowrite8
)(b
, addr
);
59 void iowrite16(u16 b
, void __iomem
*addr
)
62 IO_CONCAT(__IO_PREFIX
,iowrite16
)(b
, addr
);
65 void iowrite32(u32 b
, void __iomem
*addr
)
68 IO_CONCAT(__IO_PREFIX
,iowrite32
)(b
, addr
);
71 void iowrite64(u64 b
, void __iomem
*addr
)
74 IO_CONCAT(__IO_PREFIX
,iowrite64
)(b
, addr
);
77 EXPORT_SYMBOL(ioread8
);
78 EXPORT_SYMBOL(ioread16
);
79 EXPORT_SYMBOL(ioread32
);
80 EXPORT_SYMBOL(ioread64
);
81 EXPORT_SYMBOL(iowrite8
);
82 EXPORT_SYMBOL(iowrite16
);
83 EXPORT_SYMBOL(iowrite32
);
84 EXPORT_SYMBOL(iowrite64
);
86 u8
inb(unsigned long port
)
88 return ioread8(ioport_map(port
, 1));
91 u16
inw(unsigned long port
)
93 return ioread16(ioport_map(port
, 2));
96 u32
inl(unsigned long port
)
98 return ioread32(ioport_map(port
, 4));
101 void outb(u8 b
, unsigned long port
)
103 iowrite8(b
, ioport_map(port
, 1));
106 void outw(u16 b
, unsigned long port
)
108 iowrite16(b
, ioport_map(port
, 2));
111 void outl(u32 b
, unsigned long port
)
113 iowrite32(b
, ioport_map(port
, 4));
123 u8
__raw_readb(const volatile void __iomem
*addr
)
125 return IO_CONCAT(__IO_PREFIX
,readb
)(addr
);
128 u16
__raw_readw(const volatile void __iomem
*addr
)
130 return IO_CONCAT(__IO_PREFIX
,readw
)(addr
);
133 u32
__raw_readl(const volatile void __iomem
*addr
)
135 return IO_CONCAT(__IO_PREFIX
,readl
)(addr
);
138 u64
__raw_readq(const volatile void __iomem
*addr
)
140 return IO_CONCAT(__IO_PREFIX
,readq
)(addr
);
143 void __raw_writeb(u8 b
, volatile void __iomem
*addr
)
145 IO_CONCAT(__IO_PREFIX
,writeb
)(b
, addr
);
148 void __raw_writew(u16 b
, volatile void __iomem
*addr
)
150 IO_CONCAT(__IO_PREFIX
,writew
)(b
, addr
);
153 void __raw_writel(u32 b
, volatile void __iomem
*addr
)
155 IO_CONCAT(__IO_PREFIX
,writel
)(b
, addr
);
158 void __raw_writeq(u64 b
, volatile void __iomem
*addr
)
160 IO_CONCAT(__IO_PREFIX
,writeq
)(b
, addr
);
163 EXPORT_SYMBOL(__raw_readb
);
164 EXPORT_SYMBOL(__raw_readw
);
165 EXPORT_SYMBOL(__raw_readl
);
166 EXPORT_SYMBOL(__raw_readq
);
167 EXPORT_SYMBOL(__raw_writeb
);
168 EXPORT_SYMBOL(__raw_writew
);
169 EXPORT_SYMBOL(__raw_writel
);
170 EXPORT_SYMBOL(__raw_writeq
);
172 u8
readb(const volatile void __iomem
*addr
)
176 ret
= __raw_readb(addr
);
181 u16
readw(const volatile void __iomem
*addr
)
185 ret
= __raw_readw(addr
);
190 u32
readl(const volatile void __iomem
*addr
)
194 ret
= __raw_readl(addr
);
199 u64
readq(const volatile void __iomem
*addr
)
203 ret
= __raw_readq(addr
);
208 void writeb(u8 b
, volatile void __iomem
*addr
)
211 __raw_writeb(b
, addr
);
214 void writew(u16 b
, volatile void __iomem
*addr
)
217 __raw_writew(b
, addr
);
220 void writel(u32 b
, volatile void __iomem
*addr
)
223 __raw_writel(b
, addr
);
226 void writeq(u64 b
, volatile void __iomem
*addr
)
229 __raw_writeq(b
, addr
);
232 EXPORT_SYMBOL(readb
);
233 EXPORT_SYMBOL(readw
);
234 EXPORT_SYMBOL(readl
);
235 EXPORT_SYMBOL(readq
);
236 EXPORT_SYMBOL(writeb
);
237 EXPORT_SYMBOL(writew
);
238 EXPORT_SYMBOL(writel
);
239 EXPORT_SYMBOL(writeq
);
242 * The _relaxed functions must be ordered w.r.t. each other, but they don't
243 * have to be ordered w.r.t. other memory accesses.
245 u8
readb_relaxed(const volatile void __iomem
*addr
)
248 return __raw_readb(addr
);
251 u16
readw_relaxed(const volatile void __iomem
*addr
)
254 return __raw_readw(addr
);
257 u32
readl_relaxed(const volatile void __iomem
*addr
)
260 return __raw_readl(addr
);
263 u64
readq_relaxed(const volatile void __iomem
*addr
)
266 return __raw_readq(addr
);
269 EXPORT_SYMBOL(readb_relaxed
);
270 EXPORT_SYMBOL(readw_relaxed
);
271 EXPORT_SYMBOL(readl_relaxed
);
272 EXPORT_SYMBOL(readq_relaxed
);
275 * Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
277 void ioread8_rep(const void __iomem
*port
, void *dst
, unsigned long count
)
279 while ((unsigned long)dst
& 0x3) {
283 *(unsigned char *)dst
= ioread8(port
);
291 w
|= ioread8(port
) << 8;
292 w
|= ioread8(port
) << 16;
293 w
|= ioread8(port
) << 24;
294 *(unsigned int *)dst
= w
;
300 *(unsigned char *)dst
= ioread8(port
);
305 void insb(unsigned long port
, void *dst
, unsigned long count
)
307 ioread8_rep(ioport_map(port
, 1), dst
, count
);
310 EXPORT_SYMBOL(ioread8_rep
);
314 * Read COUNT 16-bit words from port PORT into memory starting at
315 * SRC. SRC must be at least short aligned. This is used by the
316 * IDE driver to read disk sectors. Performance is important, but
317 * the interfaces seems to be slow: just using the inlined version
318 * of the inw() breaks things.
320 void ioread16_rep(const void __iomem
*port
, void *dst
, unsigned long count
)
322 if (unlikely((unsigned long)dst
& 0x3)) {
325 BUG_ON((unsigned long)dst
& 0x1);
327 *(unsigned short *)dst
= ioread16(port
);
335 w
|= ioread16(port
) << 16;
336 *(unsigned int *)dst
= w
;
341 *(unsigned short*)dst
= ioread16(port
);
345 void insw(unsigned long port
, void *dst
, unsigned long count
)
347 ioread16_rep(ioport_map(port
, 2), dst
, count
);
350 EXPORT_SYMBOL(ioread16_rep
);
355 * Read COUNT 32-bit words from port PORT into memory starting at
356 * SRC. Now works with any alignment in SRC. Performance is important,
357 * but the interfaces seems to be slow: just using the inlined version
358 * of the inl() breaks things.
360 void ioread32_rep(const void __iomem
*port
, void *dst
, unsigned long count
)
362 if (unlikely((unsigned long)dst
& 0x3)) {
364 struct S
{ int x
__attribute__((packed
)); };
365 ((struct S
*)dst
)->x
= ioread32(port
);
369 /* Buffer 32-bit aligned. */
371 *(unsigned int *)dst
= ioread32(port
);
377 void insl(unsigned long port
, void *dst
, unsigned long count
)
379 ioread32_rep(ioport_map(port
, 4), dst
, count
);
382 EXPORT_SYMBOL(ioread32_rep
);
387 * Like insb but in the opposite direction.
388 * Don't worry as much about doing aligned memory transfers:
389 * doing byte reads the "slow" way isn't nearly as slow as
390 * doing byte writes the slow way (no r-m-w cycle).
392 void iowrite8_rep(void __iomem
*port
, const void *xsrc
, unsigned long count
)
394 const unsigned char *src
= xsrc
;
396 iowrite8(*src
++, port
);
399 void outsb(unsigned long port
, const void *src
, unsigned long count
)
401 iowrite8_rep(ioport_map(port
, 1), src
, count
);
404 EXPORT_SYMBOL(iowrite8_rep
);
405 EXPORT_SYMBOL(outsb
);
409 * Like insw but in the opposite direction. This is used by the IDE
410 * driver to write disk sectors. Performance is important, but the
411 * interfaces seems to be slow: just using the inlined version of the
412 * outw() breaks things.
414 void iowrite16_rep(void __iomem
*port
, const void *src
, unsigned long count
)
416 if (unlikely((unsigned long)src
& 0x3)) {
419 BUG_ON((unsigned long)src
& 0x1);
420 iowrite16(*(unsigned short *)src
, port
);
428 w
= *(unsigned int *)src
;
430 iowrite16(w
>> 0, port
);
431 iowrite16(w
>> 16, port
);
435 iowrite16(*(unsigned short *)src
, port
);
439 void outsw(unsigned long port
, const void *src
, unsigned long count
)
441 iowrite16_rep(ioport_map(port
, 2), src
, count
);
444 EXPORT_SYMBOL(iowrite16_rep
);
445 EXPORT_SYMBOL(outsw
);
449 * Like insl but in the opposite direction. This is used by the IDE
450 * driver to write disk sectors. Works with any alignment in SRC.
451 * Performance is important, but the interfaces seems to be slow:
452 * just using the inlined version of the outl() breaks things.
454 void iowrite32_rep(void __iomem
*port
, const void *src
, unsigned long count
)
456 if (unlikely((unsigned long)src
& 0x3)) {
458 struct S
{ int x
__attribute__((packed
)); };
459 iowrite32(((struct S
*)src
)->x
, port
);
463 /* Buffer 32-bit aligned. */
465 iowrite32(*(unsigned int *)src
, port
);
471 void outsl(unsigned long port
, const void *src
, unsigned long count
)
473 iowrite32_rep(ioport_map(port
, 4), src
, count
);
476 EXPORT_SYMBOL(iowrite32_rep
);
477 EXPORT_SYMBOL(outsl
);
481 * Copy data from IO memory space to "real" memory space.
482 * This needs to be optimized.
484 void memcpy_fromio(void *to
, const volatile void __iomem
*from
, long count
)
486 /* Optimize co-aligned transfers. Everything else gets handled
489 if (count
>= 8 && ((u64
)to
& 7) == ((u64
)from
& 7)) {
492 *(u64
*)to
= __raw_readq(from
);
496 } while (count
>= 0);
500 if (count
>= 4 && ((u64
)to
& 3) == ((u64
)from
& 3)) {
503 *(u32
*)to
= __raw_readl(from
);
507 } while (count
>= 0);
511 if (count
>= 2 && ((u64
)to
& 1) == ((u64
)from
& 1)) {
514 *(u16
*)to
= __raw_readw(from
);
518 } while (count
>= 0);
523 *(u8
*) to
= __raw_readb(from
);
531 EXPORT_SYMBOL(memcpy_fromio
);
535 * Copy data from "real" memory space to IO memory space.
536 * This needs to be optimized.
538 void memcpy_toio(volatile void __iomem
*to
, const void *from
, long count
)
540 /* Optimize co-aligned transfers. Everything else gets handled
542 /* FIXME -- align FROM. */
544 if (count
>= 8 && ((u64
)to
& 7) == ((u64
)from
& 7)) {
547 __raw_writeq(*(const u64
*)from
, to
);
551 } while (count
>= 0);
555 if (count
>= 4 && ((u64
)to
& 3) == ((u64
)from
& 3)) {
558 __raw_writel(*(const u32
*)from
, to
);
562 } while (count
>= 0);
566 if (count
>= 2 && ((u64
)to
& 1) == ((u64
)from
& 1)) {
569 __raw_writew(*(const u16
*)from
, to
);
573 } while (count
>= 0);
578 __raw_writeb(*(const u8
*) from
, to
);
586 EXPORT_SYMBOL(memcpy_toio
);
590 * "memset" on IO memory space.
592 void _memset_c_io(volatile void __iomem
*to
, unsigned long c
, long count
)
594 /* Handle any initial odd byte */
595 if (count
> 0 && ((u64
)to
& 1)) {
601 /* Handle any initial odd halfword */
602 if (count
>= 2 && ((u64
)to
& 2)) {
608 /* Handle any initial odd word */
609 if (count
>= 4 && ((u64
)to
& 4)) {
615 /* Handle all full-sized quadwords: we're aligned
616 (or have a small count) */
623 } while (count
>= 0);
627 /* The tail is word-aligned if we still have count >= 4 */
634 /* The tail is half-word aligned if we have count >= 2 */
641 /* And finally, one last byte.. */
648 EXPORT_SYMBOL(_memset_c_io
);
650 #if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE)
654 /* A version of memcpy used by the vga console routines to move data around
655 arbitrarily between screen and main memory. */
658 scr_memcpyw(u16
*d
, const u16
*s
, unsigned int count
)
660 const u16 __iomem
*ios
= (const u16 __iomem
*) s
;
661 u16 __iomem
*iod
= (u16 __iomem
*) d
;
662 int s_isio
= __is_ioaddr(s
);
663 int d_isio
= __is_ioaddr(d
);
667 /* FIXME: Should handle unaligned ops and
668 operation widening. */
672 u16 tmp
= __raw_readw(ios
++);
673 __raw_writew(tmp
, iod
++);
677 memcpy_fromio(d
, ios
, count
);
680 memcpy_toio(iod
, s
, count
);
686 EXPORT_SYMBOL(scr_memcpyw
);
688 void scr_memmovew(u16
*d
, const u16
*s
, unsigned int count
)
691 scr_memcpyw(d
, s
, count
);
697 scr_writew(scr_readw(--s
), --d
);
700 EXPORT_SYMBOL(scr_memmovew
);
703 void __iomem
*ioport_map(unsigned long port
, unsigned int size
)
705 return IO_CONCAT(__IO_PREFIX
,ioportmap
) (port
);
708 void ioport_unmap(void __iomem
*addr
)
712 EXPORT_SYMBOL(ioport_map
);
713 EXPORT_SYMBOL(ioport_unmap
);