cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

io.c (4111B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * I/O string operations
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *    Copyright (C) 2006 IBM Corporation
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * Rewritten in C by Stephen Rothwell.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/export.h>

#include <asm/io.h>
#include <asm/firmware.h>
#include <asm/bug.h>

/* See definition in io.h */
bool isa_io_special;

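/*
 * Read count bytes from a single (non-incrementing) MMIO location into
 * buf.  The leading sync orders the reads after any preceding accesses,
 * eieio keeps the individual reads in program order, and the closing
 * "twi 0,%0,0; isync" pair stalls until the final load has completed.
 */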
void _insb(const volatile u8 __iomem *port, void *buf, long count)
{
	u8 *tbuf = buf;
	u8 tmp;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		tmp = *port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insb);

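/*
 * Write count bytes from buf to a single MMIO location.  The stores are
 * bracketed by sync instructions so they are ordered with respect to
 * surrounding accesses.
 */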
void _outsb(volatile u8 __iomem *port, const void *buf, long count)
{
	const u8 *tbuf = buf;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		*port = *tbuf++;
	} while (--count != 0);
	asm volatile("sync");
}
EXPORT_SYMBOL(_outsb);

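/* 16-bit variant of _insb; the _ns suffix means no byte swapping is done. */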
void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
{
	u16 *tbuf = buf;
	u16 tmp;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		tmp = *port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insw_ns);

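/* 16-bit variant of _outsb, again without byte swapping. */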
void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
{
	const u16 *tbuf = buf;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		*port = *tbuf++;
	} while (--count != 0);
	asm volatile("sync");
}
EXPORT_SYMBOL(_outsw_ns);

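/* 32-bit variant of _insb, without byte swapping. */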
void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
{
	u32 *tbuf = buf;
	u32 tmp;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		tmp = *port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insl_ns);

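/* 32-bit variant of _outsb, without byte swapping. */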
void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
{
	const u32 *tbuf = buf;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		*port = *tbuf++;
	} while (--count != 0);
	asm volatile("sync");
}
EXPORT_SYMBOL(_outsl_ns);

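/* True if v is aligned to an a-byte boundary; a must be a power of two. */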
#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)

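/*
 * Fill an MMIO region with the byte c.  The byte is replicated into a
 * 32-bit pattern so that the aligned middle of the region can be written
 * a word at a time; the unaligned head and tail are written byte-wise.
 */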
notrace void
_memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
	void *p = (void __force *)addr;
	u32 lc = c;
	lc |= lc << 8;
	lc |= lc << 16;

	__asm__ __volatile__ ("sync" : : : "memory");
	while (n && !IO_CHECK_ALIGN(p, 4)) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	while (n >= 4) {
		*((volatile u32 *)p) = lc;
		p += 4;
		n -= 4;
	}
	while (n) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memset_io);

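/*
 * Copy n bytes from MMIO space to normal memory.  Bytes are copied
 * individually until both pointers are word aligned, after which whole
 * words are moved; eieio keeps the MMIO reads in program order.
 */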
void _memcpy_fromio(void *dest, const volatile void __iomem *src,
		    unsigned long n)
{
	void *vsrc = (void __force *) src;

	__asm__ __volatile__ ("sync" : : : "memory");
	while (n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		eieio();
		vsrc++;
		dest++;
		n--;
	}
	while (n >= 4) {
		*((u32 *)dest) = *((volatile u32 *)vsrc);
		eieio();
		vsrc += 4;
		dest += 4;
		n -= 4;
	}
	while (n) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		eieio();
		vsrc++;
		dest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_fromio);

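/*
 * Copy n bytes from normal memory to MMIO space, using the same
 * alignment strategy as _memcpy_fromio.  The final sync orders the
 * stores before any subsequent access to the device.
 */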
void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
	void *vdest = (void __force *) dest;

	__asm__ __volatile__ ("sync" : : : "memory");
	while (n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	while (n >= 4) {
		*((volatile u32 *)vdest) = *((volatile u32 *)src);
		src += 4;
		vdest += 4;
		n -= 4;
	}
	while (n) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_toio);