Commit f5e706ad authored by Sam Ravnborg, committed by David S. Miller

sparc: join the remaining header files


With this commit all sparc64 header files are moved to asm-sparc.
The remaining 71 files were too different to be merged trivially, so each
is split into a _32.h and a _64.h file, both of which are included from
the common file without a bit-size suffix.

The following script was used:
cd include
FILES=`wc -l asm-sparc64/*h | grep -v '^     1' | cut -b 20-`

for FILE in ${FILES}; do
  echo $FILE:
  BASE=`echo $FILE | cut -d '.' -f 1`
  FN32=${BASE}_32.h
  FN64=${BASE}_64.h
  GUARD=___ASM_SPARC_`echo $BASE | tr '-' '_' | tr [:lower:] [:upper:]`_H
  git mv asm-sparc/$FILE asm-sparc/$FN32
  git mv asm-sparc64/$FILE asm-sparc/$FN64
  echo git mv done
  printf "#ifndef %s\n" $GUARD                             >   asm-sparc/$FILE
  printf "#define %s\n" $GUARD                             >>  asm-sparc/$FILE
  printf "#if defined(__sparc__) && defined(__arch64__)\n" >>  asm-sparc/$FILE
  printf "#include <asm-sparc/%s>\n" $FN64                 >>  asm-sparc/$FILE
  printf "#else\n"                                         >>  asm-sparc/$FILE
  printf "#include <asm-sparc/%s>\n" $FN32                 >>  asm-sparc/$FILE
  printf "#endif\n"                                        >>  asm-sparc/$FILE
  printf "#endif\n"                                        >>  asm-sparc/$FILE
  git add asm-sparc/$FILE
  echo new file done
  printf "#include <asm-sparc/%s>\n" $FILE                 >  asm-sparc64/$FILE
  git add asm-sparc64/$FILE
  echo sparc64 file done
done
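
For example, running the loop over atomic.h produces the following new
asm-sparc/atomic.h wrapper, and reduces asm-sparc64/atomic.h to a one-line
stub (shown here for illustration; the generated files for every header
follow the same pattern and appear in the diff below):

#ifndef ___ASM_SPARC_ATOMIC_H
#define ___ASM_SPARC_ATOMIC_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/atomic_64.h>
#else
#include <asm-sparc/atomic_32.h>
#endif
#endif

and, as the compatibility stub left behind in asm-sparc64/atomic.h:

#include <asm-sparc/atomic.h>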

The guard contains three '_' to avoid conflict with existing guards.
In addition, the two Kbuild files are emptied to avoid breaking the
headers_* targets.
We will reintroduce the exported header files when the necessary
kbuild changes are merged.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5e3609f6
include include/asm-generic/Kbuild.asm
header-y += apc.h
header-y += asi.h
header-y += bpp.h
header-y += display7seg.h
header-y += envctrl.h
header-y += jsflash.h
header-y += openprom.h
header-y += openpromio.h
header-y += psrcompat.h
header-y += pstate.h
header-y += reg.h
header-y += traps.h
header-y += uctx.h
header-y += utrap.h
header-y += vfc_ioctls.h
header-y += watchdog.h
unifdef-y += fbio.h
unifdef-y += perfctr.h
unifdef-y += psr.h
# dummy file to avoid breaking make headers_install
/* atomic.h: These still suck, but the I-cache hit rate is higher.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/types.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
#define atomic_read(v) ((v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* This is the old 24-bit implementation. It's still used internally
* by some sparc-specific code, notably the semaphore implementation.
*/
typedef struct { volatile int counter; } atomic24_t;
#ifndef CONFIG_SMP
#define ATOMIC24_INIT(i) { (i) }
#define atomic24_read(v) ((v)->counter)
#define atomic24_set(v, i) (((v)->counter) = i)
#ifndef ___ASM_SPARC_ATOMIC_H
#define ___ASM_SPARC_ATOMIC_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/atomic_64.h>
#else
#include <asm-sparc/atomic_32.h>
#endif
#endif
#else
/* We do the bulk of the actual work out of line in two common
* routines in assembler, see arch/sparc/lib/atomic.S for the
* "fun" details.
*
* For SMP the trick is you embed the spin lock byte within
* the word, use the low byte so signedness is easily retained
* via a quick arithmetic shift. It looks like this:
*
* ----------------------------------------
* | signed 24-bit counter value | lock | atomic_t
* ----------------------------------------
* 31 8 7 0
*/
#define ATOMIC24_INIT(i) { ((i) << 8) }
static inline int atomic24_read(const atomic24_t *v)
{
int ret = v->counter;
while(ret & 0xff)
ret = v->counter;
return ret >> 8;
}
#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
#endif
static inline int __atomic24_add(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_add\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
static inline int __atomic24_sub(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))
#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* !(__KERNEL__) */
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */
/* atomic.h: These still suck, but the I-cache hit rate is higher.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/types.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
#define atomic_read(v) ((v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* This is the old 24-bit implementation. It's still used internally
* by some sparc-specific code, notably the semaphore implementation.
*/
typedef struct { volatile int counter; } atomic24_t;
#ifndef CONFIG_SMP
#define ATOMIC24_INIT(i) { (i) }
#define atomic24_read(v) ((v)->counter)
#define atomic24_set(v, i) (((v)->counter) = i)
#else
/* We do the bulk of the actual work out of line in two common
* routines in assembler, see arch/sparc/lib/atomic.S for the
* "fun" details.
*
* For SMP the trick is you embed the spin lock byte within
* the word, use the low byte so signedness is easily retained
* via a quick arithmetic shift. It looks like this:
*
* ----------------------------------------
* | signed 24-bit counter value | lock | atomic_t
* ----------------------------------------
* 31 8 7 0
*/
#define ATOMIC24_INIT(i) { ((i) << 8) }
static inline int atomic24_read(const atomic24_t *v)
{
int ret = v->counter;
while(ret & 0xff)
ret = v->counter;
return ret >> 8;
}
#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
#endif
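/* Example of the SMP encoding above: ATOMIC24_INIT(5) stores 0x500, i.e. the
 * counter value 5 in bits 31:8 with the lock byte (bits 7:0) clear;
 * atomic24_read() spins until the lock byte is zero and then returns
 * v->counter >> 8 == 5.
 */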
static inline int __atomic24_add(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_add\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
static inline int __atomic24_sub(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))
#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* !(__KERNEL__) */
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */
/* atomic.h: Thankfully the V9 is at least reasonable for this
* stuff.
*
* Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
*/
#ifndef __ARCH_SPARC64_ATOMIC__
#define __ARCH_SPARC64_ATOMIC__
#include <linux/types.h>
#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
#define atomic64_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i)
extern void atomic_add(int, atomic_t *);
extern void atomic64_add(int, atomic64_t *);
extern void atomic_sub(int, atomic_t *);
extern void atomic64_sub(int, atomic64_t *);
extern int atomic_add_ret(int, atomic_t *);
extern int atomic64_add_ret(int, atomic64_t *);
extern int atomic_sub_ret(int, atomic_t *);
extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic_dec_return(v) atomic_sub_ret(1, v)
#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
#define atomic_inc_return(v) atomic_add_ret(1, v)
#define atomic64_inc_return(v) atomic64_add_ret(1, v)
#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
#define atomic_add_return(i, v) atomic_add_ret(i, v)
#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
#define atomic_inc(v) atomic_add(1, v)
#define atomic64_inc(v) atomic64_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
#define atomic64_dec(v) atomic64_sub(1, v)
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
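/* atomic_add_unless() above is a standard compare-and-swap retry loop: reread
 * the counter whenever atomic_cmpxchg() reports interference, stop as soon as
 * the observed value equals 'u', and return nonzero iff the add was actually
 * performed.
 */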
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
#define smp_mb__before_atomic_dec() membar_storeload_loadload();
#define smp_mb__after_atomic_dec() membar_storeload_storestore();
#define smp_mb__before_atomic_inc() membar_storeload_loadload();
#define smp_mb__after_atomic_inc() membar_storeload_storestore();
#else
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
/*
* auxio.h: Definitions and code for the Auxiliary I/O register.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_AUXIO_H
#define _SPARC_AUXIO_H
#include <asm/system.h>
#include <asm/vaddrs.h>
/* This register is an unsigned char in IO space. It does two things.
* First, it is used to control the front panel LED light on machines
* that have it (good for testing entry points to trap handlers and irq's)
* Secondly, it controls various floppy drive parameters.
*/
#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
/* Set the following to zero to eject the floppy. */
#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
#ifndef __ASSEMBLY__
/*
* NOTE: these routines are implementation dependent--
* understand the hardware you are querying!
*/
extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
/*
* The following routines are provided for driver-compatibility
* with sparc64 (primarily sunlance.c)
*/
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
#define auxio_set_lte(on) \
do { \
if(on) { \
set_auxio(AUXIO_LINK_TEST, 0); \
} else { \
set_auxio(0, AUXIO_LINK_TEST); \
} \
} while (0)
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
#define auxio_set_led(on) \
do { \
if(on) { \
set_auxio(AUXIO_LED, 0); \
} else { \
set_auxio(0, AUXIO_LED); \
} \
} while (0)
#endif /* !(__ASSEMBLY__) */
/* AUXIO2 (Power Off Control) */
extern __volatile__ unsigned char * auxio_power_register;
#define AUXIO_POWER_DETECT_FAILURE 32
#define AUXIO_POWER_CLEAR_FAILURE 2
#define AUXIO_POWER_OFF 1
#endif /* !(_SPARC_AUXIO_H) */
#ifndef ___ASM_SPARC_AUXIO_H
#define ___ASM_SPARC_AUXIO_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/auxio_64.h>
#else
#include <asm-sparc/auxio_32.h>
#endif
#endif
/*
* auxio.h: Definitions and code for the Auxiliary I/O register.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_AUXIO_H
#define _SPARC_AUXIO_H
#include <asm/system.h>
#include <asm/vaddrs.h>
/* This register is an unsigned char in IO space. It does two things.
* First, it is used to control the front panel LED light on machines
* that have it (good for testing entry points to trap handlers and irq's)
* Secondly, it controls various floppy drive parameters.
*/
#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
/* Set the following to zero to eject the floppy. */
#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
#ifndef __ASSEMBLY__
/*
* NOTE: these routines are implementation dependent--
* understand the hardware you are querying!
*/
extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
/*
* The following routines are provided for driver-compatibility
* with sparc64 (primarily sunlance.c)
*/
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
#define auxio_set_lte(on) \
do { \
if(on) { \
set_auxio(AUXIO_LINK_TEST, 0); \
} else { \
set_auxio(0, AUXIO_LINK_TEST); \
} \
} while (0)
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
#define auxio_set_led(on) \
do { \
if(on) { \
set_auxio(AUXIO_LED, 0); \
} else { \
set_auxio(0, AUXIO_LED); \
} \
} while (0)
#endif /* !(__ASSEMBLY__) */
/* AUXIO2 (Power Off Control) */
extern __volatile__ unsigned char * auxio_power_register;
#define AUXIO_POWER_DETECT_FAILURE 32
#define AUXIO_POWER_CLEAR_FAILURE 2
#define AUXIO_POWER_OFF 1
#endif /* !(_SPARC_AUXIO_H) */
/*
* auxio.h: Definitions and code for the Auxiliary I/O registers.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*
* Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
*/
#ifndef _SPARC64_AUXIO_H
#define _SPARC64_AUXIO_H
/* AUXIO implementations:
* sbus-based NCR89C105 "Slavio"
* LED/Floppy (AUX1) register
* Power (AUX2) register
*
* ebus-based auxio on PCIO
* LED Auxio Register
* Power Auxio Register
*
* Register definitions from NCR _NCR89C105 Chip Specification_
*
* SLAVIO AUX1 @ 0x1900000
* -------------------------------------------------
* | (R) | (R) | D | (R) | E | M | T | L |
* -------------------------------------------------
* (R) - bit 7:6,4 are reserved and should be masked in s/w
* D - Floppy Density Sense (1=high density) R/O
* E - Link Test Enable, directly reflected on AT&T 7213 LTE pin
* M - Monitor/Mouse Mux, directly reflected on MON_MSE_MUX pin
* T - Terminal Count: sends TC pulse to 82077 floppy controller
* L - System LED on front panel (0=off, 1=on)
*/
#define AUXIO_AUX1_MASK 0xc0 /* Mask bits */
#define AUXIO_AUX1_FDENS 0x20 /* Floppy Density Sense */
#define AUXIO_AUX1_LTE 0x08 /* Link Test Enable */
#define AUXIO_AUX1_MMUX 0x04 /* Monitor/Mouse Mux */
#define AUXIO_AUX1_FTCNT 0x02 /* Terminal Count, */
#define AUXIO_AUX1_LED 0x01 /* System LED */
/* SLAVIO AUX2 @ 0x1910000
* -------------------------------------------------
* | (R) | (R) | D | (R) | (R) | (R) | C | F |
* -------------------------------------------------
* (R) - bits 7:6,4:2 are reserved and should be masked in s/w
* D - Power Failure Detect (1=power fail)
* C - Clear Power Failure Detect Int (1=clear)
* F - Power Off (1=power off)
*/
#define AUXIO_AUX2_MASK 0xdc /* Mask Bits */
#define AUXIO_AUX2_PFAILDET 0x20 /* Power Fail Detect */
#define AUXIO_AUX2_PFAILCLR 0x02 /* Clear Pwr Fail Det Intr */
#define AUXIO_AUX2_PWR_OFF 0x01 /* Power Off */
/* Register definitions from Sun Microsystems _PCIO_ p/n 802-7837
*
* PCIO LED Auxio @ 0x726000
* -------------------------------------------------
* | 31:1 Unused | LED |
* -------------------------------------------------
* Bits 31:1 unused
* LED - System LED on front panel (0=off, 1=on)
*/
#define AUXIO_PCIO_LED 0x01 /* System LED */
/* PCIO Power Auxio @ 0x724000
* -------------------------------------------------
* | 31:2 Unused | CPO | SPO |
* -------------------------------------------------
* Bits 31:2 unused
* CPO - Courtesy Power Off (1=off)
* SPO - System Power Off (1=off)
*/
#define AUXIO_PCIO_CPWR_OFF 0x02 /* Courtesy Power Off */
#define AUXIO_PCIO_SPWR_OFF 0x01 /* System Power Off */
#ifndef __ASSEMBLY__
extern void __iomem *auxio_register;
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
extern void auxio_set_lte(int on);
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
extern void auxio_set_led(int on);
#endif /* ifndef __ASSEMBLY__ */
#endif /* !(_SPARC64_AUXIO_H) */
/*
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright 2001 Anton Blanchard (anton@samba.org)
*/
#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#ifndef ___ASM_SPARC_BITOPS_H
#define ___ASM_SPARC_BITOPS_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/bitops_64.h>
#else
#include <asm-sparc/bitops_32.h>
#endif
#endif
extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte. Sparc is BIG-Endian. Unless noted otherwise
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___set_bit(ADDR, mask) != 0;
}
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___set_bit(ADDR, mask);
}
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___clear_bit(ADDR, mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___clear_bit(ADDR, mask);
}
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___change_bit(ADDR, mask) != 0;
}
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___change_bit(ADDR, mask);
}
#include <asm-generic/bitops/non-atomic.h>
#define smp_mb__before_clear_bit() do { } while(0)
#define smp_mb__after_clear_bit() do { } while(0)
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC_BITOPS_H) */
/*
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright 2001 Anton Blanchard (anton@samba.org)
*/
#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte. Sparc is BIG-Endian. Unless noted otherwise
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___set_bit(ADDR, mask) != 0;
}
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___set_bit(ADDR, mask);
}
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___clear_bit(ADDR, mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___clear_bit(ADDR, mask);
}
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___change_bit(ADDR, mask) != 0;
}
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___change_bit(ADDR, mask);
}
#include <asm-generic/bitops/non-atomic.h>
#define smp_mb__before_clear_bit() do { } while(0)
#define smp_mb__after_clear_bit() do { } while(0)
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC_BITOPS_H) */
/*
* bitops.h: Bit string operations on the V9.
*
* Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC64_BITOPS_H
#define _SPARC64_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/byteorder.h>
extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
extern void set_bit(unsigned long nr, volatile unsigned long *addr);
extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
extern void change_bit(unsigned long nr, volatile unsigned long *addr);
#include <asm-generic/bitops/non-atomic.h>
#ifdef CONFIG_SMP
#define smp_mb__before_clear_bit() membar_storeload_loadload()
#define smp_mb__after_clear_bit() membar_storeload_storestore()
#else
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
#endif
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
#ifdef ULTRA_HAS_POPULATION_COUNT
static inline unsigned int hweight64(unsigned long w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
return res;
}
static inline unsigned int hweight32(unsigned int w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
return res;
}
static inline unsigned int hweight16(unsigned int w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
return res;
}
static inline unsigned int hweight8(unsigned int w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff));
return res;
}
#else
#include <asm-generic/bitops/hweight.h>
#endif
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>
#define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
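/* The '^ 0x38' above flips the byte index within a 64-bit word, translating
 * ext2's little-endian bit numbering into the big-endian bit layout that
 * test_and_set_bit()/test_and_clear_bit() operate on.
 */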
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC64_BITOPS_H) */
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H
#include <linux/mm.h> /* Common for other includes */
// #include <linux/kernel.h> from pgalloc.h
// #include <linux/sched.h> from pgalloc.h
// #include <asm/page.h>
#include <asm/btfixup.h>
/*
* Fine grained cache flushing.
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
extern void sparc_flush_page_to_ram(struct page *page);
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
#endif /* _SPARC_CACHEFLUSH_H */
#ifndef ___ASM_SPARC_CACHEFLUSH_H
#define ___ASM_SPARC_CACHEFLUSH_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/cacheflush_64.h>
#else
#include <asm-sparc/cacheflush_32.h>
#endif
#endif
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H
#include <linux/mm.h> /* Common for other includes */
// #include <linux/kernel.h> from pgalloc.h
// #include <linux/sched.h> from pgalloc.h
// #include <asm/page.h>
#include <asm/btfixup.h>
/*
* Fine grained cache flushing.
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
extern void sparc_flush_page_to_ram(struct page *page);
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
#endif /* _SPARC_CACHEFLUSH_H */
#ifndef _SPARC64_CACHEFLUSH_H
#define _SPARC64_CACHEFLUSH_H
#include <asm/page.h>
#ifndef __ASSEMBLY__
#include <linux/mm.h>
/* Cache flush operations. */
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_range(vma, start, end) \
flush_cache_mm((vma)->vm_mm)
#define flush_cache_page(vma, page, pfn) \
flush_cache_mm((vma)->vm_mm)
/*
* On spitfire, the icache doesn't snoop local stores and we don't
* use block commit stores (which invalidate icache lines) during
* module load, so we need this.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_icache_page(unsigned long);
extern void __flush_dcache_page(void *addr, int flush_icache);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif
extern void __flush_dcache_range(unsigned long start, unsigned long end);
extern void flush_dcache_page(struct page *page);
#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
unsigned long uaddr, void *kaddr,
unsigned long len, int write);
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_ptrace_access(vma, page, vaddr, src, len, 0); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
} while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#ifdef CONFIG_DEBUG_PAGEALLOC
/* internal debugging function */
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _SPARC64_CACHEFLUSH_H */
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err)
{
register unsigned long ret asm("o0") = (unsigned long)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,2\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err)
{
if (!access_ok (VERIFY_WRITE, dst, len)) {
*err = -EFAULT;
return sum;
} else {
register unsigned long ret asm("o0") = (unsigned long)src;
register char __user *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,1\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
}
#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__sum16 sum;
/* Note: We must read %2 before we touch %0 for the first time,
* because GCC can legitimately use the same register for
* both operands.
*/
__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
"ld\t[%1 + 0x00], %0\n\t"
"ld\t[%1 + 0x04], %%g2\n\t"
"ld\t[%1 + 0x08], %%g3\n\t"
"addcc\t%%g2, %0, %0\n\t"
"addxcc\t%%g3, %0, %0\n\t"
"ld\t[%1 + 0x0c], %%g2\n\t"
"ld\t[%1 + 0x10], %%g3\n\t"
"addxcc\t%%g2, %0, %0\n\t"
"addx\t%0, %%g0, %0\n"
"1:\taddcc\t%%g3, %0, %0\n\t"
"add\t%1, 4, %1\n\t"
"addxcc\t%0, %%g0, %0\n\t"
"subcc\t%%g4, 1, %%g4\n\t"
"be,a\t2f\n\t"
"sll\t%0, 16, %%g2\n\t"
"b\t1b\n\t"
"ld\t[%1 + 0x10], %%g3\n"
"2:\taddcc\t%0, %%g2, %%g2\n\t"
"srl\t%%g2, 16, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
"xnor\t%%g0, %0, %0"
: "=r" (sum), "=&r" (iph)
: "r" (ihl), "1" (iph)
: "g2", "g3", "g4", "cc", "memory");
return sum;
}
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
"srl\t%1, 16, %1\n\t"
"addx\t%1, %%g0, %1\n\t"
"xnor\t%%g0, %1, %0"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
"addxcc\t%2, %0, %0\n\t"
"addxcc\t%3, %0, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum),
"1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
"addcc %3, %4, %%g4\n\t"
"addxcc %5, %%g4, %%g4\n\t"
"ld [%2 + 0x0c], %%g2\n\t"
"ld [%2 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%2 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%2 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x0c], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"addxcc %%g3, %%g4, %0\n\t"
"addx 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g4", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* !(__SPARC_CHECKSUM_H) */
#ifndef ___ASM_SPARC_CHECKSUM_H
#define ___ASM_SPARC_CHECKSUM_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/checksum_64.h>
#else
#include <asm-sparc/checksum_32.h>
#endif
#endif
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err)
{
register unsigned long ret asm("o0") = (unsigned long)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,2\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err)
{
if (!access_ok (VERIFY_WRITE, dst, len)) {
*err = -EFAULT;
return sum;
} else {
register unsigned long ret asm("o0") = (unsigned long)src;
register char __user *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,1\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
}
#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__sum16 sum;
/* Note: We must read %2 before we touch %0 for the first time,
* because GCC can legitimately use the same register for
* both operands.
*/
__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
"ld\t[%1 + 0x00], %0\n\t"
"ld\t[%1 + 0x04], %%g2\n\t"
"ld\t[%1 + 0x08], %%g3\n\t"
"addcc\t%%g2, %0, %0\n\t"
"addxcc\t%%g3, %0, %0\n\t"
"ld\t[%1 + 0x0c], %%g2\n\t"
"ld\t[%1 + 0x10], %%g3\n\t"
"addxcc\t%%g2, %0, %0\n\t"
"addx\t%0, %%g0, %0\n"
"1:\taddcc\t%%g3, %0, %0\n\t"
"add\t%1, 4, %1\n\t"
"addxcc\t%0, %%g0, %0\n\t"
"subcc\t%%g4, 1, %%g4\n\t"
"be,a\t2f\n\t"
"sll\t%0, 16, %%g2\n\t"
"b\t1b\n\t"
"ld\t[%1 + 0x10], %%g3\n"
"2:\taddcc\t%0, %%g2, %%g2\n\t"
"srl\t%%g2, 16, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
"xnor\t%%g0, %0, %0"
: "=r" (sum), "=&r" (iph)
: "r" (ihl), "1" (iph)
: "g2", "g3", "g4", "cc", "memory");
return sum;
}
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
"srl\t%1, 16, %1\n\t"
"addx\t%1, %%g0, %1\n\t"
"xnor\t%%g0, %1, %0"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
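/* csum_fold() above adds the two 16-bit halves of 'sum' with end-around carry
 * and returns the one's complement, e.g. 0x00012345 folds to
 * ~(0x2345 + 0x0001) & 0xffff == 0xdcb9.
 */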
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
"addxcc\t%2, %0, %0\n\t"
"addxcc\t%3, %0, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum),
"1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
"addcc %3, %4, %%g4\n\t"
"addxcc %5, %%g4, %%g4\n\t"
"ld [%2 + 0x0c], %%g2\n\t"
"ld [%2 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%2 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%2 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x0c], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"addxcc %%g3, %%g4, %0\n\t"
"addx 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g4", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* !(__SPARC_CHECKSUM_H) */
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the V9.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void * buff, int len, __wsum sum);
/* the same as csum_partial, but copies from user space while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
extern long __csum_partial_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum);
static inline __wsum
csum_partial_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_from_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
extern long __csum_partial_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum);
static inline __wsum
csum_and_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_to_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__(
" addcc %0, %1, %1\n"
" srl %1, 16, %1\n"
" addc %1, %%g0, %1\n"
" xnor %%g0, %1, %0\n"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
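/* What the four-instruction fold above amounts to, written as portable C
 * (illustration only): add the high and low 16-bit halves, absorb the
 * carry, and return the complement of the low 16 bits.
 */
static inline unsigned short csum_fold_sketch(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the two halves */
	sum += sum >> 16;			/* absorb a possible carry */
	return (unsigned short)~sum;		/* complement low 16 bits */
}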
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned int len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__(
" addcc %1, %0, %0\n"
" addccc %2, %0, %0\n"
" addccc %3, %0, %0\n"
" addc %0, %%g0, %0\n"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header and
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
" addcc %3, %4, %%g7\n"
" addccc %5, %%g7, %%g7\n"
" lduw [%2 + 0x0c], %%g2\n"
" lduw [%2 + 0x08], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%2 + 0x04], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%2 + 0x00], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%1 + 0x0c], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%1 + 0x08], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%1 + 0x04], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%1 + 0x00], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" addccc %%g3, %%g7, %0\n"
" addc 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr), "r"(htonl(len)),
"r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g7", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* !(__SPARC64_CHECKSUM_H) */
/* cpudata.h: Per-cpu parameters.
*
* Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
*
* Based on include/asm-sparc64/cpudata.h and Linux 2.4 smp.h
* both (C) David S. Miller.
*/
#ifndef _SPARC_CPUDATA_H
#define _SPARC_CPUDATA_H
#include <linux/percpu.h>
typedef struct {
unsigned long udelay_val;
unsigned long clock_tick;
unsigned int multiplier;
unsigned int counter;
int prom_node;
int mid;
int next;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#endif /* _SPARC_CPUDATA_H */
#ifndef ___ASM_SPARC_CPUDATA_H
#define ___ASM_SPARC_CPUDATA_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/cpudata_64.h>
#else
#include <asm-sparc/cpudata_32.h>
#endif
#endif
/* cpudata.h: Per-cpu parameters.
*
* Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
*/
#ifndef _SPARC64_CPUDATA_H
#define _SPARC64_CPUDATA_H
#include <asm/hypervisor.h>
#include <asm/asi.h>
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
#include <linux/threads.h>
typedef struct {
/* Dcache line 1 */
unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
unsigned int __pad0;
unsigned long clock_tick; /* %tick's per second */
unsigned long __pad;
unsigned int __pad1;
unsigned int __pad2;
/* Dcache line 2, rarely used */
unsigned int dcache_size;
unsigned int dcache_line_size;
unsigned int icache_size;
unsigned int icache_line_size;
unsigned int ecache_size;
unsigned int ecache_line_size;
int core_id;
int proc_id;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() __get_cpu_var(__cpu_data)
/* Trap handling code needs to get at a few critical values upon
* trap entry and to process TSB misses. These cannot be in the
* per_cpu() area as we really need to lock them into the TLB and
* thus make them part of the main kernel image. As a result we
* try to make this as small as possible.
*
* This is padded out and aligned to 64-bytes to avoid false sharing
* on SMP.
*/
/* If you modify the size of this structure, please update
* TRAP_BLOCK_SZ_SHIFT below.
*/
struct thread_info;
struct trap_per_cpu {
/* D-cache line 1: Basic thread information, cpu and device mondo queues */
struct thread_info *thread;
unsigned long pgd_paddr;
unsigned long cpu_mondo_pa;
unsigned long dev_mondo_pa;
/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
unsigned long resum_mondo_pa;
unsigned long resum_kernel_buf_pa;
unsigned long nonresum_mondo_pa;
unsigned long nonresum_kernel_buf_pa;
/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
struct hv_fault_status fault_info;
/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
unsigned long cpu_mondo_block_pa;
unsigned long cpu_list_pa;
unsigned long tsb_huge;
unsigned long tsb_huge_temp;
/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
unsigned long irq_worklist_pa;
unsigned int cpu_mondo_qmask;
unsigned int dev_mondo_qmask;
unsigned int resum_qmask;
unsigned int nonresum_qmask;
void *hdesc;
} __attribute__((aligned(64)));
extern struct trap_per_cpu trap_block[NR_CPUS];
extern void init_cur_cpu_trap(struct thread_info *);
extern void setup_tba(void);
extern int ncpus_probed;
extern void __init cpu_probe(void);
extern const struct seq_operations cpuinfo_op;
extern unsigned long real_hard_smp_processor_id(void);
struct cpuid_patch_entry {
unsigned int addr;
unsigned int cheetah_safari[4];
unsigned int cheetah_jbus[4];
unsigned int starfire[4];
unsigned int sun4v[4];
};
extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
struct sun4v_1insn_patch_entry {
unsigned int addr;
unsigned int insn;
};
extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
__sun4v_1insn_patch_end;
struct sun4v_2insn_patch_entry {
unsigned int addr;
unsigned int insns[2];
};
extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
__sun4v_2insn_patch_end;
#endif /* !(__ASSEMBLY__) */
#define TRAP_PER_CPU_THREAD 0x00
#define TRAP_PER_CPU_PGD_PADDR 0x08
#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
#define TRAP_PER_CPU_FAULT_INFO 0x40
#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
#define TRAP_PER_CPU_TSB_HUGE 0xd0
#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
#define TRAP_PER_CPU_RESUM_QMASK 0xf0
#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
#define TRAP_BLOCK_SZ_SHIFT 8
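/* A hypothetical build-time check (not in this header) tying the structure
 * size to the shift used by the trap-entry assembly: BUILD_BUG_ON() from
 * <linux/kernel.h> could be placed in any C file that includes this header.
 */
static inline void trap_block_size_check_sketch(void)
{
	BUILD_BUG_ON(sizeof(struct trap_per_cpu) != (1 << TRAP_BLOCK_SZ_SHIFT));
}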
#include <asm/scratchpad.h>
#define __GET_CPUID(REG) \
/* Spitfire implementation (default). */ \
661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
srlx REG, 17, REG; \
and REG, 0x1f, REG; \
nop; \
.section .cpuid_patch, "ax"; \
/* Instruction location. */ \
.word 661b; \
/* Cheetah Safari implementation. */ \
ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
srlx REG, 17, REG; \
and REG, 0x3ff, REG; \
nop; \
/* Cheetah JBUS implementation. */ \
ldxa [%g0] ASI_JBUS_CONFIG, REG; \
srlx REG, 17, REG; \
and REG, 0x1f, REG; \
nop; \
/* Starfire implementation. */ \
sethi %hi(0x1fff40000d0 >> 9), REG; \
sllx REG, 9, REG; \
or REG, 0xd0, REG; \
lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
/* sun4v implementation. */ \
mov SCRATCHPAD_CPUID, REG; \
ldxa [REG] ASI_SCRATCHPAD, REG; \
nop; \
nop; \
.previous;
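/* Hedged sketch (not part of this header) of how early boot code might walk
 * the __cpuid_patch table emitted by the .cpuid_patch section above: for
 * each entry, overwrite the default Spitfire sequence at entry->addr with
 * the four instructions matching the detected CPU type.  Choosing the
 * sun4v variant here is an assumption for illustration, and a real
 * implementation would also flush the patched I-cache lines.
 */
static void cpuid_patch_walk_sketch(void)
{
	struct cpuid_patch_entry *p = &__cpuid_patch;

	while (p < &__cpuid_patch_end) {
		unsigned int *addr = (unsigned int *)(unsigned long)p->addr;
		unsigned int *insns = p->sun4v;	/* assumed CPU type */
		int i;

		for (i = 0; i < 4; i++)
			addr[i] = insns[i];
		p++;
	}
}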
#ifdef CONFIG_SMP
#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
__GET_CPUID(TMP) \
sethi %hi(trap_block), DEST; \
sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
or DEST, %lo(trap_block), DEST; \
add DEST, TMP, DEST; \
/* Clobbers TMP, loads the current address space PGD phys address into DEST. */
#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
/* Clobbers TMP, loads DEST with current thread info pointer. */
#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
/* Given the current thread info pointer in THR, load the per-cpu
* area base of the current processor into DEST. REG1, REG2, and REG3 are
* clobbered.
*
* You absolutely cannot use DEST as a temporary in this code. The
* reason is that traps can happen during execution, and return from
* trap will load the fully resolved DEST per-cpu base. This can corrupt
* the calculations done by the macro mid-stream.
*/
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
lduh [THR + TI_CPU], REG1; \
sethi %hi(__per_cpu_shift), REG3; \
sethi %hi(__per_cpu_base), REG2; \
ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
ldx [REG2 + %lo(__per_cpu_base)], REG2; \
sllx REG1, REG3, REG3; \
add REG3, REG2, DEST;
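/* In C terms, the macro above computes (illustrative helper, not part of
 * this header):  base + (cpu << shift), using the __per_cpu_base and
 * __per_cpu_shift variables that the assembly loads, with the cpu number
 * taken from thread_info.
 */
static inline unsigned long per_cpu_base_sketch(unsigned int cpu)
{
	extern unsigned long __per_cpu_base, __per_cpu_shift;

	return __per_cpu_base + ((unsigned long)cpu << __per_cpu_shift);
}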
#else
#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
sethi %hi(trap_block), DEST; \
or DEST, %lo(trap_block), DEST; \
/* Uniprocessor versions, we know the cpuid is zero. */
#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
/* No per-cpu areas on uniprocessor, so no need to load DEST. */
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
#endif /* !(CONFIG_SMP) */
#endif /* _SPARC64_CPUDATA_H */
/*
* delay.h: Linux delay routines on the Sparc.
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
*/
#ifndef __SPARC_DELAY_H
#define __SPARC_DELAY_H
#include <asm/cpudata.h>
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__("cmp %0, 0\n\t"
"1: bne 1b\n\t"
"subcc %0, 1, %0\n" :
"=&r" (loops) :
"0" (loops) :
"cc");
}
/* This is too messy with inline asm on the Sparc. */
extern void __udelay(unsigned long usecs, unsigned long lpj);
extern void __ndelay(unsigned long nsecs, unsigned long lpj);
#ifdef CONFIG_SMP
#define __udelay_val cpu_data(smp_processor_id()).udelay_val
#else /* SMP */
#define __udelay_val loops_per_jiffy
#endif /* SMP */
#define udelay(__usecs) __udelay(__usecs, __udelay_val)
#define ndelay(__nsecs) __ndelay(__nsecs, __udelay_val)
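/* Rough, heavily hedged sketch of the scaling the out-of-line __udelay()
 * has to perform (an illustration of the math, not the real routine):
 * convert microseconds into delay-loop iterations using the calibrated
 * loops-per-jiffy value, roughly usecs * lpj * HZ / 1000000, then spin via
 * __delay().  HZ is assumed to come from <linux/param.h>.
 */
static inline void udelay_sketch(unsigned long usecs, unsigned long lpj)
{
	unsigned long long loops = (unsigned long long)usecs * lpj * HZ;

	__delay((unsigned long)(loops / 1000000));
}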
#endif /* defined(__SPARC_DELAY_H) */
#ifndef ___ASM_SPARC_DELAY_H
#define ___ASM_SPARC_DELAY_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/delay_64.h>
#else
#include <asm-sparc/delay_32.h>
#endif
#endif