diff --git a/BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/atomic.h b/BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/atomic.h
new file mode 100644
index 0000000000000000000000000000000000000000..5efc5f17a86852ad7e46e9d80b8bcb318450d09a
--- /dev/null
+++ b/BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/atomic.h
@@ -0,0 +1,608 @@
+/*
+ * Portions Copyright 2008, 2010 VMware, Inc.
+ */
+#ifndef __ARCH_X86_64_ATOMIC__
+#define __ARCH_X86_64_ATOMIC__
+
+#include <asm/alternative.h>
+#if defined(__VMKLNX__)
+#include "vmkapi.h"
+#endif /* defined(__VMKLNX__) */
+
+/* atomic_t should be a 32-bit signed type */
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc.
+ */
+
+#ifdef CONFIG_SMP
+#define LOCK "lock ; "
+#else
+#define LOCK ""
+#endif
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)		{ (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define atomic_read(v)		((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic_set(v,i)		(((v)->counter) = (i))
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.
+ */
+/* _VMKLNX_CODECHECK_: atomic_add */
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "addl %1,%0"
+                :"=m" (v->counter)
+                :"ir" (i), "m" (v->counter));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+}
+
+/**
+ * atomic_sub - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+/* _VMKLNX_CODECHECK_: atomic_sub */
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "subl %1,%0"
+                :"=m" (v->counter)
+                :"ir" (i), "m" (v->counter));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+}
+
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+{
+        unsigned char c;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "subl %2,%0; sete %1"
+                :"=m" (v->counter), "=qm" (c)
+                :"ir" (i), "m" (v->counter) : "memory");
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return c;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+/* _VMKLNX_CODECHECK_: atomic_inc */
+static __inline__ void atomic_inc(atomic_t *v)
+{
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "incl %0"
+                :"=m" (v->counter)
+                :"m" (v->counter));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+}
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
+/* _VMKLNX_CODECHECK_: atomic_dec */
+static __inline__ void atomic_dec(atomic_t *v)
+{
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "decl %0"
+                :"=m" (v->counter)
+                :"m" (v->counter));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+}
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+/* _VMKLNX_CODECHECK_: atomic_dec_and_test */
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+        unsigned char c;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "decl %0; sete %1"
+                :"=m" (v->counter), "=qm" (c)
+                :"m" (v->counter) : "memory");
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return c != 0;
+}
+
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic_inc_and_test(atomic_t *v)
+{
+        unsigned char c;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "incl %0; sete %1"
+                :"=m" (v->counter), "=qm" (c)
+                :"m" (v->counter) : "memory");
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return c != 0;
+}
+
+/**
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __inline__ int atomic_add_negative(int i, atomic_t *v)
+{
+        unsigned char c;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "addl %2,%0; sets %1"
+                :"=m" (v->counter), "=qm" (c)
+                :"ir" (i), "m" (v->counter) : "memory");
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return c;
+}
+
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+/* _VMKLNX_CODECHECK_: atomic_add_return */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+        int __i = i;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "xaddl %0, %1;"
+                :"=r"(i)
+                :"m"(v->counter), "0"(i));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return i + __i;
+}
+
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+        return atomic_add_return(-i,v);
+}
+
+/**
+ * atomic_inc_return - increment by 1 and return
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1 and returns @v + 1
+ *
+ * SYNOPSIS:
+ * #define atomic_inc_return(v)
+ *
+ * RETURN VALUE:
+ * Returns @v + 1
+ */
+/* _VMKLNX_CODECHECK_: atomic_inc_return */
+#define atomic_inc_return(v)  (atomic_add_return(1,v))
+#define atomic_dec_return(v)  (atomic_sub_return(1,v))
+
+/* A 64-bit atomic type */
+
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically reads the value of @v.
+ * Doesn't imply a read memory barrier.
+ */
+#define atomic64_read(v)	((v)->counter)
+
+/**
+ * atomic64_set - set atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#if defined(__VMKLNX__)
+static __inline__ void atomic64_set(atomic64_t *v, long i)
+{
+        /*
+         * Ensure that we do a single movq. Without this, the compiler
+         * may do a write with a constant as two movl operations.
+         */
+        __asm__ __volatile__(
+                "movq %1, %0"
+                : "+m" (v->counter)
+                : "r" (i)
+        );
+}
+#else /* !defined(__VMKLNX__) */
+#define atomic64_set(v,i)	(((v)->counter) = (i))
+#endif /* defined(__VMKLNX__) */
+
+
+/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v.
+ */
+static __inline__ void atomic64_add(long i, atomic64_t *v)
+{
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "addq %1,%0"
+                :"=m" (v->counter)
+                :"ir" (i), "m" (v->counter));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+}
+
+/**
+ * atomic64_sub - subtract integer from atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static __inline__ void atomic64_sub(long i, atomic64_t *v)
+{
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "subq %1,%0"
+                :"=m" (v->counter)
+                :"ir" (i), "m" (v->counter));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+}
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
+{
+        unsigned char c;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "subq %2,%0; sete %1"
+                :"=m" (v->counter), "=qm" (c)
+                :"ir" (i), "m" (v->counter) : "memory");
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return c;
+}
+
+/**
+ * atomic64_inc - increment atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1.
+ */
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "incq %0"
+                :"=m" (v->counter)
+                :"m" (v->counter));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+}
+
+/**
+ * atomic64_dec - decrement atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "decq %0"
+                :"=m" (v->counter)
+                :"m" (v->counter));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+}
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __inline__ int atomic64_dec_and_test(atomic64_t *v)
+{
+        unsigned char c;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "decq %0; sete %1"
+                :"=m" (v->counter), "=qm" (c)
+                :"m" (v->counter) : "memory");
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return c != 0;
+}
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic64_inc_and_test(atomic64_t *v)
+{
+        unsigned char c;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "incq %0; sete %1"
+                :"=m" (v->counter), "=qm" (c)
+                :"m" (v->counter) : "memory");
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return c != 0;
+}
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
+{
+        unsigned char c;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "addq %2,%0; sets %1"
+                :"=m" (v->counter), "=qm" (c)
+                :"ir" (i), "m" (v->counter) : "memory");
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return c;
+}
+
+/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
+{
+        long __i = i;
+
+#if defined(__VMKLNX__)
+        vmk_AtomicPrologue();
+#endif /* defined(__VMKLNX__) */
+        __asm__ __volatile__(
+                LOCK_PREFIX "xaddq %0, %1;"
+                :"=r"(i)
+                :"m"(v->counter), "0"(i));
+#if defined(__VMKLNX__)
+        vmk_AtomicEpilogue();
+#endif /* defined(__VMKLNX__) */
+        return i + __i;
+}
+
+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
+{
+        return atomic64_add_return(-i,v);
+}
+
+#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
+#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
+
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)                      \
+({                                                      \
+        int c, old;                                     \
+        c = atomic_read(v);                             \
+        for (;;) {                                      \
+                if (unlikely(c == (u)))                 \
+                        break;                          \
+                old = atomic_cmpxchg((v), c, c + (a));  \
+                if (likely(old == c))                   \
+                        break;                          \
+                c = old;                                \
+        }                                               \
+        c != (u);                                       \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/* These are x86-specific, used by some header files */
+#if defined(__VMKLNX__)
+
+#define atomic_clear_mask(mask, addr)                   \
+do {                                                    \
+        vmk_AtomicPrologue();                           \
+        __asm__ __volatile__(LOCK_PREFIX "andl %0,%1"   \
+        : : "r" (~(mask)),"m" (*addr) : "memory");      \
+        vmk_AtomicEpilogue();                           \
+} while (0)
+
+#define atomic_set_mask(mask, addr)                     \
+do {                                                    \
+        vmk_AtomicPrologue();                           \
+        __asm__ __volatile__(LOCK_PREFIX "orl %0,%1"    \
+        : : "r" ((unsigned)mask),"m" (*(addr)) : "memory"); \
+        vmk_AtomicEpilogue();                           \
+} while (0)
+
+#else /* !defined(__VMKLNX__) */
+
+#define atomic_clear_mask(mask, addr)                   \
+__asm__ __volatile__(LOCK_PREFIX "andl %0,%1"           \
+: : "r" (~(mask)),"m" (*addr) : "memory")
+
+#define atomic_set_mask(mask, addr)                     \
+__asm__ __volatile__(LOCK_PREFIX "orl %0,%1"            \
+: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
+
+#endif /* defined(__VMKLNX__) */
+
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#include <asm-generic/atomic.h>
+#endif
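
Reviewer note: the one non-obvious construct in this patch is the compare-and-swap retry loop that atomic_add_unless() expands to. Below is a minimal user-space sketch of the same pattern, using GCC's __sync_val_compare_and_swap builtin as a stand-in for the kernel's atomic_cmpxchg(); the demo_* names are illustrative only and are not part of the patch.

#include <stdio.h>

typedef struct { volatile int counter; } demo_atomic_t;

/* Stand-in for atomic_cmpxchg(): returns the value observed at *v;
 * the swap took effect iff that value equals 'old'. */
static int demo_cmpxchg(demo_atomic_t *v, int old, int new_val)
{
        return __sync_val_compare_and_swap(&v->counter, old, new_val);
}

/* Same loop shape as atomic_add_unless(v, a, u): add 'a' to the counter
 * unless it currently holds 'u'. Returns non-zero if the add happened. */
static int demo_add_unless(demo_atomic_t *v, int a, int u)
{
        int c = v->counter;
        for (;;) {
                if (c == u)
                        break;          /* forbidden value: do nothing */
                int old = demo_cmpxchg(v, c, c + a);
                if (old == c)
                        break;          /* CAS won: the add is published */
                c = old;                /* lost a race: retry with fresh value */
        }
        return c != u;
}

int main(void)
{
        demo_atomic_t refs = { 1 };

        /* The atomic_inc_not_zero() idiom: only take a new reference
         * while at least one reference is still held. */
        if (demo_add_unless(&refs, 1, 0))
                printf("got ref, counter = %d\n", refs.counter);

        refs.counter = 0;
        if (!demo_add_unless(&refs, 1, 0))
                printf("refused: counter already reached zero\n");
        return 0;
}

The loop never blocks: a racing writer merely forces one more iteration with the freshly observed value, which is why the kernel uses this shape for lock-free reference counting.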