Location: vmkdrivers/BLD/build/HEADERS/92-vmkdrivers-asm-x64/vmkernel64/release/asm/spinlock.h (ESXi-6.0-GA)
/*
* Portions Copyright 2008 VMware, Inc.
*/
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#if defined(__VMKLNX__)
#include "vmkapi.h"
#endif /* defined(__VMKLNX__) */
/*
 * XXX: A header dependency mess makes asm/smp.h impossible to include
 * from spinlock.h.  Declare raw_smp_processor_id() here for now to
 * avoid that mess.
 */
#if defined(__VMKLNX__)
extern uint32_t raw_smp_processor_id(void);
#endif
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* We make no fairness assumptions. They have a cost.
*
* (the type definitions are in asm/spinlock_types.h)
*/
#define __raw_spin_is_locked(x) \
		(*(volatile signed int *)(&(x)->slock) <= 0)
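/*
 * Lock word encoding assumed above: slock starts at 1 (unlocked); the
 * owner's decl takes it to 0, and failed lock attempts by other CPUs
 * drive it negative.  State sketch (illustration only):
 *
 *	slock ==  1	unlocked, __raw_spin_is_locked() == 0
 *	slock ==  0	held, uncontended
 *	slock <   0	held, with CPUs spinning in the slow path
 */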
#if !defined(__VMKLNX__)
#define __raw_spin_lock_string \
	"\n1:\t" \
	LOCK_PREFIX " ; decl %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpl $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END
#else /* defined(__VMKLNX__) */
asmlinkage void __raw_spin_failed(void /* special register calling convention */);
#define __raw_spin_lock_string \
	LOCK_PREFIX " ; decl %0\n\t" \
	"js 2f\n" \
	"1:\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"pushq %%rdi\n\t" \
	"lea %0,%%rdi\n\t" \
	"call __raw_spin_failed\n\t" \
	"popq %%rdi\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END
#endif /* defined(__VMKLNX__) */
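/*
 * In C terms the lock string above implements roughly the loop below
 * (a sketch using GCC atomic builtins and a bare volatile int lock
 * word; it is not the code that is actually emitted).  The vmklinux
 * variant differs only in that the contended path is handled out of
 * line by __raw_spin_failed(), which, judging by the jmp back past the
 * decl, returns with the lock already held.
 */
#if 0 /* illustration only */
static inline void spin_lock_sketch(volatile int *slock)
{
	for (;;) {
		if (__sync_sub_and_fetch(slock, 1) >= 0)	/* lock ; decl */
			return;					/* sign clear: acquired */
		while (*slock <= 0)				/* cmpl $0 ; jle 2b */
			__asm__ __volatile__("rep; nop");	/* pause the pipeline */
	}
}
#endif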
#define __raw_spin_lock_string_up \
	"\n\tdecl %0"
#define __raw_spin_unlock_string \
	"movl $1,%0" \
		:"=m" (lock->slock) : : "memory"
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
	/* record the owning CPU and caller for lock debugging */
	lock->cpu = raw_smp_processor_id();
	lock->ra = (unsigned long) __builtin_return_address(0);
#endif /* defined(__VMKLNX__) */
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	/*
	 * Atomically swap 0 (locked) into slock; the old value tells us
	 * whether the lock was free (> 0) and is therefore now ours.
	 */
	__asm__ __volatile__(
		"xchgl %0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
	if (oldval > 0) {
		lock->cpu = raw_smp_processor_id();
		lock->ra = (unsigned long) __builtin_return_address(0);
	}
#endif /* defined(__VMKLNX__) */
	return oldval > 0;
}
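/*
 * Typical trylock usage (hypothetical caller, illustration only):
 */
#if 0
	if (__raw_spin_trylock(&lock)) {
		/* ... critical section ... */
		__raw_spin_unlock(&lock);
	} else {
		/* lock busy: take a fallback path instead of spinning */
	}
#endif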
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
#if defined(__VMKLNX__)
	/* clear the owner before the lock becomes visible as free */
	lock->cpu = SPINLOCK_VMKERNEL_CPU_INVALID;
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
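/*
 * __raw_spin_unlock_wait() spins until the lock is observed free
 * without ever acquiring it; it is for callers that only need to know
 * that the current holder has finished, not to enter the critical
 * section themselves.
 */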
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*
* NOTE! it is quite common to have readers in interrupts
* but no interrupt writers. For those circumstances we
* can "mix" irq-safe locks - any writer needs to get a
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*
* On x86, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.
*/
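/*
 * Concretely, with the usual x86-64 RW_LOCK_BIAS of 0x01000000 from
 * asm/rwlock.h, the counter evolves like this (illustration only):
 *
 *	lock == RW_LOCK_BIAS		free: no readers, no writer
 *	lock == RW_LOCK_BIAS - n	n readers, one unit each
 *	lock == 0			one writer holding the whole bias
 *	lock <  0			writer held, with contenders in the
 *					slow path
 */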
#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__build_read_lock(rw);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__build_write_lock(rw);
}
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	/* optimistically take a reader slot; back out if a writer holds it */
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	/* the counter hits zero only if it was exactly RW_LOCK_BIAS (free) */
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	/* give back this reader's unit of the counter */
	asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	/* return the full writer bias, marking the lock free again */
	asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
		     : "=m" (rw->lock) : : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
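/*
 * Reader/writer pairing (hypothetical caller, illustration only):
 */
#if 0
	__raw_read_lock(&rw);		/* any number of CPUs may hold this */
	/* ... read the shared data ... */
	__raw_read_unlock(&rw);

	__raw_write_lock(&rw);		/* exclusive: waits out all readers */
	/* ... modify the shared data ... */
	__raw_write_unlock(&rw);
#endif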
#endif /* __ASM_SPINLOCK_H */