Location: vmkdrivers/BLD/build/HEADERS/CUR-92-vmkdrivers-asm-x64/vmkernel64/release/asm/system.h
Revision: 85642093068c
Branch: ESXi-5.1-U2
/*
 * Portions Copyright 2008 VMware, Inc.
 */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/alternative.h>
#if defined(__VMKLNX__)
#include "vmkapi.h"
#endif

#ifdef __KERNEL__

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"

#define __EXTRA_CLOBBER  \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT						    \
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
		     "call __switch_to\n\t"					  \
		     ".globl thread_return\n"					\
		     "thread_return:\n\t"					    \
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
		     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
		     "movq %%rax,%%rdi\n\t" 					  \
		     "jc   ret_from_fork\n\t"					  \
		     RESTORE_CONTEXT						    \
		     : "=a" (last)					  	  \
		     : [next] "S" (next), [prev] "D" (prev),			  \
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)),\
		       [tif_fork] "i" (TIF_FORK),			  \
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
		     : "memory", "cc" __EXTRA_CLOBBER)
    
extern void load_gs_index(unsigned); 

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t" 	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{ 
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
} 

static inline void write_cr0(unsigned long val) 
{ 
	asm volatile("movq %0,%%cr0" :: "r" (val));
} 

static inline unsigned long read_cr3(void)
{ 
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
} 

static inline unsigned long read_cr4(void)
{ 
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
} 

static inline void write_cr4(unsigned long val)
{ 
	asm volatile("movq %0,%%cr4" :: "r" (val));
} 

#define stts() write_cr0(8 | read_cr0())
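
/*
 * Illustrative sketch (not part of the original header): stts() above is just
 * a read-modify-write of CR0 that sets the Task Switched flag, bit 3 (0x8).
 * The helper name below is hypothetical and only restates what the macro
 * expands to.
 */
#if 0	/* example only, not compiled */
static inline void example_set_ts(void)
{
	unsigned long cr0 = read_cr0();	/* movq %cr0,%reg */
	write_cr0(cr0 | 0x8);		/* set CR0.TS so the next FPU use traps */
}
#endif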

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

/**
 *  xchg - atomically exchange v with the value in memory pointed to by ptr
 *  @ptr: pointer to the memory location to be exchanged
 *  @v: the value to store at *ptr
 *
 *  Atomically stores v at the location pointed to by ptr and returns the
 *  value previously stored there.
 *
 *  SYNOPSIS:
 *  #define xchg(ptr,v)
 *
 *  RETURN VALUE:
 *  The original value of (*ptr).
 */
/* _VMKLNX_CODECHECK_: xchg */
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
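
/*
 * Illustrative sketch (not part of the original header): xchg()/tas() return
 * the previous contents of *ptr, so a simple test-and-set busy-wait lock can
 * be built by spinning until the old value was 0.  The helper names
 * example_lock/example_unlock and the lock word are hypothetical.
 */
#if 0	/* example only, not compiled */
static inline void example_lock(volatile unsigned long *lock)
{
	while (xchg(lock, 1UL) != 0UL)
		;	/* spin: someone else still holds the lock */
}

static inline void example_unlock(volatile unsigned long *lock)
{
	*lock = 0UL;	/* release; real code would also want a compiler barrier here */
}
#endif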

#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid, *ptr is output argument. --ANK
 */
/**
 *  __xchg - atomically exchange x with the value in memory pointed to by ptr
 *  @x: the value to store at *ptr
 *  @ptr: pointer to the memory location to be exchanged
 *  @size: size of the value being exchanged, in bytes - must be 1, 2, 4, or 8
 *
 *  Atomically stores x at the location pointed to by ptr and returns the
 *  value previously stored there.
 *
 *  RETURN VALUE:
 *  The original value of (*ptr).
 *
 */
/* _VMKLNX_CODECHECK_: __xchg */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %k0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 8:
			__asm__ __volatile__("xchgq %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
#if defined(__VMKLNX__)
                vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
#if defined(__VMKLNX__)
                vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */

		return prev;
	case 2:
#if defined(__VMKLNX__)
                vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
#if defined(__VMKLNX__)
                vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */

		return prev;
	case 4:
#if defined(__VMKLNX__)
                vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
#if defined(__VMKLNX__)
                vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */

		return prev;
	case 8:
#if defined(__VMKLNX__)
                vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
#if defined(__VMKLNX__)
                vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */

		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
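
/*
 * Illustrative sketch (not part of the original header): the usual cmpxchg()
 * pattern is a read-modify-write loop that retries until no other CPU has
 * changed the location in between.  The helper name example_atomic_add is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static inline unsigned long example_atomic_add(volatile unsigned long *p,
					       unsigned long delta)
{
	unsigned long old, prev;
	do {
		old = *p;
		prev = cmpxchg(p, old, old + delta);	/* prev == old on success */
	} while (prev != old);	/* another CPU raced with us; retry */
	return old + delta;
}
#endif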

#ifdef CONFIG_SMP

/**
 *  smp_mb - memory barrier for SMP
 *
 *  On SMP kernels this expands to a full memory barrier (mb()); on UP
 *  kernels it is only a compiler barrier.
 *
 *  SYNOPSIS:
 *    #define smp_mb(void)
 *
 *  RETURN VALUE:
 *  None
 */
 /* _VMKLNX_CODECHECK_: smp_mb */

#define smp_mb()	mb()

/**
 *  smp_rmb - read memory barrier for SMP
 *
 *  On SMP kernels this expands to a read memory barrier (rmb()); on UP
 *  kernels it is only a compiler barrier.
 *
 *  SYNOPSIS:
 *    #define smp_rmb(void)
 *
 *  RETURN VALUE:
 *  None
 */
 /* _VMKLNX_CODECHECK_: smp_rmb */

#define smp_rmb()	rmb()

/**
 *  smp_wmb - write memory barrier for SMP
 *
 *  On SMP kernels this expands to a write memory barrier (wmb()); on UP
 *  kernels it is only a compiler barrier.
 *
 *  SYNOPSIS:
 *    #define smp_wmb(void)
 *
 *  RETURN VALUE:
 *  None
 */
 /* _VMKLNX_CODECHECK_: smp_wmb */

#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif

    
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb() 	asm volatile("mfence":::"memory")

/**
 *  rmb - read memory barrier
 *
 *  A read memory barrier guarantees that all memory reads issued before the
 *  barrier complete before any memory read issued after the barrier.
 *
 *  SYNOPSIS:
 *    #define rmb(void)
 *
 *  RETURN VALUE:
 *  This function does not return a value.
 *
 */
/* _VMKLNX_CODECHECK_: rmb */
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
/**
 *  wmb - write memory barrier
 *
 *  A write memory barrier guarantees that all writes issued before the
 *  barrier become visible before any write issued after the barrier.
 *
 *  SYNOPSIS:
 *    #define wmb()
 *
 *  RETURN VALUE:
 *  None
 */
/* _VMKLNX_CODECHECK_: wmb */
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
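
/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing of wmb()/rmb().  The wmb() keeps the payload
 * store visible before the flag store, and the rmb() keeps the flag read
 * ordered before the payload read.  The example_* names are hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_data;
static volatile int example_ready;

static inline void example_publish(int v)
{
	example_data = v;	/* write the payload ...                   */
	wmb();			/* ... and make it visible before the flag */
	example_ready = 1;
}

static inline int example_read(void)
{
	while (!example_ready)
		;		/* wait for the producer's flag */
	rmb();			/* order the flag read before the payload read */
	return example_data;
}
#endif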

/**
 *  warn_if_not_ulong - warn if the type is not unsigned long
 *  @x: variable to be tested
 *
 *  Triggers a "comparison of distinct pointer types" compiler warning if the
 *  given argument is not of type unsigned long.
 *
 *  SYNOPSIS:
 *    #define warn_if_not_ulong(x)
 *
 *  RETURN VALUE:
 *  None
 */
/* _VMKLNX_CODECHECK_: warn_if_not_ulong */
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
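
/*
 * Illustrative usage (not part of the original header): the macro compares
 * &(x) with the address of an unsigned long, so passing anything other than
 * an unsigned long provokes the warning.  The function and variable names
 * below are hypothetical.
 */
#if 0	/* example only, not compiled */
static inline void example_check(unsigned long flags, unsigned int wrong)
{
	warn_if_not_ulong(flags);	/* no warning: flags is unsigned long */
	warn_if_not_ulong(wrong);	/* warns: wrong is unsigned int       */
}
#endif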

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif