! ! !
unknown <unknown@example.com> - 2015-10-23 19:21:55
ESXi-4.1-U3
1346 files changed (changeset was too big and was cut off)
 
new file 100644
/*
 * 8253/8254 Programmable Interval Timer
 */

#ifndef _8253PIT_H
#define _8253PIT_H

#define PIT_TICK_RATE	1193182UL

#endif
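PIT_TICK_RATE is the 8254's input clock in Hz; a consumer typically divides it by the desired tick frequency to obtain the counter reload value. A minimal sketch, assuming illustrative HZ and LATCH names that are not part of this header:

/* Illustrative only: rounded reload value for a 100 Hz periodic tick. */
#define HZ	100
#define LATCH	((PIT_TICK_RATE + HZ / 2) / HZ)	/* divisor programmed into the 8254 */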
 
new file 100644
#ifndef __X8664_A_OUT_H__
#define __X8664_A_OUT_H__

/* 32bit a.out */

struct exec
{
  unsigned int a_info;		/* Use macros N_MAGIC, etc for access */
  unsigned a_text;		/* length of text, in bytes */
  unsigned a_data;		/* length of data, in bytes */
  unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
  unsigned a_syms;		/* length of symbol table data in file, in bytes */
  unsigned a_entry;		/* start address */
  unsigned a_trsize;		/* length of relocation info for text, in bytes */
  unsigned a_drsize;		/* length of relocation info for data, in bytes */
};

#define N_TRSIZE(a)	((a).a_trsize)
#define N_DRSIZE(a)	((a).a_drsize)
#define N_SYMSIZE(a)	((a).a_syms)

#ifdef __KERNEL__
#include <linux/thread_info.h>
#define STACK_TOP TASK_SIZE
#endif

#endif /* __A_OUT_GNU_H__ */
 
new file 100644
/*
 *  asm-x86_64/acpi.h
 *
 *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#ifdef __KERNEL__

#include <acpi/pdc_intel.h>

#define COMPILER_DEPENDENT_INT64   long long
#define COMPILER_DEPENDENT_UINT64  unsigned long long

/*
 * Calling conventions:
 *
 * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
 * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
 * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
 * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
 */
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE

/* Asm macros */

#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() local_irq_disable()
#define ACPI_ENABLE_IRQS()  local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE()	wbinvd()

static inline int
__acpi_acquire_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return (new < 3) ? -1 : 0;
}

static inline int
__acpi_release_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return old & 0x1;
}

#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
	((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr))

#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
	((Acq) = __acpi_release_global_lock((unsigned int *) GLptr))

/*
 * Math helper asm macros
 */
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
        asm("divl %2;"        \
        :"=a"(q32), "=d"(r32) \
        :"r"(d32),            \
        "0"(n_lo), "1"(n_hi))

#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
    asm("shrl   $1,%2;"             \
        "rcrl   $1,%3;"             \
        :"=r"(n_hi), "=r"(n_lo)     \
        :"0"(n_hi), "1"(n_lo))

#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_ht;
static inline void disable_acpi(void)
{
	acpi_disabled = 1;
	acpi_ht = 0;
	acpi_pci_disabled = 1;
	acpi_noirq = 1;
}

/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
#define FIX_ACPI_PAGES 4

extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
	acpi_pci_disabled = 1;
	acpi_noirq_set();
}
extern int acpi_irq_balance_set(char *str);

#else	/* !CONFIG_ACPI */

#define acpi_lapic 0
#define acpi_ioapic 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }

#endif /* !CONFIG_ACPI */

extern int acpi_numa;
extern int acpi_scan_nodes(unsigned long start, unsigned long end);
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)

#ifdef CONFIG_ACPI_SLEEP

/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
extern void acpi_restore_state_mem(void);

extern unsigned long acpi_wakeup_address;

/* early initialization routine */
extern void acpi_reserve_bootmem(void);

#endif /*CONFIG_ACPI_SLEEP*/

#define boot_cpu_physical_apicid boot_cpu_id

extern int acpi_disabled;
extern int acpi_pci_disabled;

extern u8 x86_acpiid_to_apicid[];

#define ARCH_HAS_POWER_INIT 1

extern int acpi_skip_timer_override;

#endif /*__KERNEL__*/

#endif /*_ASM_ACPI_H*/
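Reading the code above, __acpi_acquire_global_lock() returns -1 once the owner bit has been taken and 0 when only the pending bit could be set, so callers wait for the firmware's release event and retry. A hedged sketch of a call site using the wrapper macros (the lock pointer and helper name are illustrative):

/* Illustrative only: take and drop the ACPI global lock via the macros above. */
static int example_with_global_lock(unsigned int *facs_lock)
{
	int acquired;

	ACPI_ACQUIRE_GLOBAL_LOCK(facs_lock, acquired);
	if (!acquired)
		return 0;	/* pending bit set: wait for the release event, then retry */
	/* ... critical section ... */
	ACPI_RELEASE_GLOBAL_LOCK(facs_lock, acquired);	/* nonzero => signal the firmware */
	return 1;
}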
 
new file 100644
#ifndef AGP_H
#define AGP_H 1

#include <asm/cacheflush.h>

/*
 * Functions to keep the agpgart mappings coherent.
 * The GART gives the CPU a physical alias of memory. The alias is
 * mapped uncacheable. Make sure there are no conflicting mappings
 * with different cachability attributes for the same page.
 */

int map_page_into_agp(struct page *page);
int unmap_page_from_agp(struct page *page);
#define flush_agp_mappings() global_flush_tlb()

/* Could use CLFLUSH here if the cpu supports it. But then it would
   need to be called for each cacheline of the whole page so it may not be
   worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")

/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)

/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order)		\
	((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order)	\
	free_pages((unsigned long)(table), (order))

#endif
 
new file 100644
#ifndef _X86_64_ALTERNATIVE_H
#define _X86_64_ALTERNATIVE_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <asm/cpufeature.h>

struct alt_instr {
	u8 *instr;		/* original instruction */
	u8 *replacement;
	u8  cpuid;		/* cpuid bit set for replacement */
	u8  instrlen;		/* length of original instruction */
	u8  replacementlen;	/* length of new instruction, <= instrlen */
	u8  pad[5];
};

extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);

struct module;

#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif

#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows to use optimized instructions even on generic binary
 * kernels.
 *
 * length of oldinstr must be longer or equal the length of newinstr
 * It can be padded with nops as needed.
 *
 * For non barrier like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      "  .align 8\n"				\
		      "  .quad 661b\n"            /* label */		\
		      "  .quad 663f\n"		  /* new instruction */	\
		      "  .byte %c0\n"             /* feature bit */	\
		      "  .byte 662b-661b\n"       /* sourcelen */	\
		      "  .byte 664f-663f\n"       /* replacementlen */	\
		      ".previous\n"				\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature) : "memory")

/*
 * Alternative inline assembly with input.
 *
 * Pecularities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable sized constraints like "m" or "g" in the
 * replacement make sure to pad to the worst case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      "  .align 8\n"				\
		      "  .quad 661b\n"            /* label */		\
		      "  .quad 663f\n"		  /* new instruction */	\
		      "  .byte %c0\n"             /* feature bit */	\
		      "  .byte 662b-661b\n"       /* sourcelen */	\
		      "  .byte 664f-663f\n"       /* replacementlen */	\
		      ".previous\n"				\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature), ##input)

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...) \
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      "  .align 8\n"				\
		      "  .quad 661b\n"            /* label */		\
		      "  .quad 663f\n"		  /* new instruction */	\
		      "  .byte %c[feat]\n"        /* feature bit */	\
		      "  .byte 662b-661b\n"       /* sourcelen */	\
		      "  .byte 664f-663f\n"       /* replacementlen */	\
		      ".previous\n"				\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" : output : [feat] "i" (feature), ##input)

/*
 * Alternative inline assembly for SMP.
 *
 * The LOCK_PREFIX macro defined here replaces the LOCK and
 * LOCK_PREFIX macros used everywhere in the source tree.
 *
 * SMP alternatives use the same data structures as the other
 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
 * UP system running a SMP kernel.  The existing apply_alternatives()
 * works fine for patching a SMP kernel for UP.
 *
 * The SMP alternative tables can be kept after boot and contain both
 * UP and SMP versions of the instructions to allow switching back to
 * SMP at runtime, when hotplugging in a new CPU, which is especially
 * useful in virtualized environments.
 *
 * The very common lock prefix is handled as special case in a
 * separate table which is a pure address list without replacement ptr
 * and size information.  That keeps the table sizes small.
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX \
		".section .smp_locks,\"a\"\n"	\
		"  .align 8\n"			\
		"  .quad 661f\n" /* address */	\
		".previous\n"			\
		"661:\n\tlock; "

#else /* ! CONFIG_SMP */
#define LOCK_PREFIX ""
#endif

#endif /* _X86_64_ALTERNATIVE_H */
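As the comments above describe, alternative() patches oldinstr with newinstr at boot when the CPU advertises the given feature bit. A minimal sketch of a call site, assuming the X86_FEATURE_XMM2 flag from <asm/cpufeature.h>; the barrier is only an illustration, not how this tree actually defines mb():

/* Illustrative only: use MFENCE where SSE2 exists, a locked add elsewhere. */
#define example_mb() \
	alternative("lock; addl $0,0(%%rsp)", "mfence", X86_FEATURE_XMM2)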
 
new file 100644
#ifndef __ASM_APIC_H
#define __ASM_APIC_H

#include <linux/pm.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/system.h>

#define Dprintk(x...)

/*
 * Debugging macros
 */
#define APIC_QUIET   0
#define APIC_VERBOSE 1
#define APIC_DEBUG   2

extern int apic_verbosity;
extern int apic_runs_main_timer;

/*
 * Define the default level of output to be very little
 * This can be turned up by using apic=verbose for more
 * information and apic=debug for _lots_ of information.
 * apic_verbosity is defined in apic.c
 */
#define apic_printk(v, s, a...) do {       \
		if ((v) <= apic_verbosity) \
			printk(s, ##a);    \
	} while (0)

#ifdef CONFIG_X86_LOCAL_APIC

struct pt_regs;

/*
 * Basic functions accessing APICs.
 */

static __inline void apic_write(unsigned long reg, unsigned int v)
{
	*((volatile unsigned int *)(APIC_BASE+reg)) = v;
}

static __inline unsigned int apic_read(unsigned long reg)
{
	return *((volatile unsigned int *)(APIC_BASE+reg));
}

static __inline__ void apic_wait_icr_idle(void)
{
	while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
		cpu_relax();
}

static inline void ack_APIC_irq(void)
{
	/*
	 * ack_APIC_irq() actually gets compiled as a single instruction:
	 * - a single rmw on Pentium/82489DX
	 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
	 * ... yummie.
	 */

	/* Docs say use 0 for future compatibility */
	apic_write(APIC_EOI, 0);
}

extern int get_maxlvt (void);
extern void clear_local_APIC (void);
extern void connect_bsp_APIC (void);
extern void disconnect_bsp_APIC (int virt_wire_setup);
extern void disable_local_APIC (void);
extern int verify_local_APIC (void);
extern void cache_APIC_registers (void);
extern void sync_Arb_IDs (void);
extern void init_bsp_APIC (void);
extern void setup_local_APIC (void);
extern void init_apic_mappings (void);
extern void smp_local_timer_interrupt (struct pt_regs * regs);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
extern int APIC_init_uniprocessor (void);
extern void disable_APIC_timer(void);
extern void enable_APIC_timer(void);
extern void clustered_apic_check(void);

extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
				   unsigned char msg_type, unsigned char mask);

#define K8_APIC_EXT_LVT_BASE    0x500
#define K8_APIC_EXT_INT_MSG_FIX 0x0
#define K8_APIC_EXT_INT_MSG_SMI 0x2
#define K8_APIC_EXT_INT_MSG_NMI 0x4
#define K8_APIC_EXT_INT_MSG_EXT 0x7
#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD    0

extern int disable_timer_pin_1;

#ifndef CONFIG_XEN
void smp_send_timer_broadcast_ipi(void);
void switch_APIC_timer_to_ipi(void *cpumask);
void switch_ipi_to_APIC_timer(void *cpumask);

#define ARCH_APICTIMER_STOPS_ON_C3	1
#endif

#endif /* CONFIG_X86_LOCAL_APIC */

extern unsigned boot_cpu_id;

#endif /* __ASM_APIC_H */
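apic_printk() compares its level against apic_verbosity, which the apic=verbose and apic=debug options raise as the comment above notes. A small hedged usage sketch (the function is hypothetical; APIC_ID and GET_APIC_ID come from <asm/apicdef.h>):

/* Illustrative only: log the local APIC id when 'apic=verbose' or higher is set. */
static void example_report_apic_id(void)
{
	apic_printk(APIC_VERBOSE, "local APIC id %u\n", GET_APIC_ID(apic_read(APIC_ID)));
}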
 
new file 100644
1
 
#ifndef __ASM_APICDEF_H
2
 
#define __ASM_APICDEF_H
3
 

	
4
 
/*
5
 
 * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
6
 
 *
7
 
 * Alan Cox <Alan.Cox@linux.org>, 1995.
8
 
 * Ingo Molnar <mingo@redhat.com>, 1999, 2000
9
 
 */
10
 

	
11
 
#define		APIC_DEFAULT_PHYS_BASE	0xfee00000
12
 
 
13
 
#define		APIC_ID		0x20
14
 
#define			APIC_ID_MASK		(0xFFu<<24)
15
 
#define			GET_APIC_ID(x)		(((x)>>24)&0xFFu)
16
 
#define			SET_APIC_ID(x)		(((x)<<24))
17
 
#define		APIC_LVR	0x30
18
 
#define			APIC_LVR_MASK		0xFF00FF
19
 
#define			GET_APIC_VERSION(x)	((x)&0xFFu)
20
 
#define			GET_APIC_MAXLVT(x)	(((x)>>16)&0xFFu)
21
 
#define			APIC_INTEGRATED(x)	((x)&0xF0u)
22
 
#define		APIC_TASKPRI	0x80
23
 
#define			APIC_TPRI_MASK		0xFFu
24
 
#define		APIC_ARBPRI	0x90
25
 
#define			APIC_ARBPRI_MASK	0xFFu
26
 
#define		APIC_PROCPRI	0xA0
27
 
#define		APIC_EOI	0xB0
28
 
#define			APIC_EIO_ACK		0x0		/* Write this to the EOI register */
29
 
#define		APIC_RRR	0xC0
30
 
#define		APIC_LDR	0xD0
31
 
#define			APIC_LDR_MASK		(0xFFu<<24)
32
 
#define			GET_APIC_LOGICAL_ID(x)	(((x)>>24)&0xFFu)
33
 
#define			SET_APIC_LOGICAL_ID(x)	(((x)<<24))
34
 
#define			APIC_ALL_CPUS		0xFFu
35
 
#define		APIC_DFR	0xE0
36
 
#define			APIC_DFR_CLUSTER		0x0FFFFFFFul
37
 
#define			APIC_DFR_FLAT			0xFFFFFFFFul
38
 
#define		APIC_SPIV	0xF0
39
 
#define			APIC_SPIV_FOCUS_DISABLED	(1<<9)
40
 
#define			APIC_SPIV_APIC_ENABLED		(1<<8)
41
 
#define		APIC_ISR	0x100
42
 
#define		APIC_ISR_NR	0x8	/* Number of 32 bit ISR registers. */
43
 
#define		APIC_TMR	0x180
44
 
#define 	APIC_IRR	0x200
45
 
#define 	APIC_ESR	0x280
46
 
#define			APIC_ESR_SEND_CS	0x00001
47
 
#define			APIC_ESR_RECV_CS	0x00002
48
 
#define			APIC_ESR_SEND_ACC	0x00004
49
 
#define			APIC_ESR_RECV_ACC	0x00008
50
 
#define			APIC_ESR_SENDILL	0x00020
51
 
#define			APIC_ESR_RECVILL	0x00040
52
 
#define			APIC_ESR_ILLREGA	0x00080
53
 
#define		APIC_ICR	0x300
54
 
#define			APIC_DEST_SELF		0x40000
55
 
#define			APIC_DEST_ALLINC	0x80000
56
 
#define			APIC_DEST_ALLBUT	0xC0000
57
 
#define			APIC_ICR_RR_MASK	0x30000
58
 
#define			APIC_ICR_RR_INVALID	0x00000
59
 
#define			APIC_ICR_RR_INPROG	0x10000
60
 
#define			APIC_ICR_RR_VALID	0x20000
61
 
#define			APIC_INT_LEVELTRIG	0x08000
62
 
#define			APIC_INT_ASSERT		0x04000
63
 
#define			APIC_ICR_BUSY		0x01000
64
 
#define			APIC_DEST_LOGICAL	0x00800
65
 
#define			APIC_DEST_PHYSICAL	0x00000
66
 
#define			APIC_DM_FIXED		0x00000
67
 
#define			APIC_DM_LOWEST		0x00100
68
 
#define			APIC_DM_SMI		0x00200
69
 
#define			APIC_DM_REMRD		0x00300
70
 
#define			APIC_DM_NMI		0x00400
71
 
#define			APIC_DM_INIT		0x00500
72
 
#define			APIC_DM_STARTUP		0x00600
73
 
#define			APIC_DM_EXTINT		0x00700
74
 
#define			APIC_VECTOR_MASK	0x000FF
75
 
#define		APIC_ICR2	0x310
76
 
#define			GET_APIC_DEST_FIELD(x)	(((x)>>24)&0xFF)
77
 
#define			SET_APIC_DEST_FIELD(x)	((x)<<24)
78
 
#define		APIC_LVTT	0x320
79
 
#define		APIC_LVTTHMR	0x330
80
 
#define		APIC_LVTPC	0x340
81
 
#define		APIC_LVT0	0x350
82
 
#define			APIC_LVT_TIMER_BASE_MASK	(0x3<<18)
83
 
#define			GET_APIC_TIMER_BASE(x)		(((x)>>18)&0x3)
84
 
#define			SET_APIC_TIMER_BASE(x)		(((x)<<18))
85
 
#define			APIC_TIMER_BASE_CLKIN		0x0
86
 
#define			APIC_TIMER_BASE_TMBASE		0x1
87
 
#define			APIC_TIMER_BASE_DIV		0x2
88
 
#define			APIC_LVT_TIMER_PERIODIC		(1<<17)
89
 
#define			APIC_LVT_MASKED			(1<<16)
90
 
#define			APIC_LVT_LEVEL_TRIGGER		(1<<15)
91
 
#define			APIC_LVT_REMOTE_IRR		(1<<14)
92
 
#define			APIC_INPUT_POLARITY		(1<<13)
93
 
#define			APIC_SEND_PENDING		(1<<12)
94
 
#define			APIC_MODE_MASK			0x700
95
 
#define			GET_APIC_DELIVERY_MODE(x)	(((x)>>8)&0x7)
96
 
#define			SET_APIC_DELIVERY_MODE(x,y)	(((x)&~0x700)|((y)<<8))
97
 
#define				APIC_MODE_FIXED		0x0
98
 
#define				APIC_MODE_NMI		0x4
99
 
#define				APIC_MODE_EXTINT	0x7
100
 
#define 	APIC_LVT1	0x360
101
 
#define		APIC_LVTERR	0x370
102
 
#define		APIC_TMICT	0x380
103
 
#define		APIC_TMCCT	0x390
104
 
#define		APIC_TDCR	0x3E0
105
 
#define			APIC_TDR_DIV_TMBASE	(1<<2)
106
 
#define			APIC_TDR_DIV_1		0xB
107
 
#define			APIC_TDR_DIV_2		0x0
108
 
#define			APIC_TDR_DIV_4		0x1
109
 
#define			APIC_TDR_DIV_8		0x2
110
 
#define			APIC_TDR_DIV_16		0x3
111
 
#define			APIC_TDR_DIV_32		0x8
112
 
#define			APIC_TDR_DIV_64		0x9
113
 
#define			APIC_TDR_DIV_128	0xA
114
 

	
115
 
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
116
 

	
117
 
#define MAX_IO_APICS 128
118
 
#define MAX_LOCAL_APIC 256
119
 

	
120
 
/*
121
 
 * All x86-64 systems are xAPIC compatible.
122
 
 * In the following, "apicid" is a physical APIC ID.
123
 
 */
124
 
#define XAPIC_DEST_CPUS_SHIFT	4
125
 
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
126
 
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
127
 
#define APIC_CLUSTER(apicid)	((apicid) & XAPIC_DEST_CLUSTER_MASK)
128
 
#define APIC_CLUSTERID(apicid)	(APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
129
 
#define APIC_CPUID(apicid)	((apicid) & XAPIC_DEST_CPUS_MASK)
130
 
#define NUM_APIC_CLUSTERS	((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
131
 

	
132
 
/*
133
 
 * the local APIC register structure, memory mapped. Not terribly well
134
 
 * tested, but we might eventually use this one in the future - the
135
 
 * problem why we cannot use it right now is the P5 APIC, it has an
136
 
 * errata which cannot take 8-bit reads and writes, only 32-bit ones ...
137
 
 */
138
 
#define u32 unsigned int
139
 

	
140
 
struct local_apic {
141
 

	
142
 
/*000*/	struct { u32 __reserved[4]; } __reserved_01;
143
 

	
144
 
/*010*/	struct { u32 __reserved[4]; } __reserved_02;
145
 

	
146
 
/*020*/	struct { /* APIC ID Register */
147
 
		u32   __reserved_1	: 24,
148
 
			phys_apic_id	:  4,
149
 
			__reserved_2	:  4;
150
 
		u32 __reserved[3];
151
 
	} id;
152
 

	
153
 
/*030*/	const
154
 
	struct { /* APIC Version Register */
155
 
		u32   version		:  8,
156
 
			__reserved_1	:  8,
157
 
			max_lvt		:  8,
158
 
			__reserved_2	:  8;
159
 
		u32 __reserved[3];
160
 
	} version;
161
 

	
162
 
/*040*/	struct { u32 __reserved[4]; } __reserved_03;
163
 

	
164
 
/*050*/	struct { u32 __reserved[4]; } __reserved_04;
165
 

	
166
 
/*060*/	struct { u32 __reserved[4]; } __reserved_05;
167
 

	
168
 
/*070*/	struct { u32 __reserved[4]; } __reserved_06;
169
 

	
170
 
/*080*/	struct { /* Task Priority Register */
171
 
		u32   priority	:  8,
172
 
			__reserved_1	: 24;
173
 
		u32 __reserved_2[3];
174
 
	} tpr;
175
 

	
176
 
/*090*/	const
177
 
	struct { /* Arbitration Priority Register */
178
 
		u32   priority	:  8,
179
 
			__reserved_1	: 24;
180
 
		u32 __reserved_2[3];
181
 
	} apr;
182
 

	
183
 
/*0A0*/	const
184
 
	struct { /* Processor Priority Register */
185
 
		u32   priority	:  8,
186
 
			__reserved_1	: 24;
187
 
		u32 __reserved_2[3];
188
 
	} ppr;
189
 

	
190
 
/*0B0*/	struct { /* End Of Interrupt Register */
191
 
		u32   eoi;
192
 
		u32 __reserved[3];
193
 
	} eoi;
194
 

	
195
 
/*0C0*/	struct { u32 __reserved[4]; } __reserved_07;
196
 

	
197
 
/*0D0*/	struct { /* Logical Destination Register */
198
 
		u32   __reserved_1	: 24,
199
 
			logical_dest	:  8;
200
 
		u32 __reserved_2[3];
201
 
	} ldr;
202
 

	
203
 
/*0E0*/	struct { /* Destination Format Register */
204
 
		u32   __reserved_1	: 28,
205
 
			model		:  4;
206
 
		u32 __reserved_2[3];
207
 
	} dfr;
208
 

	
209
 
/*0F0*/	struct { /* Spurious Interrupt Vector Register */
210
 
		u32	spurious_vector	:  8,
211
 
			apic_enabled	:  1,
212
 
			focus_cpu	:  1,
213
 
			__reserved_2	: 22;
214
 
		u32 __reserved_3[3];
215
 
	} svr;
216
 

	
217
 
/*100*/	struct { /* In Service Register */
218
 
/*170*/		u32 bitfield;
219
 
		u32 __reserved[3];
220
 
	} isr [8];
221
 

	
222
 
/*180*/	struct { /* Trigger Mode Register */
223
 
/*1F0*/		u32 bitfield;
224
 
		u32 __reserved[3];
225
 
	} tmr [8];
226
 

	
227
 
/*200*/	struct { /* Interrupt Request Register */
228
 
/*270*/		u32 bitfield;
229
 
		u32 __reserved[3];
230
 
	} irr [8];
231
 

	
232
 
/*280*/	union { /* Error Status Register */
233
 
		struct {
234
 
			u32   send_cs_error			:  1,
235
 
				receive_cs_error		:  1,
236
 
				send_accept_error		:  1,
237
 
				receive_accept_error		:  1,
238
 
				__reserved_1			:  1,
239
 
				send_illegal_vector		:  1,
240
 
				receive_illegal_vector		:  1,
241
 
				illegal_register_address	:  1,
242
 
				__reserved_2			: 24;
243
 
			u32 __reserved_3[3];
244
 
		} error_bits;
245
 
		struct {
246
 
			u32 errors;
247
 
			u32 __reserved_3[3];
248
 
		} all_errors;
249
 
	} esr;
250
 

	
251
 
/*290*/	struct { u32 __reserved[4]; } __reserved_08;
252
 

	
253
 
/*2A0*/	struct { u32 __reserved[4]; } __reserved_09;
254
 

	
255
 
/*2B0*/	struct { u32 __reserved[4]; } __reserved_10;
256
 

	
257
 
/*2C0*/	struct { u32 __reserved[4]; } __reserved_11;
258
 

	
259
 
/*2D0*/	struct { u32 __reserved[4]; } __reserved_12;
260
 

	
261
 
/*2E0*/	struct { u32 __reserved[4]; } __reserved_13;
262
 

	
263
 
/*2F0*/	struct { u32 __reserved[4]; } __reserved_14;
264
 

	
265
 
/*300*/	struct { /* Interrupt Command Register 1 */
266
 
		u32   vector			:  8,
267
 
			delivery_mode		:  3,
268
 
			destination_mode	:  1,
269
 
			delivery_status		:  1,
270
 
			__reserved_1		:  1,
271
 
			level			:  1,
272
 
			trigger			:  1,
273
 
			__reserved_2		:  2,
274
 
			shorthand		:  2,
275
 
			__reserved_3		:  12;
276
 
		u32 __reserved_4[3];
277
 
	} icr1;
278
 

	
279
 
/*310*/	struct { /* Interrupt Command Register 2 */
280
 
		union {
281
 
			u32   __reserved_1	: 24,
282
 
				phys_dest	:  4,
283
 
				__reserved_2	:  4;
284
 
			u32   __reserved_3	: 24,
285
 
				logical_dest	:  8;
286
 
		} dest;
287
 
		u32 __reserved_4[3];
288
 
	} icr2;
289
 

	
290
 
/*320*/	struct { /* LVT - Timer */
291
 
		u32   vector		:  8,
292
 
			__reserved_1	:  4,
293
 
			delivery_status	:  1,
294
 
			__reserved_2	:  3,
295
 
			mask		:  1,
296
 
			timer_mode	:  1,
297
 
			__reserved_3	: 14;
298
 
		u32 __reserved_4[3];
299
 
	} lvt_timer;
300
 

	
301
 
/*330*/	struct { /* LVT - Thermal Sensor */
302
 
		u32  vector		:  8,
303
 
			delivery_mode	:  3,
304
 
			__reserved_1	:  1,
305
 
			delivery_status	:  1,
306
 
			__reserved_2	:  3,
307
 
			mask		:  1,
308
 
			__reserved_3	: 15;
309
 
		u32 __reserved_4[3];
310
 
	} lvt_thermal;
311
 

	
312
 
/*340*/	struct { /* LVT - Performance Counter */
313
 
		u32   vector		:  8,
314
 
			delivery_mode	:  3,
315
 
			__reserved_1	:  1,
316
 
			delivery_status	:  1,
317
 
			__reserved_2	:  3,
318
 
			mask		:  1,
319
 
			__reserved_3	: 15;
320
 
		u32 __reserved_4[3];
321
 
	} lvt_pc;
322
 

	
323
 
/*350*/	struct { /* LVT - LINT0 */
324
 
		u32   vector		:  8,
325
 
			delivery_mode	:  3,
326
 
			__reserved_1	:  1,
327
 
			delivery_status	:  1,
328
 
			polarity	:  1,
329
 
			remote_irr	:  1,
330
 
			trigger		:  1,
331
 
			mask		:  1,
332
 
			__reserved_2	: 15;
333
 
		u32 __reserved_3[3];
334
 
	} lvt_lint0;
335
 

	
336
 
/*360*/	struct { /* LVT - LINT1 */
337
 
		u32   vector		:  8,
338
 
			delivery_mode	:  3,
339
 
			__reserved_1	:  1,
340
 
			delivery_status	:  1,
341
 
			polarity	:  1,
342
 
			remote_irr	:  1,
343
 
			trigger		:  1,
344
 
			mask		:  1,
345
 
			__reserved_2	: 15;
346
 
		u32 __reserved_3[3];
347
 
	} lvt_lint1;
348
 

	
349
 
/*370*/	struct { /* LVT - Error */
350
 
		u32   vector		:  8,
351
 
			__reserved_1	:  4,
352
 
			delivery_status	:  1,
353
 
			__reserved_2	:  3,
354
 
			mask		:  1,
355
 
			__reserved_3	: 15;
356
 
		u32 __reserved_4[3];
357
 
	} lvt_error;
358
 

	
359
 
/*380*/	struct { /* Timer Initial Count Register */
360
 
		u32   initial_count;
361
 
		u32 __reserved_2[3];
362
 
	} timer_icr;
363
 

	
364
 
/*390*/	const
365
 
	struct { /* Timer Current Count Register */
366
 
		u32   curr_count;
367
 
		u32 __reserved_2[3];
368
 
	} timer_ccr;
369
 

	
370
 
/*3A0*/	struct { u32 __reserved[4]; } __reserved_16;
371
 

	
372
 
/*3B0*/	struct { u32 __reserved[4]; } __reserved_17;
373
 

	
374
 
/*3C0*/	struct { u32 __reserved[4]; } __reserved_18;
375
 

	
376
 
/*3D0*/	struct { u32 __reserved[4]; } __reserved_19;
377
 

	
378
 
/*3E0*/	struct { /* Timer Divide Configuration Register */
379
 
		u32   divisor		:  4,
380
 
			__reserved_1	: 28;
381
 
		u32 __reserved_2[3];
382
 
	} timer_dcr;
383
 

	
384
 
/*3F0*/	struct { u32 __reserved[4]; } __reserved_20;
385
 

	
386
 
} __attribute__ ((packed));
387
 

	
388
 
#undef u32
389
 

	
390
 
#define BAD_APICID 0xFFu
391
 

	
392
 
#endif
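The APIC_ICR/APIC_ICR2 bit definitions above are OR-ed together to build the value written when sending an IPI. A hedged sketch of a fixed-vector IPI to one physical APIC id (apic_write() comes from <asm/apic.h>; ICR-busy waiting and error handling are omitted):

/* Illustrative only: send the given vector to one physical APIC id. */
static void example_send_fixed_ipi(unsigned int apicid, unsigned int vector)
{
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
	apic_write(APIC_ICR, APIC_DEST_PHYSICAL | APIC_DM_FIXED | vector);
}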
 
new file 100644
/*
 * Portions Copyright 2008, 2010 VMware, Inc.
 */
#ifndef __ARCH_X86_64_ATOMIC__
#define __ARCH_X86_64_ATOMIC__

#include <asm/alternative.h>
#if defined(__VMKLNX__)
#include "vmkapi.h"
#endif /* defined(__VMKLNX__) */

/* atomic_t should be 32 bit signed type */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
/* _VMKLNX_CODECHECK_: atomic_add */
static __inline__ void atomic_add(int i, atomic_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
/* _VMKLNX_CODECHECK_: atomic_sub */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
/* _VMKLNX_CODECHECK_: atomic_inc */
static __inline__ void atomic_inc(atomic_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
/* _VMKLNX_CODECHECK_: atomic_dec */
static __inline__ void atomic_dec(atomic_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
/* _VMKLNX_CODECHECK_: atomic_dec_and_test */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c != 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c;
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
/* _VMKLNX_CODECHECK_: atomic_add_return */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return i + __i;
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i,v);
}

/**
 * atomic_inc_return - increment by 1 and return
 * @v: integer value to increment
 *
 * Atomically increments @v by 1 and returns @v + 1
 *
 * SYNOPSIS:
 * #define atomic_inc_return(v)
 *
 * RETURN VALUE:
 * Returns @v + 1
 */
/* _VMKLNX_CODECHECK_: atomic_inc_return */
#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))

/* A 64bit atomic type */

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
#define atomic64_read(v)		((v)->counter)

/**
 * atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#if defined(__VMKLNX__)
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
  /*
   * Ensure that we do a single movq. Without this, the compiler
   * may do write with a constant as two movl operations.
   */
  __asm__ __volatile__(
     "movq %1, %0"
     : "+m" (v->counter)
     : "r" (i)
  );
}
#else /* !defined(__VMKLNX__) */
#define atomic64_set(v,i)		(((v)->counter) = (i))
#endif /* defined(__VMKLNX__) */

/**
 * atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic64_add(long i, atomic64_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "addq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic64_sub(long i, atomic64_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "subq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "subq %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c;
}

/**
 * atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic64_inc(atomic64_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "incq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic64_dec(atomic64_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "decq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic64_dec_and_test(atomic64_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "decq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c != 0;
}

/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic64_inc_and_test(atomic64_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "incq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c != 0;
}

/**
 * atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "addq %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c;
}

/**
 * atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ long atomic64_add_return(long i, atomic64_t *v)
{
	long __i = i;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "xaddq %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return i + __i;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
{
	return atomic64_add_return(-i,v);
}

#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

/* These are x86-specific, used by some header files */
#if defined(__VMKLNX__)

#define atomic_clear_mask(mask, addr)                       \
do {                                                        \
	vmk_AtomicPrologue();                               \
	__asm__ __volatile__(LOCK_PREFIX "andl %0,%1"       \
	: : "r" (~(mask)),"m" (*addr) : "memory") ;         \
	vmk_AtomicEpilogue();                               \
} while (0)

#define atomic_set_mask(mask, addr)                         \
do {                                                        \
	vmk_AtomicPrologue();                               \
	__asm__ __volatile__(LOCK_PREFIX "orl %0,%1"        \
	: : "r" ((unsigned)mask),"m" (*(addr)) : "memory"); \
	vmk_AtomicEpilogue();                               \
} while (0)

#else /* !defined(__VMKLNX__) */

#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")

#endif /* defined(__VMKLNX__) */

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif
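The atomic_t helpers above are most often used for reference counting; a minimal sketch built only on primitives defined in this header (the structure and function names are hypothetical):

/* Illustrative refcount pattern. */
struct example_object {
	atomic_t refcnt;	/* initialise with ATOMIC_INIT(1) */
};

static void example_get(struct example_object *obj)
{
	atomic_inc(&obj->refcnt);
}

static int example_put(struct example_object *obj)
{
	/* atomic_dec_and_test() returns true once the count reaches zero. */
	return atomic_dec_and_test(&obj->refcnt);
}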
 
new file 100644
#ifndef __ASM_X86_64_AUXVEC_H
#define __ASM_X86_64_AUXVEC_H

#endif
 
new file 100644
1
 
/*
2
 
 * Portions Copyright 2008, 2009 VMware, Inc.
3
 
 */
4
 
#ifndef _X86_64_BITOPS_H
5
 
#define _X86_64_BITOPS_H
6
 

	
7
 
#if defined(__VMKLNX__)
8
 
#include "vmkapi.h"
9
 
#endif /* defined(__VMKLNX__) */
10
 

	
11
 
/*
12
 
 * Copyright 1992, Linus Torvalds.
13
 
 */
14
 

	
15
 
#include <asm/alternative.h>
16
 

	
17
 
#define ADDR (*(volatile long *) addr)
18
 

	
19
 
/**
20
 
 * set_bit - Atomically set a bit in memory
21
 
 * @nr: the bit to set
22
 
 * @addr: the address to start counting from
23
 
 *
24
 
 * This function is atomic and may not be reordered.  See __set_bit()
25
 
 * if you do not require the atomic guarantees.
26
 
 * Note that @nr may be almost arbitrarily large; this function is not
27
 
 * restricted to acting on a single-word quantity.
28
 
 *
29
 
 * RETURN VALUE: 
30
 
 * NONE
31
 
 *
32
 
 */
33
 
/* _VMKLNX_CODECHECK_: set_bit */
34
 
static __inline__ void set_bit(int nr, volatile void * addr)
35
 
{
36
 
	__asm__ __volatile__( LOCK_PREFIX
37
 
		"btsl %1,%0"
38
 
		:"+m" (ADDR)
39
 
		:"dIr" (nr) : "memory");
40
 
}
41
 

	
42
 
/**
43
 
 * __set_bit - Set a bit in memory
44
 
 * @nr: the bit to set
45
 
 * @addr: the address to start counting from
46
 
 *
47
 
 * Unlike set_bit(), this function is non-atomic and may be reordered.
48
 
 * If it's called on the same region of memory simultaneously, the effect
49
 
 * may be that only one operation succeeds.
50
 
 */
51
 
static __inline__ void __set_bit(int nr, volatile void * addr)
52
 
{
53
 
	__asm__ volatile(
54
 
		"btsl %1,%0"
55
 
		:"+m" (ADDR)
56
 
		:"dIr" (nr) : "memory");
57
 
}
58
 

	
59
 
/**
60
 
 * clear_bit - Clears a bit in memory
61
 
 * @nr: Bit to clear
62
 
 * @addr: Address to start counting from
63
 
 *
64
 
 * Clears a bit in memory.
65
 
 * clear_bit() is atomic and may not be reordered.  However, it does
66
 
 * not contain a memory barrier, so if it is used for locking purposes,
67
 
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
68
 
 * in order to ensure changes are visible on other processors.
69
 
 *
70
 
 * RETURN VALUE:
71
 
 * None
72
 
 *
73
 
 */
74
 
/* _VMKLNX_CODECHECK_: clear_bit */
75
 
static __inline__ void clear_bit(int nr, volatile void * addr)
76
 
{
77
 
	__asm__ __volatile__( LOCK_PREFIX
78
 
		"btrl %1,%0"
79
 
		:"+m" (ADDR)
80
 
		:"dIr" (nr));
81
 
}
82
 

	
83
 
static __inline__ void __clear_bit(int nr, volatile void * addr)
84
 
{
85
 
	__asm__ __volatile__(
86
 
		"btrl %1,%0"
87
 
		:"+m" (ADDR)
88
 
		:"dIr" (nr));
89
 
}
90
 

	
91
 
#define smp_mb__before_clear_bit()	barrier()
92
 
#define smp_mb__after_clear_bit()	barrier()
93
 

	
94
 
/**
95
 
 * __change_bit - Toggle a bit in memory
96
 
 * @nr: the bit to change
97
 
 * @addr: the address to start counting from
98
 
 *
99
 
 * Unlike change_bit(), this function is non-atomic and may be reordered.
100
 
 * If it's called on the same region of memory simultaneously, the effect
101
 
 * may be that only one operation succeeds.
102
 
 */
103
 
static __inline__ void __change_bit(int nr, volatile void * addr)
104
 
{
105
 
	__asm__ __volatile__(
106
 
		"btcl %1,%0"
107
 
		:"+m" (ADDR)
108
 
		:"dIr" (nr));
109
 
}
110
 

	
111
 
/**
112
 
 * change_bit - Toggle a bit in memory
113
 
 * @nr: Bit to change
114
 
 * @addr: Address to start counting from
115
 
 *
116
 
 * change_bit() is atomic and may not be reordered.
117
 
 * Note that @nr may be almost arbitrarily large; this function is not
118
 
 * restricted to acting on a single-word quantity.
119
 
 */
120
 
static __inline__ void change_bit(int nr, volatile void * addr)
121
 
{
122
 
	__asm__ __volatile__( LOCK_PREFIX
123
 
		"btcl %1,%0"
124
 
		:"+m" (ADDR)
125
 
		:"dIr" (nr));
126
 
}
127
 

	
128
 
/**
129
 
 * test_and_set_bit - Set a bit and return its old state
130
 
 * @nr: Bit to set
131
 
 * @addr: Address to count from
132
 
 *
133
 
 * This operation is atomic and cannot be reordered.  
134
 
 * It also implies a memory barrier.
135
 
 * It tests if the bit at position nr in *addr is 0 or not and sets it to 1.
136
 
 * Note that the return value need not be 1 (just non-zero) if the bit was 1.
137
 
 *
138
 
 * RETURN VALUE:
139
 
 * 0 if original bit was 0 and NON-ZERO otherwise
140
 
 */
141
 
/* _VMKLNX_CODECHECK_: test_and_set_bit */
142
 
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
143
 
{
144
 
	int oldbit;
145
 

	
146
 
	__asm__ __volatile__( LOCK_PREFIX
147
 
		"btsl %2,%1\n\tsbbl %0,%0"
148
 
		:"=r" (oldbit),"+m" (ADDR)
149
 
		:"dIr" (nr) : "memory");
150
 
	return oldbit;
151
 
}
152
 

	
153
 
/**
154
 
 * __test_and_set_bit - Set a bit and return its old state
155
 
 * @nr: Bit to set
156
 
 * @addr: Address to count from
157
 
 *
158
 
 * This operation is non-atomic and can be reordered.  
159
 
 * If two examples of this operation race, one can appear to succeed
160
 
 * but actually fail.  You must protect multiple accesses with a lock.
161
 
 * It tests if the  bit at position nr in *addr is 0 or not and sets it to 1.
162
 
 * Note that the return value need not be 1 (just non-zero) if the bit was 1.
163
 
 *
164
 
 * RETURN VALUE:
165
 
 * 0 if original bit was 0 and NON-ZERO otherwise
166
 
 *
167
 
 * SEE ALSO:
168
 
 * test_and_set_bit
169
 
 */
170
 
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
171
 
{
172
 
	int oldbit;
173
 

	
174
 
	__asm__(
175
 
		"btsl %2,%1\n\tsbbl %0,%0"
176
 
		:"=r" (oldbit),"+m" (ADDR)
177
 
		:"dIr" (nr));
178
 
	return oldbit;
179
 
}
180
 

	
181
 
/**
182
 
 * test_and_clear_bit - Clear a bit and return its old state
183
 
 * @nr: Bit to clear
184
 
 * @addr: Address to count from
185
 
 *
186
 
 * This operation is atomic and cannot be reordered.  
187
 
 * It also implies a memory barrier.
188
 
 * It tests if the  bit at position nr in *addr is 0 or not and sets it to 0.
189
 
 * Note that the return value need not be 1 (just non-zero) if the bit was 1.
190
 
 *
191
 
 * RETURN VALUE:
192
 
 * 0 if original bit was 0 and NON-ZERO otherwise
193
 
 */
194
 
/* _VMKLNX_CODECHECK_: test_and_clear_bit */
195
 
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
196
 
{
197
 
	int oldbit;
198
 

	
199
 
	__asm__ __volatile__( LOCK_PREFIX
200
 
		"btrl %2,%1\n\tsbbl %0,%0"
201
 
		:"=r" (oldbit),"+m" (ADDR)
202
 
		:"dIr" (nr) : "memory");
203
 
	return oldbit;
204
 
}
205
 

	
206
 
/**
207
 
 * __test_and_clear_bit - Clear a bit and return its old state
208
 
 * @nr: Bit to clear
209
 
 * @addr: Address to count from
210
 
 *
211
 
 * This operation is non-atomic and can be reordered.  
212
 
 * If two examples of this operation race, one can appear to succeed
213
 
 * but actually fail.  You must protect multiple accesses with a lock.
214
 
 * It tests if the  bit at position nr in *addr is 0 or not and sets it to 0.
215
 
 * Note that the return value need not be 1 (just non-zero) if the bit was 1.
216
 
 *
217
 
 * RETURN VALUE:
218
 
 * 0 if original bit was 0 and NON-ZERO otherwise
219
 
 *
220
 
 * SEE ALSO:
221
 
 * test_and_clear_bit
222
 
 */
223
 
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
224
 
{
225
 
	int oldbit;
226
 

	
227
 
	__asm__(
228
 
		"btrl %2,%1\n\tsbbl %0,%0"
229
 
		:"=r" (oldbit),"+m" (ADDR)
230
 
		:"dIr" (nr));
231
 
	return oldbit;
232
 
}
233
 

	
234
 
/**
235
 
 * __test_and_change_bit - Toggle a bit and return its old state
236
 
 * @nr: Bit to toggle
237
 
 * @addr: Address to count from
238
 
 *
239
 
 * This operation is non-atomic and can be reordered.  
240
 
 * It also implies a memory barrier.
241
 
 * It tests if the  bit at position nr in *addr is 0 or not and toggles it.
242
 
 * Note that the return value need not be 1 (just non-zero) if the bit was 1.
243
 
 *
244
 
 * RETURN VALUE:
245
 
 * 0 if original bit was 0 and NON-ZERO otherwise
246
 
 *
247
 
 * SEE ALSO:
248
 
 * test_and_change_bit
249
 
 */
250
 
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
251
 
{
252
 
	int oldbit;
253
 

	
254
 
	__asm__ __volatile__(
255
 
		"btcl %2,%1\n\tsbbl %0,%0"
256
 
		:"=r" (oldbit),"+m" (ADDR)
257
 
		:"dIr" (nr) : "memory");
258
 
	return oldbit;
259
 
}

/**
 * test_and_change_bit - Toggle a bit and return its old state
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 * It tests if the bit at position nr in *addr is 0 or not and toggles it.
 * Note that the return value need not be 1 (just non-zero) if the bit was 1.
 *
 * RETURN VALUE:
 * 0 if original bit was 0 and NON-ZERO otherwise
 */
/* _VMKLNX_CODECHECK_: test_and_change_bit */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
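
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper that atomically flips a state bit and reports the previous
 * state with test_and_change_bit().  The bit index and word name are
 * assumptions for the example only.
 */
static __inline__ int example_toggle_state(volatile unsigned long *state)
{
	/* Returns non-zero if the bit was set before the toggle. */
	return test_and_change_bit(0, state);
}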

/**
 *  constant_test_bit - determine whether a bit is set
 *  @nr: bit number to test
 *  @addr: addr to test
 *
 *  Determines the state of the specified bit.
 *  This is used when @nr is known to be constant at compile-time.
 *  Use test_bit() instead of using this directly.
 *
 *  RETURN VALUE:
 *  0 if the bit was 0 and NON-ZERO otherwise
 */
/* _VMKLNX_CODECHECK_: constant_test_bit */
static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

/**
 *  variable_test_bit - determine whether a bit is set
 *  @nr: bit number to test
 *  @addr: addr to test
 *
 *  Determines the state of the specified bit.
 *  This is used when @nr is a variable.
 *  Use test_bit() instead of using this directly.
 *
 *  RETURN VALUE:
 *  0 if the bit was 0 and NON-ZERO otherwise
 */
/* _VMKLNX_CODECHECK_: variable_test_bit */
static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"dIr" (nr));
	return oldbit;
}
/**
 *  test_bit - Determine if bit at given position is set
 *  @nr: number of bit to be tested
 *  @addr: pointer to byte to test
 *
 *  It tests if the bit at position nr in *addr is 0 or not.
 *  If the bit number is a constant an optimized bit extract is done.
 *  Note that the return value need not be 1 (just non-zero) if the bit was 1.
 *
 *  SYNOPSIS:
 *  #define test_bit(nr,addr)
 *
 *  RETURN VALUE:
 *  0 if the bit was 0 and NON-ZERO otherwise
 */
/* _VMKLNX_CODECHECK_: test_bit */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
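
/*
 * Illustrative sketch, not part of the original header: test_bit() is
 * the intended entry point; it selects constant_test_bit() when the bit
 * number is a compile-time constant and variable_test_bit() otherwise.
 * The helper and bitmap names below are assumptions for the example only.
 */
static __inline__ int example_bit_is_set(const volatile unsigned long *map, int nr)
{
	/* Read-only check; does not modify the bitmap. */
	return test_bit(nr, map) != 0;
}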

#undef ADDR
#if defined(__VMKLNX__)
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum bitnumber to search
 *
 * Finds the first zero bit in a specified memory region
 *
 * RETURN VALUE:
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 * If the result is equal to or greater than size, no zero bit was found.
 */
/* _VMKLNX_CODECHECK_: find_first_zero_bit */
static __inline__ long
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
        long d0, d1, d2;
        long res;

        /*
         * We must test the size in words, not in bits, because
         * otherwise incoming sizes in the range -63..-1 will not run
         * any scasq instructions, and then the flags used by the je
         * instruction will have whatever random value was in place
         * before.  Nobody should call us like that, but
         * find_next_zero_bit() does when offset and size are at the
         * same word and it fails to find a zero itself.
         */
        size += 63;
        size >>= 6;
        if (!size)
                return 0;

        vmk_CPUEnsureClearDF();
        asm volatile(
                "  repe; scasq\n"
                "  je 1f\n"
                "  xorq -8(%%rdi),%%rax\n"
                "  subq $8,%%rdi\n"
                "  bsfq %%rax,%%rdx\n"
                "1:  subq %[addr],%%rdi\n"
                "  shlq $3,%%rdi\n"
                "  addq %%rdi,%%rdx"
                :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
                :"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL),
                 [addr] "S" (addr) : "memory");
        /*
         * Any register would do for [addr] above, but GCC tends to
         * prefer rbx over rsi, even though rsi is readily available
         * and doesn't have to be saved.
         */

        return res;
}
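
/*
 * Illustrative sketch, not part of the original header: the usual
 * "find a free slot, then claim it" pattern built from
 * find_first_zero_bit() and test_and_set_bit().  The retry loop covers
 * the race where another CPU claims the same bit between the search and
 * the claim.  The helper name and bitmap size are assumptions for the
 * example only.
 */
static __inline__ long example_alloc_slot(unsigned long *bitmap, unsigned long nbits)
{
        long slot;

        for (;;) {
                slot = find_first_zero_bit(bitmap, nbits);
                if (slot >= (long)nbits)
                        return -1;              /* no free slot */
                if (!test_and_set_bit((int)slot, bitmap))
                        return slot;            /* claimed it */
                /* Lost the race for this bit; search again. */
        }
}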

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 *
 * Finds the first zero bit at or after @offset in a specified memory region
 *
 * RETURN VALUE:
 * Returns the bit-number of the first zero bit in a memory region after the
 * specified offset
 * If the result is equal to or greater than size, no zero bit was found.
 */
/* _VMKLNX_CODECHECK_: find_next_zero_bit */
static __inline__ long
find_next_zero_bit (const unsigned long * addr, long size, long offset)
{
        const unsigned long * p = addr + (offset >> 6);
        unsigned long set = 0;
        unsigned long res, bit = offset&63;

        if (bit) {

                /*
                 * Look for zero in first word
                 */
                asm("bsfq %1,%0\n\t"
                    "cmoveq %2,%0"
                    : "=r" (set)
                    : "r" (~(*p >> bit)), "r"(64L));
                if (set < (64 - bit))
                        return set + offset;
                set = 64 - bit;
                p++;
        }

        /*
         * No zero yet, search remaining full words for a zero
         */
        res = find_first_zero_bit (p, size - 64 * (p - addr));

        return (offset + set + res);
}
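
/*
 * Illustrative sketch, not part of the original header: walking every
 * clear bit with find_next_zero_bit().  Note this implementation's
 * argument order (addr, size, offset).  The callback and helper names
 * are assumptions for the example only.
 */
static __inline__ void example_for_each_zero_bit(const unsigned long *bitmap,
                                                 long nbits,
                                                 void (*fn)(long bit))
{
        long bit = find_next_zero_bit(bitmap, nbits, 0);

        while (bit < nbits) {
                fn(bit);
                bit = find_next_zero_bit(bitmap, nbits, bit + 1);
        }
}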

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Finds the first set bit in a specified memory region
 *
 * RETURN VALUE:
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 *
 */
/* _VMKLNX_CODECHECK_: find_first_bit */
static __inline__ long find_first_bit(const unsigned long * addr, unsigned long size)
{
	long d0, d1;
	long res;

	/*
	 * We must test the size in words, not in bits, because
	 * otherwise incoming sizes in the range -63..-1 will not run
	 * any scasq instructions, and then the flags used by the jz
	 * instruction will have whatever random value was in place
	 * before.  Nobody should call us like that, but
	 * find_next_bit() does when offset and size are at the same
	 * word and it fails to find a one itself.
	 */
	size += 63;
	size >>= 6;
	if (!size)
		return 0;

	vmk_CPUEnsureClearDF();
	asm volatile(
		"   repe; scasq\n"
		"   jz 1f\n"
		"   subq $8,%%rdi\n"
		"   bsfq (%%rdi),%%rax\n"