diff --git a/BLD/build/HEADERS/92-vmkdrivers-asm-x64/vmkernel64/release/asm/smp.h b/BLD/build/HEADERS/92-vmkdrivers-asm-x64/vmkernel64/release/asm/smp.h
new file mode 100644
index 0000000000000000000000000000000000000000..762bb779a994768db70706a219ce2c3f6c57a11a
--- /dev/null
+++ b/BLD/build/HEADERS/92-vmkdrivers-asm-x64/vmkernel64/release/asm/smp.h
@@ -0,0 +1,161 @@
+/*
+ * Portions Copyright 2008 VMware, Inc.
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+extern int disable_apic;
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef __ASSEMBLY__
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif
+#include <asm/apic.h>
+#include <asm/thread_info.h>
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+#ifndef ASSEMBLY
+
+#include <asm/pda.h>
+#if defined(__VMKLNX__)
+#include "vmkapi.h"
+#endif /* defined(__VMKLNX__) */
+
+struct pt_regs;
+
+#if !defined(__VMKLNX__)
+extern cpumask_t cpu_present_mask;
+extern cpumask_t cpu_possible_map;
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+
+/*
+ * Private routines/data
+ */
+
+extern void smp_alloc_memory(void);
+extern volatile unsigned long smp_invalidate_needed;
+extern int pic_mode;
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
+extern int smp_num_siblings;
+extern void smp_send_reschedule(int cpu);
+void smp_stop_cpu(void);
+extern int smp_call_function_single(int cpuid, void (*func) (void *info),
+                                    void *info, int retry, int wait);
+
+extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern cpumask_t cpu_core_map[NR_CPUS];
+extern u8 cpu_llc_id[NR_CPUS];
+
+#define SMP_TRAMPOLINE_BASE 0x6000
+
+/*
+ * On x86 all CPUs are mapped 1:1 to the APIC space.
+ * This simplifies scheduling and IPI sending and
+ * compresses data structures.
+ */
+
+static inline int num_booting_cpus(void)
+{
+        return cpus_weight(cpu_callout_map);
+}
+#endif /* !defined(__VMKLNX__) */
+
+#if defined(__VMKLNX__)
+extern uint32_t raw_smp_processor_id(void);
+#else /* !defined(__VMKLNX__) */
+#define raw_smp_processor_id() read_pda(cpunumber)
+#endif /* defined(__VMKLNX__) */
+
+static inline int hard_smp_processor_id(void)
+{
+        /* we don't want to mark this access volatile - bad code generation */
+        return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
+}
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+extern void prefill_possible_map(void);
+extern unsigned num_processors;
+extern unsigned disabled_cpus;
+
+#endif /* !ASSEMBLY */
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+#endif
+
+#if !defined(__VMKLNX__)
+#ifndef ASSEMBLY
+/*
+ * Some lowlevel functions might want to know about
+ * the real APIC ID <-> CPU # mapping.
+ */
+extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
+extern u8 x86_cpu_to_log_apicid[NR_CPUS];
+extern u8 bios_cpu_apicid[];
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+        return cpus_addr(cpumask)[0];
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+        if (mps_cpu < NR_CPUS)
+                return (int)bios_cpu_apicid[mps_cpu];
+        else
+                return BAD_APICID;
+}
+
+#endif /* !ASSEMBLY */
+#endif /* !defined(__VMKLNX__) */
+
+#ifndef CONFIG_SMP
+#define stack_smp_processor_id() 0
+#define cpu_logical_map(x) (x)
+#else
+#include <asm/thread_info.h>
+#define stack_smp_processor_id() \
+({ \
+        struct thread_info *ti; \
+        __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+        ti->cpu; \
+})
+#endif
+
+#ifndef __ASSEMBLY__
+static __inline int logical_smp_processor_id(void)
+{
+        /* we don't want to mark this access volatile - bad code generation */
+        return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+#endif
+
+#if defined(__VMKLNX__)
+u8 cpu_physical_id(uint32_t cpu);
+#else /* !defined(__VMKLNX__) */
+#ifdef CONFIG_SMP
+#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+#else
+#define cpu_physical_id(cpu) boot_cpu_id
+#endif
+#endif /* defined(__VMKLNX__) */
+
+#endif
+
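
Usage note (not part of the diff): the sketch below illustrates how a vmklinux driver might consume the declarations above, reading the current CPU with raw_smp_processor_id() and using the 2.6.18-era five-argument smp_call_function_single() prototype that this header declares. This is a minimal sketch, not VMware code; the names my_read_counter() and my_sample_cpu1() are hypothetical, invented for illustration.

#include <linux/smp.h>  /* raw_smp_processor_id(), smp_call_function_single() */

/* Hypothetical cross-call handler: executes on the target CPU. */
static void my_read_counter(void *info)
{
        unsigned int *cpu_seen = info;

        /* While the cross-call runs, we are executing on the target CPU. */
        *cpu_seen = raw_smp_processor_id();
}

/* Hypothetical caller: run my_read_counter() on CPU 1 and wait for it. */
static int my_sample_cpu1(void)
{
        unsigned int cpu_seen = 0;

        /* retry = 0, wait = 1: block until the handler completes on CPU 1. */
        return smp_call_function_single(1, my_read_counter, &cpu_seen, 0, 1);
}

Note the wait flag: passing wait = 1 makes the call synchronous, so &cpu_seen may safely live on the caller's stack; with wait = 0 the info pointer would have to stay valid until the handler runs.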