unknown <unknown@example.com> - 2015-10-23 19:48:45
ESXi-5.0-GA
2911 files changed (changeset too big; the diff below is truncated)
 
new file 100644
/*
 * DO NOT EDIT THIS FILE - IT IS GENERATED BY THE DRIVER BUILD.
 *
 * If you need to change the driver's name spaces, look in the scons
 * files for the driver's defineVmkDriver() rule.
 */

VMK_NAMESPACE_PROVIDES("com.vmware.vmkplexer", "1.0");
#define VMKLNX_MY_NAMESPACE_VERSION "1.0"
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/8253pit.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/8253pit.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/a.out.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/a.out.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/acpi.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/acpi.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/agp.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/agp.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/alternative.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/alternative.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/apic.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/apic.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/apicdef.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/apicdef.h
 
new file 100644
/*
 * Portions Copyright 2008, 2010 VMware, Inc.
 */
#ifndef __ARCH_X86_64_ATOMIC__
#define __ARCH_X86_64_ATOMIC__

#include <asm/alternative.h>
#if defined(__VMKLNX__)
#include "vmkapi.h"
#endif /* defined(__VMKLNX__) */

/* atomic_t should be 32 bit signed type */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
/* _VMKLNX_CODECHECK_: atomic_add */
static __inline__ void atomic_add(int i, atomic_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic_sub - subtract from the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
/* _VMKLNX_CODECHECK_: atomic_sub */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
/* _VMKLNX_CODECHECK_: atomic_inc */
static __inline__ void atomic_inc(atomic_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
/* _VMKLNX_CODECHECK_: atomic_dec */
static __inline__ void atomic_dec(atomic_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
/* _VMKLNX_CODECHECK_: atomic_dec_and_test */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c != 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c;
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
/* _VMKLNX_CODECHECK_: atomic_add_return */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return i + __i;
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i,v);
}

/**
 * atomic_inc_return - increment by 1 and return
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns @v + 1
 *
 * SYNOPSIS:
 * #define atomic_inc_return(v)
 *
 * RETURN VALUE:
 * Returns @v + 1
 */
/* _VMKLNX_CODECHECK_: atomic_inc_return */
#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))

/* A 64-bit atomic type */

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
#define atomic64_read(v)		((v)->counter)

/**
 * atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#if defined(__VMKLNX__)
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
  /*
   * Ensure that we do a single movq. Without this, the compiler
   * may do a write with a constant as two movl operations.
   */
  __asm__ __volatile__(
     "movq %1, %0"
     : "=m" (v->counter)
     : "r" (i)
  );
}
#else /* !defined(__VMKLNX__) */
#define atomic64_set(v,i)		(((v)->counter) = (i))
#endif /* defined(__VMKLNX__) */

/**
 * atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic64_add(long i, atomic64_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "addq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic64_sub - subtract from the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic64_sub(long i, atomic64_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "subq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "subq %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c;
}

/**
 * atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic64_inc(atomic64_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "incq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic64_dec(atomic64_t *v)
{
#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "decq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}

/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic64_dec_and_test(atomic64_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "decq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c != 0;
}

/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic64_inc_and_test(atomic64_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "incq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c != 0;
}

/**
 * atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
{
	unsigned char c;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "addq %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return c;
}

/**
 * atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ long atomic64_add_return(long i, atomic64_t *v)
{
	long __i = i;

#if defined(__VMKLNX__)
	vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
	__asm__ __volatile__(
		LOCK_PREFIX "xaddq %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
#if defined(__VMKLNX__)
	vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
	return i + __i;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
{
	return atomic64_add_return(-i,v);
}

#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

/* These are x86-specific, used by some header files */
#if defined(__VMKLNX__)

#define atomic_clear_mask(mask, addr)                       \
do {                                                        \
	vmk_AtomicPrologue();                               \
	__asm__ __volatile__(LOCK_PREFIX "andl %0,%1"       \
	: : "r" (~(mask)),"m" (*addr) : "memory") ;         \
	vmk_AtomicEpilogue();                               \
} while (0)

#define atomic_set_mask(mask, addr)                         \
do {                                                        \
	vmk_AtomicPrologue();                               \
	__asm__ __volatile__(LOCK_PREFIX "orl %0,%1"        \
	: : "r" ((unsigned)mask),"m" (*(addr)) : "memory"); \
	vmk_AtomicEpilogue();                               \
} while (0)

#else /* !defined(__VMKLNX__) */

#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")

#endif /* defined(__VMKLNX__) */

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif
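
Usage sketch (editor's illustration, not part of the changeset): atomic_dec_and_test() above returns true only for the caller whose decrement takes the counter to zero, which is the classic reference-count teardown pattern. The object and helper names below are hypothetical.

#include <linux/slab.h>		/* kfree() */
#include <asm/atomic.h>

struct my_object {			/* hypothetical refcounted object */
	atomic_t refcount;		/* initialized with ATOMIC_INIT(1) */
};

static void my_object_get(struct my_object *obj)
{
	atomic_inc(&obj->refcount);	/* take an additional reference */
}

static void my_object_put(struct my_object *obj)
{
	/* Exactly one caller observes the counter reach zero, so exactly
	 * one caller frees the object, with no lock held. */
	if (atomic_dec_and_test(&obj->refcount))
		kfree(obj);
}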
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/auxvec.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/auxvec.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/bitops.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/bitops.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/boot.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/boot.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/bootsetup.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/bootsetup.h
 
new file 100644
#ifndef __ASM_X8664_BUG_H
#define __ASM_X8664_BUG_H 1

#if defined(__VMKLNX__)

/*
 * For vmklinux, the trick below doesn't work.  It works by raising a
 * ud2 exception, and then the exception handler knows the rip is
 * pointing to a struct bug_frame.  However, pushq can only take a
 * 32-bit immediate, so we can't push the 64-bit address of __FILE__
 * into it (this works on linux, because linux is loaded at -2Gb, and
 * when 'signed int filename' is cast to long it is sign-extended, the
 * top bits filled to 0xffff... and the address is correct).  Besides
 * this, the vmkernel ud2 exception handler doesn't know anything about
 * this.  So the long and the short of it is - we don't do any of this
 * arch-specific BUG() stuff, and just fall back to the generic
 * panic()
 */

#include <asm-generic/bug.h>

#else /* !defined(__VMKLNX__) */

#include <linux/stringify.h>

/*
 * Tell the user there is some problem.  The exception handler decodes
 * this frame.
 */
struct bug_frame {
	unsigned char ud2[2];
	unsigned char push;
	signed int filename;
	unsigned char ret;
	unsigned short line;
} __attribute__((packed));

#ifdef CONFIG_BUG
#define HAVE_ARCH_BUG
/* We turn the bug frame into valid instructions to not confuse
   the disassembler. Thanks to Jan Beulich & Suresh Siddha
   for nice instruction selection.
   The magic numbers generate mov $64bitimm,%eax ; ret $offset. */

#define BUG() 								\
	asm volatile(							\
	"ud2 ; pushq $%c1 ; ret $%c0" :: 				\
		     "i"(__LINE__), "i" (__FILE__))
void out_of_line_bug(void);
#else
static inline void out_of_line_bug(void) { }
#endif

#include <asm-generic/bug.h>
#endif

#endif /* defined(__VMKLNX__) */
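
Editor's annotation (an illustration, not part of the changeset) of the encoding that the comment above relies on: each field of struct bug_frame lines up with one of the instructions BUG() emits, using standard x86-64 encodings, which is why the frame also reads as valid code.

/*
 * How the BUG() bytes map onto struct bug_frame:
 *
 *   ud2[2]   = 0x0f 0x0b   ud2          (raises the invalid-opcode fault)
 *   push     = 0x68        pushq $imm32 (imm32 = address of __FILE__,
 *   filename = <imm32>                   sign-extended when decoded)
 *   ret      = 0xc2        ret $imm16   (imm16 = __LINE__)
 *   line     = <imm16>
 *
 * The ud2 fault handler reads the bytes at %rip as a struct bug_frame
 * to recover the file name and line number of the failed assertion.
 */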
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/bugs.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/bugs.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/byteorder.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/byteorder.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/cache.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/cache.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/cacheflush.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/cacheflush.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/calgary.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/calgary.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/calling.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/calling.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/checksum.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/checksum.h
 
new file 100644
#ifndef _ASM_X86_64_COMPAT_H
#define _ASM_X86_64_COMPAT_H

/*
 * Architecture specific compatibility types
 */
#include <linux/types.h>
#include <linux/sched.h>

#define COMPAT_USER_HZ	100

typedef u32		compat_size_t;
typedef s32		compat_ssize_t;
typedef s32		compat_time_t;
typedef s32		compat_clock_t;
typedef s32		compat_pid_t;
typedef u16		__compat_uid_t;
typedef u16		__compat_gid_t;
typedef u32		__compat_uid32_t;
typedef u32		__compat_gid32_t;
typedef u16		compat_mode_t;
typedef u32		compat_ino_t;
typedef u16		compat_dev_t;
typedef s32		compat_off_t;
typedef s64		compat_loff_t;
typedef u16		compat_nlink_t;
typedef u16		compat_ipc_pid_t;
typedef s32		compat_daddr_t;
typedef u32		compat_caddr_t;
typedef __kernel_fsid_t	compat_fsid_t;
typedef s32		compat_timer_t;
typedef s32		compat_key_t;

typedef s32		compat_int_t;
typedef s32		compat_long_t;
typedef u32		compat_uint_t;
typedef u32		compat_ulong_t;

struct compat_timespec {
	compat_time_t	tv_sec;
	s32		tv_nsec;
};

struct compat_timeval {
	compat_time_t	tv_sec;
	s32		tv_usec;
};

struct compat_stat {
	compat_dev_t	st_dev;
	u16		__pad1;
	compat_ino_t	st_ino;
	compat_mode_t	st_mode;
	compat_nlink_t	st_nlink;
	__compat_uid_t	st_uid;
	__compat_gid_t	st_gid;
	compat_dev_t	st_rdev;
	u16		__pad2;
	u32		st_size;
	u32		st_blksize;
	u32		st_blocks;
	u32		st_atime;
	u32		st_atime_nsec;
	u32		st_mtime;
	u32		st_mtime_nsec;
	u32		st_ctime;
	u32		st_ctime_nsec;
	u32		__unused4;
	u32		__unused5;
};

struct compat_flock {
	short		l_type;
	short		l_whence;
	compat_off_t	l_start;
	compat_off_t	l_len;
	compat_pid_t	l_pid;
};

#define F_GETLK64	12	/* using 'struct flock64' */
#define F_SETLK64	13
#define F_SETLKW64	14

/*
 * IA32 uses 4 byte alignment for 64 bit quantities,
 * so we need to pack this structure.
 */
struct compat_flock64 {
	short		l_type;
	short		l_whence;
	compat_loff_t	l_start;
	compat_loff_t	l_len;
	compat_pid_t	l_pid;
} __attribute__((packed));

struct compat_statfs {
	int		f_type;
	int		f_bsize;
	int		f_blocks;
	int		f_bfree;
	int		f_bavail;
	int		f_files;
	int		f_ffree;
	compat_fsid_t	f_fsid;
	int		f_namelen;	/* SunOS ignores this field. */
	int		f_frsize;
	int		f_spare[5];
};

#define COMPAT_RLIM_OLD_INFINITY	0x7fffffff
#define COMPAT_RLIM_INFINITY		0xffffffff

typedef u32		compat_old_sigset_t;	/* at least 32 bits */

#define _COMPAT_NSIG		64
#define _COMPAT_NSIG_BPW	32

typedef u32		compat_sigset_word;

#define COMPAT_OFF_T_MAX	0x7fffffff
#define COMPAT_LOFF_T_MAX	0x7fffffffffffffffL

struct compat_ipc64_perm {
	compat_key_t key;
	__compat_uid32_t uid;
	__compat_gid32_t gid;
	__compat_uid32_t cuid;
	__compat_gid32_t cgid;
	unsigned short mode;
	unsigned short __pad1;
	unsigned short seq;
	unsigned short __pad2;
	compat_ulong_t unused1;
	compat_ulong_t unused2;
};

struct compat_semid64_ds {
	struct compat_ipc64_perm sem_perm;
	compat_time_t  sem_otime;
	compat_ulong_t __unused1;
	compat_time_t  sem_ctime;
	compat_ulong_t __unused2;
	compat_ulong_t sem_nsems;
	compat_ulong_t __unused3;
	compat_ulong_t __unused4;
};

struct compat_msqid64_ds {
	struct compat_ipc64_perm msg_perm;
	compat_time_t  msg_stime;
	compat_ulong_t __unused1;
	compat_time_t  msg_rtime;
	compat_ulong_t __unused2;
	compat_time_t  msg_ctime;
	compat_ulong_t __unused3;
	compat_ulong_t msg_cbytes;
	compat_ulong_t msg_qnum;
	compat_ulong_t msg_qbytes;
	compat_pid_t   msg_lspid;
	compat_pid_t   msg_lrpid;
	compat_ulong_t __unused4;
	compat_ulong_t __unused5;
};

struct compat_shmid64_ds {
	struct compat_ipc64_perm shm_perm;
	compat_size_t  shm_segsz;
	compat_time_t  shm_atime;
	compat_ulong_t __unused1;
	compat_time_t  shm_dtime;
	compat_ulong_t __unused2;
	compat_time_t  shm_ctime;
	compat_ulong_t __unused3;
	compat_pid_t   shm_cpid;
	compat_pid_t   shm_lpid;
	compat_ulong_t shm_nattch;
	compat_ulong_t __unused4;
	compat_ulong_t __unused5;
};

/*
 * A pointer passed in from user mode. This should not
 * be used for syscall parameters, just declare them
 * as pointers because the syscall entry code will have
 * appropriately converted them already.
 */
typedef	u32		compat_uptr_t;

/**
 *  compat_ptr - Convert a user provided pointer to a pointer suitable for user access functions
 *  @uptr: Pointer in user mode
 *
 *  Convert a user provided pointer to a pointer suitable
 *  for use with user access functions (e.g. copy_from_user,
 *  copy_to_user, etc.).
 *
 *  RETURN VALUE:
 *  A (void *) pointer in user space.
 *
 */
/* _VMKLNX_CODECHECK_: compat_ptr */
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
	return (void __user *)(unsigned long)uptr;
}

static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
	return (u32)(unsigned long)uptr;
}

#if !defined(__VMKLNX__)
static __inline__ void __user *compat_alloc_user_space(long len)
{
	struct pt_regs *regs = task_pt_regs(current);
	return (void __user *)regs->rsp - len;
}
#else
extern void *compat_alloc_user_space(long len);
#endif

#if !defined(__VMKLNX__)
static inline int is_compat_task(void)
{
	return current_thread_info()->status & TS_COMPAT;
}
#endif /* !defined(__VMKLNX__) */

#endif /* _ASM_X86_64_COMPAT_H */
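
Usage sketch (editor's illustration, not part of the changeset): compat_ptr() is how a 64-bit handler turns the zero-extended u32 a 32-bit process passed in back into a pointer the user-access helpers accept. The function name below is hypothetical.

#include <asm/uaccess.h>	/* copy_from_user() */

/* Hypothetical compat path: a 32-bit caller handed us the address of
 * its struct compat_timeval as a plain u32. */
static int read_user_timeval32(compat_uptr_t up, struct compat_timeval *out)
{
	void __user *uptr = compat_ptr(up);	/* widen u32 -> __user pointer */

	if (copy_from_user(out, uptr, sizeof(*out)))
		return -EFAULT;
	return 0;	/* out->tv_sec / out->tv_usec are the 32-bit layout */
}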
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/const.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/const.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/cpu.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/cpu.h
 
new file 100644
/*
 * cpufeature.h
 *
 * Defines x86 CPU feature bits
 */

#ifndef __ASM_X8664_CPUFEATURE_H
#define __ASM_X8664_CPUFEATURE_H

#define NCAPINTS	7	/* N 32-bit words worth of info */

/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */
#define X86_FEATURE_FPU		(0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME		(0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE		(0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE 	(0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC		(0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR		(0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
#define X86_FEATURE_PAE		(0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE		(0*32+ 7) /* Machine Check Architecture */
#define X86_FEATURE_CX8		(0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC	(0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP		(0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR	(0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE		(0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA		(0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV	(0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
#define X86_FEATURE_PAT		(0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36	(0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN		(0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLSH	(0*32+19) /* Supports the CLFLUSH instruction */
#define X86_FEATURE_DTES	(0*32+21) /* Debug Trace Store */
#define X86_FEATURE_ACPI	(0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX		(0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR	(0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
				          /* of FPU context), and CR4.OSFXSR available */
#define X86_FEATURE_XMM		(0*32+25) /* Streaming SIMD Extensions */
#define X86_FEATURE_XMM2	(0*32+26) /* Streaming SIMD Extensions-2 */
#define X86_FEATURE_SELFSNOOP	(0*32+27) /* CPU self snoop */
#define X86_FEATURE_HT		(0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC		(0*32+29) /* Automatic clock control */
#define X86_FEATURE_IA64	(0*32+30) /* IA-64 processor */

/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL	(1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT	(1*32+25) /* FXSR optimizations */
#define X86_FEATURE_RDTSCP	(1*32+27) /* RDTSCP */
#define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW	(1*32+31) /* 3DNow! */

/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY	(2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN	(2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI	(2*32+ 3) /* LongRun table interface */

/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX	(3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR	(3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR	(3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR	(3*32+ 3) /* Centaur MCRs (= MTRRs) */
#define X86_FEATURE_REP_GOOD	(3*32+ 4) /* rep microcode works well on this CPU */
#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
#define X86_FEATURE_SYNC_RDTSC  (3*32+6)  /* RDTSC syncs CPU core */
#define X86_FEATURE_FXSAVE_LEAK (3*32+7)  /* FIP/FOP/FDP leaks through FXSAVE */
#define X86_FEATURE_UP		(3*32+8) /* SMP kernel running on UP */
#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
#define X86_FEATURE_MWAIT	(4*32+ 3) /* Monitor/Mwait support */
#define X86_FEATURE_DSCPL	(4*32+ 4) /* CPL Qualified Debug Store */
#define X86_FEATURE_EST		(4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2		(4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_CID		(4*32+10) /* Context ID */
#define X86_FEATURE_CX16	(4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR	(4*32+14) /* Send Task Priority Messages */
#if defined(__VMKLNX__)
/* From Linux 2.6.29 arch/x86/include/asm/cpufeature.h */
#define X86_FEATURE_DCA         (4*32+18) /* Direct Cache Access */
#endif /* defined(__VMKLNX__) */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE	(5*32+ 2) /* on-CPU RNG present (xstore insn) */
#define X86_FEATURE_XSTORE_EN	(5*32+ 3) /* on-CPU RNG enabled */
#define X86_FEATURE_XCRYPT	(5*32+ 6) /* on-CPU crypto (xcrypt insn) */
#define X86_FEATURE_XCRYPT_EN	(5*32+ 7) /* on-CPU crypto enabled */

/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM	(6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY	(6*32+ 1) /* If yes HyperThreading not valid */

#define cpu_has(c, bit)                test_bit(bit, (c)->x86_capability)
#define boot_cpu_has(bit)      test_bit(bit, boot_cpu_data.x86_capability)

#define cpu_has_fpu            1
#define cpu_has_vme            0
#define cpu_has_de             1
#define cpu_has_pse            1
#define cpu_has_tsc            1
#define cpu_has_pae            ___BUG___
#define cpu_has_pge            1
#define cpu_has_apic           boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_mtrr           1
#define cpu_has_mmx            1
#define cpu_has_fxsr           1
#define cpu_has_xmm            1
#define cpu_has_xmm2           1
#define cpu_has_xmm3           boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ht             boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp             1 /* XXX */
#define cpu_has_k6_mtrr        0
#define cpu_has_cyrix_arr      0
#define cpu_has_centaur_mcr    0
#define cpu_has_clflush	       boot_cpu_has(X86_FEATURE_CLFLSH)

#endif /* __ASM_X8664_CPUFEATURE_H */
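
Usage sketch (editor's illustration, not part of the changeset): feature bits are tested with boot_cpu_has() or the cpu_has_* convenience macros above; on x86-64 many are hardwired to 1 because the baseline architecture guarantees them. The dispatch helpers below are hypothetical.

static void checksum_dispatch(void *buf, size_t len)
{
	/* cpu_has_xmm3 expands to boot_cpu_has(X86_FEATURE_XMM3),
	 * i.e. a test_bit() on the boot CPU's capability words. */
	if (cpu_has_xmm3)
		checksum_sse3(buf, len);	/* hypothetical SSE3 routine */
	else
		checksum_generic(buf, len);	/* hypothetical fallback */
}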
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/cputime.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/cputime.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/crash.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/crash.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/crashdump.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/crashdump.h
 
new file 100644
/*
 * Portions Copyright 2008-2010 VMware, Inc.
 */
#ifndef _X86_64_CURRENT_H
#define _X86_64_CURRENT_H

#if !defined(__ASSEMBLY__)
struct task_struct;

#include <asm/pda.h>

#if defined(__VMKLNX__)
#include <vmklinux_dist.h>
/**
 *  get_current - Get the current task pointer for the current world.
 *
 *  Gets the current task pointer for the current world.
 *
 *  RETURN VALUE:
 *  Pointer to the task struct of the running process.
 */
/* _VMKLNX_CODECHECK_: get_current */
static inline struct task_struct *get_current(void)
{
	return vmklnx_GetCurrent();
}
#else /* !defined(__VMKLNX__) */
static inline struct task_struct *get_current(void)
{
	struct task_struct *t = read_pda(pcurrent);
	return t;
}
#endif /* defined(__VMKLNX__) */

/**
 *  current - Get the task pointer of the current task
 *
 *  Returns a pointer to the task struct of the running task
 *
 *  SYNOPSIS:
 *     #define current
 *
 *  RETURN VALUE:
 *  Pointer to the current task, of type task_struct
 *
 */
/* _VMKLNX_CODECHECK_: current */
#define current get_current()

#else

#ifndef ASM_OFFSET_H
#include <asm/asm-offsets.h>
#endif

#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg

#endif

#endif /* !(_X86_64_CURRENT_H) */
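
Usage sketch (editor's illustration, not part of the changeset): under __VMKLNX__ the current macro resolves through vmklnx_GetCurrent() rather than the PDA read, but callers use it exactly as on Linux. The logging helper below is hypothetical.

#include <linux/kernel.h>	/* printk() */
#include <linux/sched.h>	/* struct task_struct: pid, comm */

static void log_caller(void)
{
	/* 'current' expands to get_current(), i.e. the task struct of
	 * the world executing this code. */
	printk("caller: pid %d comm %s\n", current->pid, current->comm);
}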
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/debugreg.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/debugreg.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/delay.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/delay.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/desc.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/desc.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/device.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/device.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/diskdump.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/diskdump.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/div64.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/div64.h
 
new file 100644
/*
 * Portions Copyright 2008 VMware, Inc.
 */
#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

struct dma_mapping_ops {
	int             (*mapping_error)(dma_addr_t dma_addr);
	void*           (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
				size_t size, int direction);
	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void            (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void            (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int             (*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern dma_addr_t bad_dma_address;
extern struct dma_mapping_ops* dma_ops;
extern int iommu_merge;

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle);

#if defined(__VMKLNX__)

/*
 * DMA mapping functions on vmklinux are not inlined so as to
 * support further revision and improvement of the behavior of
 * stable third-party binary drivers using these functions.
 */

extern int dma_mapping_error(dma_addr_t dma_addr);

#define dma_unmap_page(dev,dma_address,size,dir) \
	dma_unmap_single(dev,dma_address,size,dir)

extern dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction);

extern dma_addr_t
dma_map_page(struct device *hwdev, struct page *page, unsigned long offset,
             size_t size, int direction);

extern void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction);
extern void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction);
extern void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction);
extern void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction);
extern void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size, int direction);
extern void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction);
extern void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction);
extern int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction);
extern void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction);

struct vmklnx_codma;
extern struct vmklnx_codma vmklnx_codma;
extern int vmklnx_dma_supported(struct vmklnx_codma *codma,
                                struct device *hwdev, u64 mask);

static inline int dma_supported(struct device *hwdev, u64 mask)
{
	return vmklnx_dma_supported(&vmklnx_codma, hwdev, mask);
}

#else /* !defined(__VMKLNX__) */

/**
 *  dma_mapping_error - Check a bus address for a mapping error
 *  @dma_addr: bus address previously returned by dma_map_single or dma_map_page
 *
 *  Performs a platform-specific check to determine if the
 *  mapped bus address is valid for use with DMA
 *
 *  RETURN VALUE:
 *  TRUE if the bus address incurred a mapping error, FALSE otherwise
 *
 *  SEE ALSO:
 *  dma_map_single
 *
 */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}

/**
 *  dma_map_single - Map a buffer for streaming DMA use with a given device
 *  @hwdev: device to be used in the DMA operation
 *  @ptr: virtual address of the buffer
 *  @size: length of the buffer, in bytes
 *  @direction: direction of the DMA to set up
 *
 *  Sets up any platform-specific bus connectivity required to
 *  make a buffer usable for a DMA operation and returns a mapped bus address
 *  for the buffer.  The mapped address should be checked for an error using
 *  dma_mapping_error.  When the buffer will no longer be used for DMA, the
 *  buffer should be unmapped using dma_unmap_single.
 *  'direction' can be any one of
 *  DMA_BIDIRECTIONAL (the device either reads or writes the buffer),
 *  DMA_TO_DEVICE (the device reads the buffer),
 *  DMA_FROM_DEVICE (the device writes the buffer), or
 *  DMA_NONE (neither reads nor writes should be allowed - may not be supported
 *  on all platforms)
 *
 *  RETURN VALUE:
 *  A bus address accessible by the device
 *
 *  SEE ALSO:
 *  dma_unmap_single, dma_mapping_error
 *
 */
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, ptr, size, direction);
}

#define dma_map_page(dev,page,offset,size,dir) \
	dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

/**
 *  dma_unmap_single - Tear down a streaming DMA mapping for a buffer
 *  @dev: device that had been used in the DMA operation
 *  @addr: mapped bus address for the buffer, previously returned by dma_map_single
 *  @size: length of the buffer, in bytes
 *  @direction: direction of the DMA that was set up by dma_map_single
 *
 *  Tears down the platform-specific bus connectivity that was needed to make a
 *  buffer usable for DMA.
 *  'direction' can be any one of
 *  DMA_BIDIRECTIONAL (the device either reads or writes the buffer),
 *  DMA_TO_DEVICE (the device reads the buffer),
 *  DMA_FROM_DEVICE (the device writes the buffer), or
 *  DMA_NONE (neither reads nor writes should be allowed)
 *
 *  RETURN VALUE:
 *  Does not return any value
 *
 *  SEE ALSO:
 *  dma_map_single
 *
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_single(dev, addr, size, direction);
}

/**
 *  dma_sync_single_for_cpu - Allow the CPU to access a buffer that is currently DMA-mapped
 *  @hwdev: device to which the buffer is mapped
 *  @dma_handle: bus address of the buffer
 *  @size: length of the buffer, in bytes
 *  @direction: direction of the existing DMA mapping
 *
 *  Transfers access ownership for a buffer that has been set up for DMA back to
 *  the CPU and synchronizes any changes that have been made by the device with
 *  the CPU.  The bus mapping that was created with dma_map_single is not
 *  destroyed.  Afterward, the CPU can safely read and write the buffer.  The
 *  device should not access the buffer until access rights have been
 *  transferred back to the device using dma_sync_single_for_device.
 *
 *  RETURN VALUE:
 *  Does not return any value
 *
 *  SEE ALSO:
 *  dma_sync_single_for_device, dma_map_single
 *
 */
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

/**
 *  dma_sync_single_for_device - Re-enable device access to a DMA-mapped buffer
 *  @hwdev: device to which the buffer is mapped
 *  @dma_handle: bus address of the buffer
 *  @size: length of the buffer, in bytes
 *  @direction: direction of the existing DMA mapping
 *
 *  Transfers access ownership back to a device from the CPU and synchronizes
 *  any changes that the CPU has made so that they will be visible to the device.
 *
 *  RETURN VALUE:
 *  Does not return any value
 *
 *  SEE ALSO:
 *  dma_sync_single_for_cpu, dma_map_single
 *
 */
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu) {
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
	}

	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device) {
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	}

	flush_write_buffers();
}

/**
 *  dma_map_sg - Map scatter/gather buffers for DMA use with a hardware device
 *  @hwdev: device to be used in the DMA operations
 *  @sg: start of the scatter/gather list of entries to be mapped
 *  @nents: number of elements in the list to be mapped
 *  @direction: direction of the DMA, with values as in dma_map_single
 *
 *  Sets up the platform-specific bus connectivity for each of the buffers in a
 *  scatterlist so that they may be used in DMA with the given hardware device.
 *  dma_unmap_sg should be used on these scatterlist elements when they will no
 *  longer be used with DMA.
 *
 *  RETURN VALUE:
 *  0 if a failure was encountered, nents if the mappings succeeded
 *
 *  SEE ALSO:
 *  dma_map_single, dma_unmap_sg
 *
 */
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

/**
 *  dma_unmap_sg - Unmap scatter/gather buffers that were previously mapped for DMA
 *  @hwdev: device to which these buffers have been mapped
 *  @sg: start of the scatter/gather list of entries to be unmapped
 *  @nents: number of elements in the list to be unmapped
 *  @direction: direction of the existing DMA mapping
 *
 *  Tears down the platform-specific bus connectivity for each of the buffers in
 *  a scatterlist that had been previously set up for DMA using
 *  dma_map_sg.
 *
 *  RETURN VALUE:
 *  Does not return any value
 *
 *  SEE ALSO:
 *  dma_map_sg, dma_map_single
 *
 */
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	/* take out this ifdef block when we have iommu support */
	dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

extern int dma_supported(struct device *hwdev, u64 mask);

#endif /* !defined(__VMKLNX__) */

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

static inline void
dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
{
	flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */
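
Usage sketch (editor's illustration, not part of the changeset): the map / check / unmap discipline that the dma_map_single and dma_mapping_error comments above prescribe for streaming DMA. The function and device programming step below are hypothetical.

static int do_device_write(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))		/* always check the mapping */
		return -EIO;

	/* ... program the (hypothetical) device to read 'len' bytes from
	 * bus address 'handle' and wait for the transfer to complete ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}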
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/dma.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/dma.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/dmi.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/dmi.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/dwarf2.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/dwarf2.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/e820.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/e820.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/edac.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/edac.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/elf.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/elf.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/emergency-restart.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/emergency-restart.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/errno.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/errno.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/fcntl.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/fcntl.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/fixmap.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/fixmap.h
 
file renamed from BLD/build/HEADERS/KLnext-vmkdrivers-asm-x64/vmkernel64/release/asm/floppy.h to BLD/build/HEADERS/CUR-9-vmkdrivers-asm-x64/vmkernel64/release/asm/floppy.h