forked from KolibriOS/kolibrios
video/drm: batch update
git-svn-id: svn://kolibrios.org@3031 a494cfbc-eb01-0410-851d-a64ba20cac60
@@ -129,7 +129,7 @@ static inline void alternatives_smp_switch(int smp) {}
  * use this macro(s) if you need more than one output parameter
  * in alternative_io
  */
-#define ASM_OUTPUT2(a, b) a, b
+#define ASM_OUTPUT2(a) a
 
 struct paravirt_patch_site;
 #ifdef CONFIG_PARAVIRT
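
A comma inside a macro argument would normally split the argument in two; ASM_OUTPUT2() exists so a multi-constraint output list survives expansion as a single argument. A minimal usage sketch, assuming mainline's alternative_io(oldinstr, newinstr, feature, output, input...) and hypothetical variables lo, hi, ptr (none of this is part of the hunk above):

    /* sketch: two output constraints passed as one macro argument */
    alternative_io("call old_helper", "call new_helper", X86_FEATURE_XMM2,
                   ASM_OUTPUT2("=a" (lo), "=d" (hi)),   /* two outputs */
                   "D" (ptr));                          /* one input   */
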
@@ -266,7 +266,7 @@ typedef struct {
 	u64 __aligned(8) counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val)	{ (val) }
 
 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
 
@@ -278,7 +278,21 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
  * Atomically xchgs the value of @ptr to @new_val and returns
  * the old value.
  */
-extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
+{
+	long long o;
+	unsigned high = (unsigned)(n >> 32);
+	unsigned low = (unsigned)n;
+
+	asm volatile(
+	"1:			\n\t"
+	"cmpxchg8b (%%esi)	\n\t"
+	"jnz 1b			\n\t"
+	: "=&A" (o)
+	: "S" (v), "b" (low), "c" (high)
+	: "memory", "cc");
+	return o;
+}
 
 /**
  * atomic64_set - set atomic64 variable
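
The loop works because cmpxchg8b compares edx:eax against the 8-byte operand and, on a mismatch, reloads the current memory value into edx:eax; retrying until the compare hits stores ecx:ebx atomically and leaves the previous value in edx:eax (the "=&A" output). The same retry loop in portable C, sketched with GCC's __sync_val_compare_and_swap builtin rather than the inline asm:

    /* sketch only: equivalent CAS loop, not the code this header uses */
    static inline long long atomic64_xchg_sketch(atomic64_t *v, long long n)
    {
        long long old = v->counter;   /* racy seed; the CAS loop corrects it */
        for (;;) {
            long long seen = __sync_val_compare_and_swap(&v->counter, old, n);
            if (seen == old)          /* CAS hit: 'old' really was the value */
                return old;
            old = seen;               /* lost a race: retry with fresh value */
        }
    }
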
@@ -287,7 +301,20 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
  *
  * Atomically sets the value of @ptr to @new_val.
  */
-extern void atomic64_set(atomic64_t *ptr, u64 new_val);
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+	unsigned high = (unsigned)(i >> 32);
+	unsigned low = (unsigned)i;
+	asm volatile (
+	"1:			\n\t"
+	"cmpxchg8b (%%esi)	\n\t"
+	"jnz 1b			\n\t"
+	:
+	: "S" (v), "b" (low), "c" (high)
+	: "eax", "edx", "memory", "cc");
+}
+
+
 
 /**
  * atomic64_read - read atomic64 variable
@@ -317,7 +344,6 @@ static inline u64 atomic64_read(atomic64_t *ptr)
 	return res;
 }
 
-extern u64 atomic64_read(atomic64_t *ptr);
 
 /**
  * atomic64_add_return - add and return
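
A short usage sketch of the resulting 32-bit atomic64 API (only names declared in the hunks above are assumed):

    static atomic64_t frames = ATOMIC64_INIT(0);

    static void atomic64_demo(void)
    {
        u64 old, cur;
        atomic64_set(&frames, 42);          /* atomic 64-bit store      */
        old = atomic64_xchg(&frames, 100);  /* old == 42, frames == 100 */
        cur = atomic64_read(&frames);       /* cur == 100               */
        (void)old; (void)cur;
    }
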
@@ -15,6 +15,8 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 
+#define BIT_64(n)			(U64_C(1) << (n))
+
 /*
  * These have to be done with inline assembly: that way the bit-setting
  * is guaranteed to be atomic. All bit operations return 0 if the bit
@@ -262,6 +264,13 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
  * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
+ *
+ * Note: the operation is performed atomically with respect to
+ * the local CPU, but not other CPUs. Portable code should not
+ * rely on this behaviour.
+ * KVM relies on this behaviour on x86 for modifying memory that is also
+ * accessed from a hypervisor on the same CPU if running in a VM: don't change
+ * this without also updating arch/x86/kernel/kvm.c
  */
 static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
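
The note matters because the non-atomic form is a plain load/modify/store: two CPUs can both observe the bit set before either one clears it. In C terms, __test_and_clear_bit is roughly the following (a sketch, not the asm this header actually uses):

    /* sketch: the read and the write are separate steps, so two racing
     * callers can both see 'old & mask' nonzero for the same bit */
    static inline int __test_and_clear_bit_sketch(int nr,
                                                  volatile unsigned long *addr)
    {
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        unsigned long *p = (unsigned long *)addr + nr / BITS_PER_LONG;
        unsigned long old = *p;
        *p = old & ~mask;
        return (old & mask) != 0;
    }
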
@@ -309,7 +318,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
-		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+		(addr[nr / BITS_PER_LONG])) != 0;
 }
 
 static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
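
constant_test_bit() is the compile-time half of test_bit(): when nr is a constant, the whole expression folds down to a single mask test. The dispatcher, as in mainline bitops.h (shown here for context; it is not part of this hunk):

    #define test_bit(nr, addr)                  \
        (__builtin_constant_p((nr))             \
         ? constant_test_bit((nr), (addr))      \
         : variable_test_bit((nr), (addr)))
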
@@ -346,7 +355,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
  */
 static inline unsigned long __ffs(unsigned long word)
 {
-	asm("bsf %1,%0"
+	asm("rep; bsf %1,%0"
 		: "=r" (word)
 		: "rm" (word));
 	return word;
@@ -360,7 +369,7 @@ static inline unsigned long __ffs(unsigned long word)
  */
 static inline unsigned long ffz(unsigned long word)
 {
-	asm("bsf %1,%0"
+	asm("rep; bsf %1,%0"
 		: "=r" (word)
 		: "r" (~word));
 	return word;
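
"rep; bsf" is the encoding of TZCNT: CPUs that implement TZCNT execute it (faster, and well defined for a zero input), while older CPUs ignore the rep prefix and run a plain BSF, so behaviour for the documented inputs is unchanged. The contract both encodings satisfy, illustrated as a userspace-style sketch (assert() is not kernel code; inputs with no matching bit remain undefined):

    #include <assert.h>

    static void bit_scan_demo(void)
    {
        assert(__ffs(0x08UL) == 3);        /* index of the lowest set bit   */
        assert(ffz(~0UL ^ 0x8UL) == 3);    /* index of the lowest clear bit */
    }
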
@@ -380,6 +389,8 @@ static inline unsigned long __fls(unsigned long word)
 	return word;
 }
 
+#undef ADDR
+
 #ifdef __KERNEL__
 /**
  * ffs - find first set bit in word
@@ -398,7 +409,7 @@ static inline int ffs(int x)
 #ifdef CONFIG_X86_CMOV
 	asm("bsfl %1,%0\n\t"
 	    "cmovzl %2,%0"
-	    : "=r" (r) : "rm" (x), "r" (-1));
+	    : "=&r" (r) : "rm" (x), "r" (-1));
 #else
 	asm("bsfl %1,%0\n\t"
 	    "jnz 1f\n\t"
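
The early clobber is needed because bsfl writes %0 before cmovzl reads %2: without "&", the compiler may allocate the same register for the output and the -1 input, which bsfl then destroys. A sketch of the fixed helper in isolation (mirroring the CONFIG_X86_CMOV branch above; ffs() is 1-based in the kernel):

    static inline int ffs_sketch(int x)
    {
        int r;
        asm("bsfl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=&r" (r)             /* early clobber: r is written first  */
            : "rm" (x), "r" (-1));  /* so -1 must live in its own register */
        return r + 1;               /* 1-based index; 0 when x == 0       */
    }
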
@@ -6,7 +6,7 @@
 
 #include <asm/required-features.h>
 
-#define NCAPINTS	9	/* N 32-bit words worth of info */
+#define NCAPINTS	10	/* N 32-bit words worth of info */
 
 /*
  * Note: If the comment begins with a quoted string, that string is used
@@ -89,7 +89,7 @@
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
+					  /* 21 available, was AMD_C1E */
 #define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */
 #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */
@@ -97,6 +97,7 @@
 #define X86_FEATURE_EXTD_APICID	(3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM	(3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	(3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU	(3*32+29) /* "eagerfpu" Non lazy FPU restore */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
@@ -114,16 +115,20 @@
 #define X86_FEATURE_CX16	(4*32+13) /* CMPXCHG16B */
 #define X86_FEATURE_XTPR	(4*32+14) /* Send Task Priority Messages */
 #define X86_FEATURE_PDCM	(4*32+15) /* Performance Capabilities */
+#define X86_FEATURE_PCID	(4*32+17) /* Process Context Identifiers */
 #define X86_FEATURE_DCA		(4*32+18) /* Direct Cache Access */
 #define X86_FEATURE_XMM4_1	(4*32+19) /* "sse4_1" SSE-4.1 */
 #define X86_FEATURE_XMM4_2	(4*32+20) /* "sse4_2" SSE-4.2 */
 #define X86_FEATURE_X2APIC	(4*32+21) /* x2APIC */
 #define X86_FEATURE_MOVBE	(4*32+22) /* MOVBE instruction */
 #define X86_FEATURE_POPCNT	(4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER	(4*32+24) /* Tsc deadline timer */
 #define X86_FEATURE_AES		(4*32+25) /* AES instructions */
 #define X86_FEATURE_XSAVE	(4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
 #define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
 #define X86_FEATURE_AVX		(4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C	(4*32+29) /* 16-bit fp conversions */
+#define X86_FEATURE_RDRAND	(4*32+30) /* The RDRAND instruction */
 #define X86_FEATURE_HYPERVISOR	(4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
@@ -150,23 +155,62 @@
 #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
 #define X86_FEATURE_OSVW	(6*32+ 9) /* OS Visible Workaround */
 #define X86_FEATURE_IBS		(6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_SSE5	(6*32+11) /* SSE-5 */
+#define X86_FEATURE_XOP		(6*32+11) /* extended AVX instructions */
 #define X86_FEATURE_SKINIT	(6*32+12) /* SKINIT/STGI instructions */
 #define X86_FEATURE_WDT		(6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP		(6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4	(6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE		(6*32+17) /* translation cache extension */
+#define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM		(6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT	(6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
- * CPUID levels like 0x6, 0xA etc
+ * CPUID levels like 0x6, 0xA etc, word 7
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
 #define X86_FEATURE_ARAT	(7*32+ 1) /* Always Running APIC Timer */
+#define X86_FEATURE_CPB		(7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB		(7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_XSAVEOPT	(7*32+ 4) /* Optimized Xsave */
+#define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_DTHERM	(7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_HW_PSTATE	(7*32+ 8) /* AMD HW-PState */
 
-/* Virtualization flags: Linux defined */
+/* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI	(8*32+ 1) /* Intel Virtual NMI */
 #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
 #define X86_FEATURE_EPT		(8*32+ 3) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID	(8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_NPT		(8*32+ 5) /* AMD Nested Page Table support */
+#define X86_FEATURE_LBRV	(8*32+ 6) /* AMD LBR Virtualization support */
+#define X86_FEATURE_SVML	(8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
+#define X86_FEATURE_NRIPS	(8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR	(8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN	(8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID	(8*32+11) /* AMD flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER	(8*32+13) /* AMD filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD	(8*32+14) /* AMD pause filter threshold */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+#define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_BMI1	(9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE		(9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2	(9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP	(9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2	(9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS	(9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID	(9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM		(9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_RDSEED	(9*32+18) /* The RDSEED instruction */
+#define X86_FEATURE_ADX		(9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP	(9*32+20) /* Supervisor Mode Access Prevention */
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
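
Every flag above encodes word*32 + bit, so a feature number splits back into its capability word (">> 5") and bit position ("& 31"); the REQUIRED_MASK* tests in the next hunk rely on exactly this. A small sketch:

    static void feature_encoding_demo(void)
    {
        /* X86_FEATURE_AVX == 4*32 + 28 == 156 */
        unsigned word = X86_FEATURE_AVX >> 5;   /* == 4  (capability word) */
        unsigned pos  = X86_FEATURE_AVX & 31;   /* == 28 (bit in the word) */
        (void)word; (void)pos;
    }
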
@@ -178,8 +222,7 @@ extern const char * const x86_power_flags[32];
 #define test_cpu_cap(c, bit)						\
 	 test_bit(bit, (unsigned long *)((c)->x86_capability))
 
-#define cpu_has(c, bit)							\
-	(__builtin_constant_p(bit) &&					\
+#define REQUIRED_MASK_BIT_SET(bit)					\
 	 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) ||	\
 	   (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) ||	\
 	   (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) ||	\
@@ -187,10 +230,18 @@ extern const char * const x86_power_flags[32];
 	   (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) ||	\
 	   (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) ||	\
 	   (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
-	   (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) )	\
-	  ? 1 :								\
+	   (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ||	\
+	   (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) ||	\
+	   (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
+
+#define cpu_has(c, bit)							\
+	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
 	 test_cpu_cap(c, bit))
 
+#define this_cpu_has(bit)						\
+	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
+	 x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
+
 #define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)
 
 #define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
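
When bit is a compile-time constant covered by the build's REQUIRED_MASK*, REQUIRED_MASK_BIT_SET(bit) folds to 1, so cpu_has() and this_cpu_has() collapse to the constant 1 and the runtime test_bit disappears entirely. A sketch, assuming a kernel configured to require FPU (bit 0 of REQUIRED_MASK0) and hypothetical helper functions:

    void pick_path(struct cpuinfo_x86 *c)
    {
        if (cpu_has(c, X86_FEATURE_FPU))   /* folds to if (1) at compile time */
            fpu_path();                    /* hypothetical fast path          */
        else
            soft_float_path();             /* dead code, dropped by gcc       */
    }
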
@@ -219,7 +270,9 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
+#define cpu_has_ssse3		boot_cpu_has(X86_FEATURE_SSSE3)
+#define cpu_has_aes		boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
 #define cpu_has_mp		boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
@@ -247,8 +300,14 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_xsaveopt	boot_cpu_has(X86_FEATURE_XSAVEOPT)
+#define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
+#define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
+#define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
+#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
@@ -1,60 +1,66 @@
 #ifndef _ASM_X86_DIV64_H
 #define _ASM_X86_DIV64_H
 
 #ifdef CONFIG_X86_32
 
 #include <linux/types.h>
+#include <linux/log2.h>
 
 /*
  * do_div() is NOT a C function. It wants to return
  * two values (the quotient and the remainder), but
  * since that doesn't work very well in C, what it
  * does is:
  *
  * - modifies the 64-bit dividend _in_place_
  * - returns the 32-bit remainder
  *
  * This ends up being the most efficient "calling
  * convention" on x86.
  */
 #define do_div(n, base)						\
 ({								\
 	unsigned long __upper, __low, __high, __mod, __base;	\
 	__base = (base);					\
-	asm("":"=a" (__low), "=d" (__high) : "A" (n));		\
-	__upper = __high;					\
-	if (__high) {						\
-		__upper = __high % (__base);			\
-		__high = __high / (__base);			\
-	}							\
-	asm("divl %2":"=a" (__low), "=d" (__mod)		\
-	    : "rm" (__base), "0" (__low), "1" (__upper));	\
-	asm("":"=A" (n) : "a" (__low), "d" (__high));		\
+	if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
+		__mod = n & (__base - 1);			\
+		n >>= ilog2(__base);				\
+	} else {						\
+		asm("" : "=a" (__low), "=d" (__high) : "A" (n));\
+		__upper = __high;				\
+		if (__high) {					\
+			__upper = __high % (__base);		\
+			__high = __high / (__base);		\
+		}						\
+		asm("divl %2" : "=a" (__low), "=d" (__mod)	\
+			: "rm" (__base), "0" (__low), "1" (__upper));\
+		asm("" : "=A" (n) : "a" (__low), "d" (__high));	\
+	}							\
 	__mod;							\
 })
 
 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
 	union {
 		u64 v64;
 		u32 v32[2];
 	} d = { dividend };
 	u32 upper;
 
 	upper = d.v32[1];
 	d.v32[1] = 0;
 	if (upper >= divisor) {
 		d.v32[1] = upper / divisor;
 		upper %= divisor;
 	}
 	asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
 		"rm" (divisor), "0" (d.v32[0]), "1" (upper));
 	return d.v64;
 }
 #define div_u64_rem	div_u64_rem
 
 #else
 # include <asm-generic/div64.h>
 #endif /* CONFIG_X86_32 */
 
 #endif /* _ASM_X86_DIV64_H */
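
As the comment above says, do_div() modifies its dividend in place and evaluates to the 32-bit remainder; div_u64_rem() is the function-shaped equivalent. A usage sketch:

    static void div_demo(void)
    {
        u64 ns = 1000000123ULL;
        u32 rem = do_div(ns, 1000000);   /* ns becomes 1000, rem == 123 */

        u32 r;
        u64 q = div_u64_rem(1000000123ULL, 1000000, &r);  /* q == 1000, r == 123 */
        (void)rem; (void)q; (void)r;
    }
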
@@ -84,5 +84,7 @@
 #define REQUIRED_MASK5	0
 #define REQUIRED_MASK6	0
 #define REQUIRED_MASK7	0
+#define REQUIRED_MASK8	0
+#define REQUIRED_MASK9	0
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */