git-svn-id: svn://kolibrios.org@6082 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge)
2016-01-20 04:19:53 +00:00
parent fc6f59584d
commit 9e5dea1997
153 changed files with 11372 additions and 3456 deletions


@@ -90,7 +90,7 @@
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
-struct lock_class_key *key);
+struct lock_class_key *key);
# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
@@ -119,7 +119,7 @@ do { \
/*
* Despite its name it doesn't necessarily has to be a full barrier.
* It should only guarantee that a STORE before the critical section
-* can not be reordered with a LOAD inside this section.
+* can not be reordered with LOADs and STOREs inside this section.
* spin_lock() is the one-way barrier, this LOAD can not escape out
* of the region. So the default implementation simply ensures that
* a STORE can not move into the critical section, smp_wmb() should
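
The wording change above tightens what smp_mb__before_spinlock() must guarantee: a STORE issued before spin_lock() may not be reordered with the LOADs and STOREs inside the critical section. A minimal sketch of the pattern this barrier exists for; the lock, flag and function names below are hypothetical and not part of the change:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(waiter_lock);     /* hypothetical lock */
static int condition;                    /* hypothetical flag checked by a sleeper */

static void publish_and_notify(void)
{
	WRITE_ONCE(condition, 1);        /* STORE before the critical section ...  */
	smp_mb__before_spinlock();       /* ... must not drift past the lock below */
	spin_lock(&waiter_lock);
	/* wake up / inspect waiters here */
	spin_unlock(&waiter_lock);
}
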
@@ -129,16 +129,6 @@ do { \
#define smp_mb__before_spinlock() smp_wmb()
#endif
-/*
-* Place this after a lock-acquisition primitive to guarantee that
-* an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
-* if the UNLOCK and LOCK are executed by the same CPU or if the
-* UNLOCK and LOCK operate on the same lock variable.
-*/
-#ifndef smp_mb__after_unlock_lock
-#define smp_mb__after_unlock_lock() do { } while (0)
-#endif
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
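
The block removed here documented smp_mb__after_unlock_lock(), which promotes an UNLOCK immediately followed by a LOCK to a full memory barrier; after this change the helper is no longer provided by this header, so the sketch below only illustrates the pattern the removed comment describes, against the pre-change API and with hypothetical locks and variables:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);          /* hypothetical */
static DEFINE_SPINLOCK(lock_b);          /* hypothetical */
static int x, y;

static void unlock_then_lock(void)
{
	spin_lock(&lock_a);
	WRITE_ONCE(x, 1);
	spin_unlock(&lock_a);            /* UNLOCK ...                           */

	spin_lock(&lock_b);              /* ... followed by LOCK on another lock */
	smp_mb__after_unlock_lock();     /* promote the pair to a full barrier   */
	WRITE_ONCE(y, 1);
	spin_unlock(&lock_b);
}
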
@@ -189,6 +179,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
+# define raw_spin_lock_bh_nested(lock, subclass) \
+_raw_spin_lock_bh_nested(lock, subclass)
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
@@ -204,6 +196,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
+# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
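
The new raw_spin_lock_bh_nested() hook mirrors the existing _nested() variants: the subclass argument only matters under CONFIG_DEBUG_LOCK_ALLOC, otherwise the macro collapses to a plain _raw_spin_lock_bh(). A hedged sketch of how such an annotation is typically used when two locks of the same lockdep class must be held at once; the bucket structure and function are invented for illustration:

#include <linux/spinlock.h>

struct bucket {                          /* hypothetical structure */
	spinlock_t lock;
	int count;
};

/* Take two same-class locks in address order; the nesting annotation
 * tells lockdep this is not an AA deadlock. */
static void move_count(struct bucket *src, struct bucket *dst)
{
	struct bucket *first = src < dst ? src : dst;
	struct bucket *second = src < dst ? dst : src;

	spin_lock_bh(&first->lock);
	spin_lock_bh_nested(&second->lock, SINGLE_DEPTH_NESTING);
	dst->count += src->count;
	src->count = 0;
	spin_unlock_bh(&second->lock);
	spin_unlock_bh(&first->lock);
}
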
@@ -247,8 +240,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
#define raw_spin_unlock_irqrestore(lock, flags) \
-do { \
-typecheck(unsigned long, flags); \
+do { \
+typecheck(unsigned long, flags); \
_raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
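
The typecheck() in raw_spin_unlock_irqrestore() is why the flags argument of the irqsave/irqrestore pair has to be a plain unsigned long: passing an int produces a compile-time warning instead of silently truncating the saved IRQ state. A short usage sketch with an invented device structure:

#include <linux/spinlock.h>

struct mydev {                           /* hypothetical */
	spinlock_t lock;
	unsigned int pending;
};

static void mydev_ack(struct mydev *dev)
{
	unsigned long flags;             /* must be unsigned long, see typecheck() */

	spin_lock_irqsave(&dev->lock, flags);
	dev->pending = 0;
	spin_unlock_irqrestore(&dev->lock, flags);
}
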
@@ -292,7 +285,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
@@ -303,17 +296,17 @@ do { \
raw_spin_lock_init(&(_lock)->rlock); \
} while (0)
-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
{
return raw_spin_trylock(&lock->rlock);
}
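
These spin_* wrappers do nothing but forward to the raw_* primitives on &lock->rlock; the switch from inline to __always_inline simply forbids the compiler from turning such trivial bodies into out-of-line calls. Usage is unchanged, e.g. (names invented):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);      /* hypothetical */
static unsigned long stats_packets;

static void stats_inc(void)
{
	spin_lock(&stats_lock);          /* compiles down to raw_spin_lock(&stats_lock.rlock) */
	stats_packets++;
	spin_unlock(&stats_lock);
}
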
@@ -323,12 +316,17 @@ do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
+#define spin_lock_bh_nested(lock, subclass) \
+do { \
+raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
+} while (0)
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
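
spin_lock_bh_nested() completes the BH family added above, while spin_lock_irq() remains the variant for callers that know interrupts are currently enabled, so no flags word needs to be saved. A brief sketch of the _irq form (invented names); data that is also touched from hard-IRQ context needs this or the _irqsave variant rather than plain spin_lock():

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(queue_lock);      /* hypothetical */
static int queue_len;

/* Process context, IRQs known to be enabled on entry. */
static void queue_reset(void)
{
	spin_lock_irq(&queue_lock);
	queue_len = 0;
	spin_unlock_irq(&queue_lock);    /* unconditionally re-enables IRQs */
}
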
@@ -343,57 +341,57 @@ do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
{
raw_spin_unlock(&lock->rlock);
}
-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
raw_spin_unlock_irq(&lock->rlock);
}
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
return raw_spin_trylock_irq(&lock->rlock);
}
#define spin_trylock_irqsave(lock, flags) \
-({ \
+({ \
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
raw_spin_unlock_wait(&lock->rlock);
}
-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
}
-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
{
return raw_spin_is_contended(&lock->rlock);
}
-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
{
return raw_spin_can_lock(&lock->rlock);
}
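
The trylock and status wrappers at the end follow the same forwarding pattern; spin_trylock() returns nonzero on success, so callers typically fall back or bail out instead of spinning when the lock is contended. A closing sketch with invented names:

#include <linux/types.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(log_lock);        /* hypothetical */

static bool log_event_fast(const char *msg)
{
	if (!spin_trylock(&log_lock))
		return false;            /* contended: caller may retry later */
	/* ... append msg to a log buffer under the lock ... */
	spin_unlock(&log_lock);
	return true;
}
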