ddk: 3.19-rc1
git-svn-id: svn://kolibrios.org@5270 a494cfbc-eb01-0410-851d-a64ba20cac60
@@ -4,7 +4,7 @@
  * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
- * see Documentation/lockdep-design.txt for more details.
+ * see Documentation/locking/lockdep-design.txt for more details.
  */
 #ifndef __LINUX_LOCKDEP_H
 #define __LINUX_LOCKDEP_H
@@ -12,6 +12,10 @@
 struct task_struct;
 struct lockdep_map;
 
+/* for sysctl */
+extern int prove_locking;
+extern int lock_stat;
+
 #ifdef CONFIG_LOCKDEP
 
 #include <linux/linkage.h>
@@ -51,6 +55,8 @@ struct lock_class_key {
     struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
 };
 
+extern struct lock_class_key __lockdep_no_validate__;
+
 #define LOCKSTAT_POINTS 4
 
 /*
@@ -151,6 +157,24 @@ struct lockdep_map {
 #endif
 };
 
+static inline void lockdep_copy_map(struct lockdep_map *to,
+                                    struct lockdep_map *from)
+{
+    int i;
+
+    *to = *from;
+    /*
+     * Since the class cache can be modified concurrently we could observe
+     * half pointers (64bit arch using 32bit copy insns). Therefore clear
+     * the caches and take the performance hit.
+     *
+     * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
+     *     that relies on cache abuse.
+     */
+    for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+        to->class_cache[i] = NULL;
+}
+
 /*
  * Every lock has a list of other locks that were taken after it.
  * We only grow the list, never remove from it:
@@ -338,6 +362,10 @@ extern void lockdep_trace_alloc(gfp_t mask);
         WARN_ON(debug_locks && !lockdep_is_held(l)); \
     } while (0)
 
+#define lockdep_assert_held_once(l) do { \
+        WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
+    } while (0)
+
 #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
 
 #else /* !CONFIG_LOCKDEP */
@@ -388,6 +416,7 @@ struct lock_class_key { };
 #define lockdep_depth(tsk) (0)
 
 #define lockdep_assert_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
 
 #define lockdep_recursing(tsk) (0)
 
@@ -454,82 +483,35 @@ static inline void print_irqtrace_events(struct task_struct *curr)
  * on the per lock-class debug mode:
  */
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-#  define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
-# else
-#  define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-#  define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# endif
-# define spin_release(l, n, i) lock_release(l, n, i)
-#else
-# define spin_acquire(l, s, t, i) do { } while (0)
-# define spin_release(l, n, i) do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-#  define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i)
-# else
-#  define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-#  define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i)
-# endif
-# define rwlock_release(l, n, i) lock_release(l, n, i)
-#else
-# define rwlock_acquire(l, s, t, i) do { } while (0)
-# define rwlock_acquire_read(l, s, t, i) do { } while (0)
-# define rwlock_release(l, n, i) do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-#  define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
-# else
-#  define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-#  define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
-# endif
-# define mutex_release(l, n, i) lock_release(l, n, i)
-#else
-# define mutex_acquire(l, s, t, i) do { } while (0)
-# define mutex_acquire_nest(l, s, t, n, i) do { } while (0)
-# define mutex_release(l, n, i) do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-#  define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
-#  define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
-# else
-#  define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-#  define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
-#  define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
-# endif
-# define rwsem_release(l, n, i) lock_release(l, n, i)
-#else
-# define rwsem_acquire(l, s, t, i) do { } while (0)
-# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
-# define rwsem_acquire_read(l, s, t, i) do { } while (0)
-# define rwsem_release(l, n, i) do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
-#  define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
-# else
-#  define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
-#  define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
-# endif
-# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
-#else
-# define lock_map_acquire(l) do { } while (0)
-# define lock_map_acquire_read(l) do { } while (0)
-# define lock_map_release(l) do { } while (0)
-#endif
+#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
+#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
+#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
+
+#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define spin_release(l, n, i) lock_release(l, n, i)
+
+#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_release(l, n, i) lock_release(l, n, i)
+
+#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define seqcount_release(l, n, i) lock_release(l, n, i)
+
+#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define mutex_release(l, n, i) lock_release(l, n, i)
+
+#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
+#define rwsem_release(l, n, i) lock_release(l, n, i)
+
+#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
+#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
 
 #ifdef CONFIG_PROVE_LOCKING
 # define might_lock(lock) \
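
Note on the consolidation in the last hunk: lock_acquire() takes (lock, subclass, trylock, read, check, nest_lock, ip), so the three new wrappers fix read to 0 (exclusive), 1 (shared) or 2 (shared recursive) and always pass check=1, where the old CONFIG_PROVE_LOCKING branches passed check=2. A minimal expansion sketch of the new one-line aliases; the dep_map expressions are illustrative only and not taken from this patch:

    /* Illustrative expansion only; lock/sem are hypothetical objects with a dep_map. */
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        /* => lock_acquire_exclusive(&lock->dep_map, 0, 0, NULL, _RET_IP_) */
        /* => lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_)     */
    rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
        /* => lock_acquire_shared(&sem->dep_map, 0, 1, NULL, _RET_IP_)     */
        /* => lock_acquire(&sem->dep_map, 0, 1, 1, 1, NULL, _RET_IP_)      */
    spin_release(&lock->dep_map, 1, _RET_IP_);
        /* => lock_release(&lock->dep_map, 1, _RET_IP_)                    */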
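The newly added lockdep_assert_held_once() mirrors lockdep_assert_held() but warns only once via WARN_ON_ONCE(); with CONFIG_LOCKDEP disabled both reduce to a (void) cast of the argument. A minimal usage sketch, assuming a caller-holds-the-lock rule; the ring structure and function below are hypothetical and not part of this patch:

    #include <linux/bug.h>
    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    /* Hypothetical state protected by 'lock'; illustrative only. */
    struct ring {
        spinlock_t   lock;
        unsigned int head;
    };

    /* Callers must hold r->lock; under lockdep this warns (once) if they do not. */
    static void ring_advance(struct ring *r)
    {
        lockdep_assert_held_once(&r->lock);
        r->head++;
    }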