ddk: 3.17-rc1

git-svn-id: svn://kolibrios.org@5056 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge)
2014-08-23 10:29:27 +00:00
parent 144b0c94b0
commit 0ede580d2f
79 changed files with 6316 additions and 2130 deletions

View File

@@ -7,4 +7,36 @@
#ifndef _LINUX_BACKLIGHT_H
#define _LINUX_BACKLIGHT_H
/* Notes on locking:
*
* backlight_device->ops_lock is an internal backlight lock protecting the
* ops pointer and no code outside the core should need to touch it.
*
* Access to update_status() is serialised by the update_lock mutex since
* most drivers seem to need this and historically get it wrong.
*
* Most drivers don't need locking on their get_brightness() method.
* If yours does, you need to implement it in the driver. You can use the
* update_lock mutex if appropriate.
*
* Any other use of the locks below is probably wrong.
*/
enum backlight_update_reason {
BACKLIGHT_UPDATE_HOTKEY,
BACKLIGHT_UPDATE_SYSFS,
};
enum backlight_type {
BACKLIGHT_RAW = 1,
BACKLIGHT_PLATFORM,
BACKLIGHT_FIRMWARE,
BACKLIGHT_TYPE_MAX,
};
enum backlight_notification {
BACKLIGHT_REGISTERED,
BACKLIGHT_UNREGISTERED,
};
#endif
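As a usage sketch: a driver fills in struct backlight_properties with one of the backlight_type values above and registers itself. backlight_device_register() is assumed from the full header, and the 'acme' names are hypothetical:

struct backlight_properties props = {
	.type = BACKLIGHT_RAW,		/* controlled directly by the driver */
	.max_brightness = 255,
};
struct backlight_device *bd;

bd = backlight_device_register("acme-bl", dev, priv, &acme_bl_ops, &props);
if (IS_ERR(bd))
	return PTR_ERR(bd);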

View File

@@ -88,32 +88,32 @@
* lib/bitmap.c provides these functions:
*/
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
extern int __bitmap_full(const unsigned long *bitmap, int bits);
extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
int bits);
unsigned int nbits);
extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
extern void bitmap_set(unsigned long *map, int i, int len);
extern void bitmap_clear(unsigned long *map, int start, int nr);
extern void bitmap_set(unsigned long *map, unsigned int start, int len);
extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
@@ -140,9 +140,9 @@ extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
@@ -188,15 +188,15 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
}
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2) != 0;
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
@@ -205,7 +205,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
}
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
@@ -214,24 +214,24 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
}
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2)) != 0;
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
int nbits)
unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
*dst = ~(*src);
else
__bitmap_complement(dst, src, nbits);
}
static inline int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -240,7 +240,7 @@ static inline int bitmap_equal(const unsigned long *src1,
}
static inline int bitmap_intersects(const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -249,7 +249,7 @@ static inline int bitmap_intersects(const unsigned long *src1,
}
static inline int bitmap_subset(const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -257,7 +257,7 @@ static inline int bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
static inline int bitmap_empty(const unsigned long *src, int nbits)
static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -265,7 +265,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits)
return __bitmap_empty(src, nbits);
}
static inline int bitmap_full(const unsigned long *src, int nbits)
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -273,7 +273,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
return __bitmap_full(src, nbits);
}
static inline int bitmap_weight(const unsigned long *src, int nbits)
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -284,7 +284,7 @@ static inline void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (small_const_nbits(nbits))
*dst = *src >> n;
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
else
__bitmap_shift_right(dst, src, n, nbits);
}
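A minimal sketch of the API above; 'busy' is a hypothetical allocation map, and DECLARE_BITMAP() and bitmap_zero() come from the same header family:

DECLARE_BITMAP(busy, 128);
int used;

bitmap_zero(busy, 128);			/* all slots free */
bitmap_set(busy, 0, 16);		/* mark slots 0..15 in use */
used = bitmap_weight(busy, 128);	/* -> 16 */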

View File

@@ -4,12 +4,23 @@
#ifdef __KERNEL__
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
/*
* Create a contiguous bitmask starting at bit position @l and ending at
* position @h. For example
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
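For example ('reg' stands in for an arbitrary register value):

u32 mask = GENMASK(7, 4);		/* 0x000000f0 */
u64 wide = GENMASK_ULL(39, 21);		/* 0x000000ffffe00000, as in the comment */
u32 fld  = (reg & GENMASK(7, 4)) >> 4;	/* extract the bit 7..4 field of reg */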
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
@@ -185,6 +196,21 @@ static inline unsigned long __ffs64(u64 word)
#ifdef __KERNEL__
#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits) \
({ \
const typeof(*ptr) mask = (_mask), bits = (_bits); \
typeof(*ptr) old, new; \
\
do { \
old = ACCESS_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
\
new; \
})
#endif
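A short usage sketch of set_mask_bits(): atomically replace one field inside a flags word via the cmpxchg() loop above. 'flags' and the STATE_* definitions are hypothetical:

static unsigned long flags;
#define STATE_MASK	0x3UL
#define STATE_RUNNING	0x2UL

set_mask_bits(&flags, STATE_MASK, STATE_RUNNING); /* (flags & ~0x3) | 0x2 */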
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region

View File

@@ -57,6 +57,7 @@
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
/* Force a compilation error if a constant expression is not a power of 2 */
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))

View File

@@ -2,7 +2,7 @@
#define _LINUX_BYTEORDER_GENERIC_H
/*
* linux/byteorder_generic.h
* linux/byteorder/generic.h
* Generic Byte-reordering support
*
* The "... p" macros, like le64_to_cpup, can be used with pointers

View File

@@ -37,6 +37,9 @@
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); })
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
#ifdef __CHECKER__
#define __must_be_array(arr) 0
#else
@@ -50,9 +53,14 @@
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
# define inline inline __attribute__((always_inline))
# define __inline__ __inline__ __attribute__((always_inline))
# define __inline __inline __attribute__((always_inline))
# define inline inline __attribute__((always_inline)) notrace
# define __inline__ __inline__ __attribute__((always_inline)) notrace
# define __inline __inline __attribute__((always_inline)) notrace
#else
/* A lot of inline functions can cause havoc with function tracing */
# define inline inline notrace
# define __inline__ __inline__ notrace
# define __inline __inline notrace
#endif
#define __deprecated __attribute__((deprecated))

View File

@@ -75,11 +75,7 @@
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
#if GCC_VERSION <= 40801
# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
#else
# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
#endif
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400

View File

@@ -63,6 +63,13 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# include <linux/compiler-intel.h>
#endif
/* Clang compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
*/
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif
/*
* Generic compiler-dependent macros required for kernel
* build go below this comment. Actual compiler/compiler version
@@ -170,6 +177,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
(typeof(ptr)) (__ptr + (off)); })
#endif
#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
@@ -298,6 +309,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
@@ -307,9 +323,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
* Sparse complains of variable sized arrays due to the temporary variable in
* __compiletime_assert. Unfortunately we can't just expand it out to make
* sparse see a constant array size without breaking compiletime_assert on old
* versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
*/
# ifndef __CHECKER__
# define __compiletime_error_fallback(condition) \
do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
#else
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
@@ -337,6 +362,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
#define compiletime_assert_atomic_type(t) \
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
@@ -354,7 +383,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
# define nokprobe_inline __always_inline
#else
# define __kprobes
# define nokprobe_inline inline
#endif
#endif /* __LINUX_COMPILER_H */

View File

@@ -115,6 +115,7 @@ struct dma_buf_ops {
* @exp_name: name of the exporter; useful for debugging.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
*/
struct dma_buf {
size_t size;
@@ -168,10 +169,11 @@ void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags, const char *);
size_t size, int flags, const char *,
struct reservation_object *);
#define dma_buf_export(priv, ops, size, flags) \
dma_buf_export_named(priv, ops, size, flags, __FILE__)
#define dma_buf_export(priv, ops, size, flags, resv) \
dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
@@ -194,4 +196,6 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
int dma_buf_debugfs_create_file(const char *name,
int (*write)(struct seq_file *));
#endif /* __DMA_BUF_H__ */
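A hedged sketch of the updated export call; 'my_dmabuf_ops', 'resv' and the O_RDWR flags are assumed driver state, and the exporter name now comes from KBUILD_MODNAME rather than __FILE__:

struct dma_buf *buf;

buf = dma_buf_export(priv, &my_dmabuf_ops, size, O_RDWR, resv);
if (IS_ERR(buf))
	return PTR_ERR(buf);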

View File

@@ -2,12 +2,13 @@
#define _LINUX_ERR_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <errno.h>
/*
* Kernel pointers have redundant information, so we can use a
* scheme where we can return either an error code or a dentry
* scheme where we can return either an error code or a normal
* pointer with the same return value.
*
* This should be a per-architecture thing, to allow different
@@ -29,12 +30,12 @@ static inline long __must_check PTR_ERR(__force const void *ptr)
return (long) ptr;
}
static inline long __must_check IS_ERR(__force const void *ptr)
static inline bool __must_check IS_ERR(__force const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr)
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
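The canonical calling pattern, with hypothetical 'widget' names (ERR_PTR() is the inverse helper from this same header):

static struct widget *widget_create(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return ERR_PTR(-ENOMEM);	/* error encoded in the pointer */
	return w;
}

/* caller */
struct widget *w = widget_create();
if (IS_ERR(w))
	return PTR_ERR(w);			/* recover the errno */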

View File

@@ -413,6 +413,8 @@ struct vm_area_struct;
struct fb_info;
struct device;
struct file;
struct videomode;
struct device_node;
/* Definitions below are used in the parsed monitor specs */
#define FB_DPMS_ACTIVE_OFF 1
@@ -439,6 +441,7 @@ struct file;
#define FB_MISC_PRIM_COLOR 1
#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */
#define FB_MISC_HDMI 4
struct fb_chroma {
__u32 redx; /* in fraction of 1024 */
__u32 greenx;
@@ -690,6 +693,10 @@ struct fb_ops {
/* teardown any resources to do with this framebuffer */
void (*fb_destroy)(struct fb_info *info);
/* called at KDB enter and leave time to prepare the console */
int (*fb_debug_enter)(struct fb_info *info);
int (*fb_debug_leave)(struct fb_info *info);
};
#ifdef CONFIG_FB_TILEBLITTING
@@ -938,7 +945,7 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
#define fb_memcpy_fromfb sbus_memcpy_fromio
#define fb_memcpy_tofb sbus_memcpy_toio
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw
@@ -999,7 +1006,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
extern void remove_conflicting_framebuffers(struct apertures_struct *a,
extern int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
extern int fb_show_logo(struct fb_info *fb_info, int rotate);
@@ -1027,7 +1034,7 @@ static inline void unlock_fb_info(struct fb_info *info)
static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
u8 *src, u32 s_pitch, u32 height)
{
int i, j;
u32 i, j;
d_pitch -= s_pitch;

View File

@@ -4,4 +4,5 @@
#ifndef __LINUX_FILE_H
#define __LINUX_FILE_H
struct file;
#endif /* __LINUX_FILE_H */

View File

@@ -43,9 +43,11 @@ struct builtin_fw {
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_nowait(
struct module *module, int uevent,
const char *name, struct device *device, void *context,
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context));
int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
void release_firmware(const struct firmware *fw);

View File

@@ -0,0 +1,205 @@
/*
* Statically sized hash table implementation
* (C) 2012 Sasha Levin <levinsasha928@gmail.com>
*/
#ifndef _LINUX_HASHTABLE_H
#define _LINUX_HASHTABLE_H
#include <linux/list.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/rculist.h>
#define DEFINE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))
/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits) \
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
INIT_HLIST_HEAD(&ht[i]);
}
/**
* hash_init - initialize a hash table
* @hashtable: hashtable to be initialized
*
* Calculates the size of the hashtable from the given parameter, otherwise
* same as hash_init_size.
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
/**
* hash_add - add an object to a hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add(hashtable, node, key) \
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_add_rcu - add an object to a rcu enabled hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add_rcu(hashtable, node, key) \
hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_hashed - check whether an object is in any hashtable
* @node: the &struct hlist_node of the object to be checked
*/
static inline bool hash_hashed(struct hlist_node *node)
{
return !hlist_unhashed(node);
}
static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
if (!hlist_empty(&ht[i]))
return false;
return true;
}
/**
* hash_empty - check whether a hashtable is empty
* @hashtable: hashtable to check
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
/**
* hash_del - remove an object from a hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del(struct hlist_node *node)
{
hlist_del_init(node);
}
/**
* hash_del_rcu - remove an object from a rcu enabled hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del_rcu(struct hlist_node *node)
{
hlist_del_init_rcu(node);
}
/**
* hash_for_each - iterate over a hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry(obj, &name[bkt], member)
/**
* hash_for_each_rcu - iterate over a rcu enabled hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_rcu(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_rcu(obj, &name[bkt], member)
/**
* hash_for_each_safe - iterate over a hashtable safe against removal of
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @tmp: a &struct used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_safe(name, bkt, tmp, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
/**
* hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible(name, obj, member, key) \
hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_rcu - iterate over all possible objects hashing to the
* same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_rcu(name, obj, member, key) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
member)
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
* to the same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*
* This is the same as hash_for_each_possible_rcu() except that it does
* not do any RCU debugging or tracing.
*/
#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
hlist_for_each_entry_rcu_notrace(obj, \
&name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @tmp: a &struct used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
hlist_for_each_entry_safe(obj, tmp,\
&name[hash_min(key, HASH_BITS(name))], member)
#endif
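A sketch tying the pieces together; 'struct object' and its integer id key are hypothetical:

struct object {
	int id;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(objects, 6);		/* 1 << 6 = 64 buckets */

static void object_insert(struct object *obj)
{
	hash_add(objects, &obj->node, obj->id);
}

static struct object *object_find(int id)
{
	struct object *obj;

	hash_for_each_possible(objects, obj, node, id)
		if (obj->id == id)
			return obj;
	return NULL;
}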

View File

@@ -262,6 +262,18 @@ union hdmi_vendor_any_infoframe {
struct hdmi_vendor_infoframe hdmi;
};
/**
* union hdmi_infoframe - overall union of all abstract infoframe representations
* @any: generic infoframe
* @avi: avi infoframe
* @spd: spd infoframe
* @vendor: union of all vendor infoframes
* @audio: audio infoframe
*
* This is used by the generic pack function. This works since all infoframes
* have the same header which also indicates which type of infoframe should be
* packed.
*/
union hdmi_infoframe {
struct hdmi_any_infoframe any;
struct hdmi_avi_infoframe avi;

View File

@@ -135,7 +135,6 @@ struct i2c_driver {
* @name: Indicates the type of the device, usually a chip name that's
* generic enough to hide second-sourcing and compatible revisions.
* @adapter: manages the bus segment hosting this I2C device
* @driver: device's driver, hence pointer to access routines
* @dev: Driver model device node for the slave.
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
@@ -152,7 +151,6 @@ struct i2c_client {
/* _LOWER_ 7 bits */
char name[I2C_NAME_SIZE];
struct i2c_adapter *adapter; /* the adapter we sit on */
struct i2c_driver *driver; /* and our access routines */
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head detected;
@@ -160,6 +158,7 @@ struct i2c_client {
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
extern struct i2c_client *i2c_verify_client(struct device *dev);
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
/**
* struct i2c_board_info - template for device creation
@@ -209,6 +208,10 @@ struct i2c_board_info {
* i2c_algorithm is the interface to a class of hardware solutions which can
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
*
* The return codes from the @master_xfer field should indicate the type of
* error code that occurred during the transfer, as documented in the kernel
* Documentation file Documentation/i2c/fault-codes.
*/
struct i2c_algorithm {
/* If an adapter algorithm can't do I2C-level access, set master_xfer
@@ -275,6 +278,7 @@ void i2c_unlock_adapter(struct i2c_adapter *);
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */
#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU

View File

@@ -35,21 +35,24 @@
struct idr_layer {
int prefix; /* the ID prefix of this idr_layer */
DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
int layer; /* distance from leaf */
struct idr_layer __rcu *ary[1<<IDR_BITS];
int count; /* When zero, we can release it */
int layer; /* distance from leaf */
union {
/* A zero bit means "space here" */
DECLARE_BITMAP(bitmap, IDR_SIZE);
struct rcu_head rcu_head;
};
};
struct idr {
struct idr_layer __rcu *hint; /* the last layer allocated from */
struct idr_layer __rcu *top;
struct idr_layer *id_free;
int layers; /* only valid w/o concurrent changes */
int id_free_cnt;
int cur; /* current pos for cyclic allocation */
spinlock_t lock;
int id_free_cnt;
struct idr_layer *id_free;
};
#define IDR_INIT(name) \
@@ -88,9 +91,9 @@ int idr_for_each(struct idr *idp,
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_free(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);
/**
* idr_preload_end - end preload section started with idr_preload()
@@ -138,69 +141,6 @@ static inline void *idr_find(struct idr *idr, int id)
#define idr_for_each_entry(idp, entry, id) \
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
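The replacement interface referred to in the deprecation notes below looks like this in practice ('my_idr' and 'ptr' are hypothetical):

static DEFINE_IDR(my_idr);
int id;

idr_preload(GFP_KERNEL);
id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);	/* any free id >= 1 */
idr_preload_end();
if (id < 0)
	return id;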
/*
* Don't use the following functions. These exist only to suppress
* deprecated warnings on EXPORT_SYMBOL()s.
*/
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void __idr_remove_all(struct idr *idp);
/**
* idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
* @gfp_mask: memory allocation flags
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
return __idr_pre_get(idp, gfp_mask);
}
/**
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @starting_id: id to start search at
* @id: pointer to the allocated handle
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr,
int starting_id, int *id)
{
return __idr_get_new_above(idp, ptr, starting_id, id);
}
/**
* idr_get_new - allocate new idr entry
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id)
{
return __idr_get_new_above(idp, ptr, 0, id);
}
/**
* idr_remove_all - remove all ids from the given idr tree
* @idp: idr handle
*
* If you're trying to destroy @idp, calling idr_destroy() is enough.
* This is going away. Don't use.
*/
static inline void __deprecated idr_remove_all(struct idr *idp)
{
__idr_remove_all(idp);
}
/*
* IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary.

View File

@@ -0,0 +1,27 @@
#ifndef _LINUX_INTERVAL_TREE_H
#define _LINUX_INTERVAL_TREE_H
#include <linux/rbtree.h>
struct interval_tree_node {
struct rb_node rb;
unsigned long start; /* Start of interval */
unsigned long last; /* Last location _in_ interval */
unsigned long __subtree_last;
};
extern void
interval_tree_insert(struct interval_tree_node *node, struct rb_root *root);
extern void
interval_tree_remove(struct interval_tree_node *node, struct rb_root *root);
extern struct interval_tree_node *
interval_tree_iter_first(struct rb_root *root,
unsigned long start, unsigned long last);
extern struct interval_tree_node *
interval_tree_iter_next(struct interval_tree_node *node,
unsigned long start, unsigned long last);
#endif /* _LINUX_INTERVAL_TREE_H */
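A usage sketch with hypothetical endpoints:

struct rb_root root = RB_ROOT;
struct interval_tree_node node = { .start = 10, .last = 19 };
struct interval_tree_node *it;

interval_tree_insert(&node, &root);

/* walk every stored interval intersecting [15;30] */
for (it = interval_tree_iter_first(&root, 15, 30); it;
     it = interval_tree_iter_next(it, 15, 30))
	;	/* visits 'node', since [10;19] overlaps [15;30] */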

View File

@@ -0,0 +1,191 @@
/*
Interval Trees
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
include/linux/interval_tree_generic.h
*/
#include <linux/rbtree_augmented.h>
/*
* Template for implementing interval trees
*
* ITSTRUCT: struct type of the interval tree nodes
* ITRB: name of struct rb_node field within ITSTRUCT
* ITTYPE: type of the interval endpoints
* ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree
* ITSTART(n): start endpoint of ITSTRUCT node n
* ITLAST(n): last endpoint of ITSTRUCT node n
* ITSTATIC: 'static' or empty
* ITPREFIX: prefix to use for the inline tree definitions
*
* Note - before using this, please consider if non-generic version
* (interval_tree.h) would work for you...
*/
#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \
ITSTART, ITLAST, ITSTATIC, ITPREFIX) \
\
/* Callbacks for augmented rbtree insert and remove */ \
\
static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
{ \
ITTYPE max = ITLAST(node), subtree_last; \
if (node->ITRB.rb_left) { \
subtree_last = rb_entry(node->ITRB.rb_left, \
ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) \
max = subtree_last; \
} \
if (node->ITRB.rb_right) { \
subtree_last = rb_entry(node->ITRB.rb_right, \
ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) \
max = subtree_last; \
} \
return max; \
} \
\
RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
\
/* Insert / remove interval nodes from the tree */ \
\
ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
{ \
struct rb_node **link = &root->rb_node, *rb_parent = NULL; \
ITTYPE start = ITSTART(node), last = ITLAST(node); \
ITSTRUCT *parent; \
\
while (*link) { \
rb_parent = *link; \
parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
if (parent->ITSUBTREE < last) \
parent->ITSUBTREE = last; \
if (start < ITSTART(parent)) \
link = &parent->ITRB.rb_left; \
else \
link = &parent->ITRB.rb_right; \
} \
\
node->ITSUBTREE = last; \
rb_link_node(&node->ITRB, rb_parent, link); \
rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
{ \
rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
/* \
* Iterate over intervals intersecting [start;last] \
* \
* Note that a node's interval intersects [start;last] iff: \
* Cond1: ITSTART(node) <= last \
* and \
* Cond2: start <= ITLAST(node) \
*/ \
\
static ITSTRUCT * \
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
while (true) { \
/* \
* Loop invariant: start <= node->ITSUBTREE \
* (Cond2 is satisfied by one of the subtree nodes) \
*/ \
if (node->ITRB.rb_left) { \
ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
ITSTRUCT, ITRB); \
if (start <= left->ITSUBTREE) { \
/* \
* Some nodes in left subtree satisfy Cond2. \
* Iterate to find the leftmost such node N. \
* If it also satisfies Cond1, that's the \
* match we are looking for. Otherwise, there \
* is no matching interval as nodes to the \
* right of N can't satisfy Cond1 either. \
*/ \
node = left; \
continue; \
} \
} \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
if (node->ITRB.rb_right) { \
node = rb_entry(node->ITRB.rb_right, \
ITSTRUCT, ITRB); \
if (start <= node->ITSUBTREE) \
continue; \
} \
} \
return NULL; /* No match */ \
} \
} \
\
ITSTATIC ITSTRUCT * \
ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \
{ \
ITSTRUCT *node; \
\
if (!root->rb_node) \
return NULL; \
node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
if (node->ITSUBTREE < start) \
return NULL; \
return ITPREFIX ## _subtree_search(node, start, last); \
} \
\
ITSTATIC ITSTRUCT * \
ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
struct rb_node *rb = node->ITRB.rb_right, *prev; \
\
while (true) { \
/* \
* Loop invariants: \
* Cond1: ITSTART(node) <= last \
* rb == node->ITRB.rb_right \
* \
* First, search right subtree if suitable \
*/ \
if (rb) { \
ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
if (start <= right->ITSUBTREE) \
return ITPREFIX ## _subtree_search(right, \
start, last); \
} \
\
/* Move up the tree until we come from a node's left child */ \
do { \
rb = rb_parent(&node->ITRB); \
if (!rb) \
return NULL; \
prev = &node->ITRB; \
node = rb_entry(rb, ITSTRUCT, ITRB); \
rb = node->ITRB.rb_right; \
} while (prev == rb); \
\
/* Check if the node intersects [start;last] */ \
if (last < ITSTART(node)) /* !Cond1 */ \
return NULL; \
else if (start <= ITLAST(node)) /* Cond2 */ \
return node; \
} \
}
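For reference, instantiating the template for the plain interval_tree_node of interval_tree.h takes a single invocation (this mirrors lib/interval_tree.c in the same kernel series):

#define START(node) ((node)->start)
#define LAST(node)  ((node)->last)

INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
		     unsigned long, __subtree_last,
		     START, LAST,, interval_tree)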

View File

@@ -57,7 +57,7 @@ struct resource_list {
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */

View File

@@ -14,6 +14,6 @@ enum irqreturn {
};
typedef enum irqreturn irqreturn_t;
#define IRQ_RETVAL(x) ((x) != IRQ_NONE)
#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
#endif

View File

@@ -76,10 +76,18 @@ extern int register_refined_jiffies(long clock_tick_rate);
* The 64-bit value is not atomic - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
*/
extern u64 jiffies_64;
extern unsigned long volatile jiffies;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
static inline u64 get_jiffies_64(void)
{
return (u64)GetTimerTicks();
return (u64)jiffies;
}
#endif
/*
* These inlines deal with timer wrapping correctly. You are
@@ -290,6 +298,12 @@ extern unsigned long preset_lpj;
*/
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
static inline u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
extern unsigned long msecs_to_jiffies(const unsigned int m);
extern unsigned long usecs_to_jiffies(const unsigned int u);
extern unsigned long timespec_to_jiffies(const struct timespec *value);
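A small sketch of the conversion helpers together with the wrap-safe comparisons noted above ('timeout' is hypothetical):

unsigned long timeout = jiffies + msecs_to_jiffies(250); /* 250 ms deadline */
u64 tick_ns = jiffies_to_nsecs(1);			 /* one tick in ns */

if (time_after(jiffies, timeout))
	;	/* deadline passed, even across a jiffies wrap */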

View File

@@ -31,6 +31,19 @@
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
#define U16_MAX ((u16)~0U)
#define S16_MAX ((s16)(U16_MAX>>1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
#define S64_MAX ((s64)(U64_MAX>>1))
#define S64_MIN ((s64)(-S64_MAX - 1))
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
@@ -126,6 +139,13 @@
*/
#define lower_32_bits(n) ((u32)(n))
#define abs64(x) ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
})
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */
@@ -159,6 +179,9 @@ int hex2bin(u8 *dst, const char *src, size_t count);
#define printk(fmt, arg...) dbgprintf(fmt , ##arg)
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
/*
* min()/max()/clamp() macros that also do
@@ -493,5 +516,35 @@ extern unsigned int tsc_khz;
})
static inline __must_check long __copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
#ifdef CONFIG_64BIT
case 8:
*(u64 __force *)to = *(u64 *)from;
return 0;
#endif
default:
break;
}
}
memcpy((void __force *)to, from, n);
return 0;
}
struct seq_file;
#endif

View File

@@ -0,0 +1,24 @@
#ifndef _KDB_H
#define _KDB_H
/*
* Kernel Debugger Architecture Independent Global Headers
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
typedef enum {
KDB_REPEAT_NONE = 0, /* Do not repeat this command */
KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
} kdb_repeat_t;
typedef int (*kdb_func_t)(int, const char **);
#endif /* !_KDB_H */

View File

@@ -31,8 +31,10 @@
#define UEVENT_NUM_ENVP 32 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
#ifdef CONFIG_UEVENT_HELPER
/* path to the userspace helper executed on an event */
extern char uevent_helper[];
#endif
/* counter to tag the uevent, read only except for the kobject core */
extern u64 uevent_seqnum;
@@ -65,6 +67,9 @@ struct kobject {
struct kobj_type *ktype;
// struct sysfs_dirent *sd;
struct kref kref;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
struct delayed_work release;
#endif
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
@@ -103,6 +108,7 @@ extern int __must_check kobject_move(struct kobject *, struct kobject *);
extern struct kobject *kobject_get(struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
struct kobj_type {

View File

@@ -360,6 +360,17 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
/**
* list_last_entry - get the last element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*
* Note, that list is expected to be not empty.
*/
#define list_last_entry(ptr, type, member) \
list_entry((ptr)->prev, type, member)
/**
* list_first_entry_or_null - get the first element from a list
* @ptr: the list head to take the element from.
@@ -371,6 +382,22 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_first_entry_or_null(ptr, type, member) \
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_struct within the struct.
*/
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_struct within the struct.
*/
#define list_prev_entry(pos, member) \
list_entry((pos)->member.prev, typeof(*(pos)), member)
/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
@@ -415,9 +442,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
for (pos = list_first_entry(head, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
pos = list_next_entry(pos, member))
/**
* list_for_each_entry_reverse - iterate backwards over list of given type.
@@ -426,9 +453,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
for (pos = list_last_entry(head, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
pos = list_prev_entry(pos, member))
/**
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
@@ -451,9 +478,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* the current position.
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \
for (pos = list_next_entry(pos, member); \
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
pos = list_next_entry(pos, member))
/**
* list_for_each_entry_continue_reverse - iterate backwards from the given point
@@ -465,9 +492,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* the current position.
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
for (pos = list_prev_entry(pos, member); \
&pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
pos = list_prev_entry(pos, member))
/**
* list_for_each_entry_from - iterate over list of given type from the current point
@@ -479,7 +506,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_from(pos, head, member) \
for (; &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
pos = list_next_entry(pos, member))
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
@@ -489,10 +516,10 @@ static inline void list_splice_tail_init(struct list_head *list,
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
for (pos = list_first_entry(head, typeof(*pos), member), \
n = list_next_entry(pos, member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_continue - continue list iteration safe against removal
@@ -505,10 +532,10 @@ static inline void list_splice_tail_init(struct list_head *list,
* safe against removal of list entry.
*/
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
for (pos = list_next_entry(pos, member), \
n = list_next_entry(pos, member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_from - iterate over list from current point safe against removal
@@ -521,9 +548,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* removal of list entry.
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_entry(pos->member.next, typeof(*pos), member); \
for (n = list_next_entry(pos, member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
@@ -536,10 +563,10 @@ static inline void list_splice_tail_init(struct list_head *list,
* of list entry.
*/
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member), \
n = list_entry(pos->member.prev, typeof(*pos), member); \
for (pos = list_last_entry(head, typeof(*pos), member), \
n = list_prev_entry(pos, member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
pos = n, n = list_prev_entry(n, member))
/**
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop
@@ -554,7 +581,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* completing the current iteration of the loop body.
*/
#define list_safe_reset_next(pos, n, member) \
n = list_entry(pos->member.next, typeof(*pos), member)
n = list_next_entry(pos, member)
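Sketch of the pattern these helpers enable: deleting every element while iterating, which plain list_for_each_entry would not survive ('struct item' and 'items' are hypothetical):

struct item {
	int v;
	struct list_head link;
};

static LIST_HEAD(items);
struct item *pos, *tmp;

list_for_each_entry_safe(pos, tmp, &items, link) {
	list_del(&pos->link);
	kfree(pos);
}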
/*
* Double linked lists with a single pointer list head.
@@ -626,15 +653,15 @@ static inline void hlist_add_before(struct hlist_node *n,
*(n->pprev) = n;
}
static inline void hlist_add_after(struct hlist_node *n,
struct hlist_node *next)
static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
next->next = n->next;
n->next = next;
next->pprev = &n->next;
n->next = prev->next;
prev->next = n;
n->pprev = &prev->next;
if(next->next)
next->next->pprev = &next->next;
if (n->next)
n->next->pprev = &n->next;
}
/* after that we'll appear to be on some hlist and hlist_del will work */

View File

@@ -228,9 +228,9 @@ struct held_lock {
unsigned int trylock:1; /* 16 bits */
unsigned int read:2; /* see lock_acquire() comment */
unsigned int check:2; /* see lock_acquire() comment */
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:11; /* 32 bits */
unsigned int references:12; /* 32 bits */
};
/*
@@ -241,7 +241,7 @@ extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_off(void);
extern void lockdep_on(void);
@@ -279,7 +279,7 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
(lock)->dep_map.key, sub)
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class(lock, &__lockdep_no_validate__)
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
* Compare locking classes
*/
@@ -302,9 +302,8 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
*
* Values for check:
*
* 0: disabled
* 1: simple checks (freeing, held-at-exit-time, etc.)
* 2: full validation
* 0: simple checks (freeing, held-at-exit-time, etc.)
* 1: full validation
*/
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
@@ -335,9 +334,13 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
#define lockdep_assert_held(l) do { \
WARN_ON(debug_locks && !lockdep_is_held(l)); \
} while (0)
#else /* !LOCKDEP */
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_off(void)
{
@@ -384,7 +387,7 @@ struct lock_class_key { };
#define lockdep_depth(tsk) (0)
#define lockdep_assert_held(l) do { } while (0)
#define lockdep_assert_held(l) do { (void)(l); } while (0)
#define lockdep_recursing(tsk) (0)
@@ -532,13 +535,13 @@ static inline void print_irqtrace_events(struct task_struct *curr)
# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
#else

View File

@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
return ret;
}
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
#else
#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
u32 ah, al;
u64 ret;
al = a;
ah = a >> 32;
ret = ((u64)al * mul) >> shift;
if (ah)
ret += ((u64)ah * mul) << (32 - shift);
return ret;
}
#endif /* mul_u64_u32_shr */
#endif
#endif /* _LINUX_MATH64_H */
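Typical use is scaling a 64-bit counter by a 32-bit fixed-point multiplier, e.g. a clocksource-style cycles-to-nanoseconds conversion; 'cycles' and 'mult' are hypothetical inputs:

u64 ns = mul_u64_u32_shr(cycles, mult, 32);	/* (cycles * mult) >> 32 */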

View File

@@ -431,6 +431,14 @@ struct spi_device_id {
kernel_ulong_t driver_data; /* Data private to the driver */
};
#define SPMI_NAME_SIZE 32
#define SPMI_MODULE_PREFIX "spmi:"
struct spmi_device_id {
char name[SPMI_NAME_SIZE];
kernel_ulong_t driver_data; /* Data private to the driver */
};
/* dmi */
enum dmi_field {
DMI_NONE,
@@ -547,6 +555,11 @@ struct amba_id {
* See documentation of "x86_match_cpu" for details.
*/
/*
* MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id.
* Although gcc seems to ignore this error, clang fails without this define.
*/
#define x86cpu_device_id x86_cpu_id
struct x86_cpu_id {
__u16 vendor;
__u16 family;
@@ -563,6 +576,15 @@ struct x86_cpu_id {
#define X86_MODEL_ANY 0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
/*
* Generic table type for matching CPU features.
* @feature: the bit number of the feature (0 - 65535)
*/
struct cpu_feature {
__u16 feature;
};
#define IPACK_ANY_FORMAT 0xff
#define IPACK_ANY_ID (~0)
struct ipack_device_id {
@@ -598,4 +620,9 @@ struct rio_device_id {
__u16 asm_did, asm_vid;
};
struct mcb_device_id {
__u16 device;
kernel_ulong_t driver_data;
};
#endif /* LINUX_MOD_DEVICETABLE_H */

View File

@@ -92,4 +92,16 @@ static inline int mutex_is_locked(struct mutex *lock)
return atomic_read(&lock->count) != 1;
}
static inline int mutex_trylock(struct mutex *lock)
{
if (likely(atomic_cmpxchg(&lock->count, 1, 0) == 1))
return 1;
return 0;
}
static inline void mutex_destroy(struct mutex *lock)
{
};
#endif
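Usage sketch for the trylock fast path above ('lock' is a hypothetical struct mutex, 'do_work()' a stand-in critical section):

if (mutex_trylock(&lock)) {
	/* got the lock without sleeping */
	do_work();
	mutex_unlock(&lock);
} else {
	/* contended: fall back or retry later */
}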

View File

@@ -456,6 +456,7 @@ struct pci_dev {
(pci_resource_end((dev), (bar)) - \
pci_resource_start((dev), (bar)) + 1))
#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
struct pci_bus {
struct list_head node; /* node in list of buses */
@@ -480,7 +481,7 @@ struct pci_bus {
char name[48];
unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
pci_bus_flags_t bus_flags; /* Inherited by child busses */
pci_bus_flags_t bus_flags; /* inherited by child buses */
struct device *bridge;
struct device dev;
struct bin_attribute *legacy_io; /* legacy I/O for this bus */
@@ -508,8 +509,12 @@ struct pci_sysdata {
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
/*
* Returns true if the pci bus is root (behind host-pci bridge),
* Returns true if the PCI bus is root (behind host-PCI bridge),
* false otherwise
*
* Some code assumes that "bus->self == NULL" means that bus is a root bus.
* This is incorrect because "virtual" buses added for SR-IOV (via
* virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
*/
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
@@ -531,6 +536,32 @@ pci_find_next_bus(const struct pci_bus *from);
#define PCIBIOS_SET_FAILED 0x88
#define PCIBIOS_BUFFER_TOO_SMALL 0x89
/*
* Translate above to generic errno for passing back through non-PCI code.
*/
static inline int pcibios_err_to_errno(int err)
{
if (err <= PCIBIOS_SUCCESSFUL)
return err; /* Assume already errno */
switch (err) {
case PCIBIOS_FUNC_NOT_SUPPORTED:
return -ENOENT;
case PCIBIOS_BAD_VENDOR_ID:
return -EINVAL;
case PCIBIOS_DEVICE_NOT_FOUND:
return -ENODEV;
case PCIBIOS_BAD_REGISTER_NUMBER:
return -EFAULT;
case PCIBIOS_SET_FAILED:
return -EIO;
case PCIBIOS_BUFFER_TOO_SMALL:
return -ENOSPC;
}
return -ENOTTY;
}
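Sketch of the intended use: propagate a config-space access failure as a normal errno ('pdev' is a hypothetical struct pci_dev):

u32 val;
int ret = pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
if (ret)
	return pcibios_err_to_errno(ret);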
/* Low-level architecture-dependent routines */
struct pci_ops {
@@ -586,7 +617,7 @@ static inline int pci_pcie_cap(struct pci_dev *dev)
* pci_is_pcie - check if the PCI device is PCI Express capable
* @dev: PCI device
*
* Returns: true if the PCI device is PCI Express capable, false otherwise.
*/
static inline bool pci_is_pcie(struct pci_dev *dev)
{
@@ -672,6 +703,11 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size);
#define pci_name(x) "radeon"
static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
return pdev->resource[bar].start;
}
#endif //__PCI__H__

View File

@@ -85,6 +85,11 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
*rb_link = node;
}
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
/**
* rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
* given type safe against removal of rb_node entry
@@ -95,12 +100,9 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
* @field: the name of the rb_node field within 'type'.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
typeof(*pos), field); 1; }); \
pos = n)
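/*
 * Usage sketch (illustrative, not part of the original header): postorder
 * iteration lets every entry be freed without rebalancing the tree first.
 * "struct item", "root" and the use of kfree() are assumptions made for
 * this example only.
 *
 *	struct item { struct rb_node node; int key; };
 *	struct item *pos, *n;
 *
 *	rbtree_postorder_for_each_entry_safe(pos, n, &root, node)
 *		kfree(pos);
 */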
#endif /* _LINUX_RBTREE_H */

View File

@@ -40,7 +40,7 @@ static inline void __list_add_rcu(struct list_head *new,
next->prev = new;
}
#else
void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next);
#endif
@@ -191,7 +191,11 @@ static inline void list_splice_init_rcu(struct list_head *list,
if (list_empty(list))
return;
/* "first" and "last" tracking list, so initialize it. */
/*
* "first" and "last" tracking list, so initialize it. RCU readers
* have access to this list, so we must use INIT_LIST_HEAD_RCU()
* instead of INIT_LIST_HEAD().
*/
	INIT_LIST_HEAD_RCU(list);
@@ -228,9 +232,10 @@ static inline void list_splice_init_rcu(struct list_head *list,
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
({ \
	typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
	container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
})
/**
* Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -266,11 +271,11 @@ static inline void list_splice_init_rcu(struct list_head *list,
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = ACCESS_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
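/*
 * Usage sketch (illustrative only): the lookup must run under
 * rcu_read_lock(); "struct foo", "head" and use_foo() are hypothetical
 * names for this example.
 *
 *	struct foo *first;
 *
 *	rcu_read_lock();
 *	first = list_first_or_null_rcu(&head, struct foo, list);
 *	if (first)
 *		use_foo(first);
 *	rcu_read_unlock();
 */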
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
@@ -412,9 +417,9 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
}
/**
* hlist_add_behind_rcu
* @n: the new element to add to the hash list.
* @prev: the existing element to add the new element after.
*
* Description:
* Adds the specified element to the specified hlist
@@ -429,8 +434,8 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
n->pprev = &prev->next;

View File

@@ -0,0 +1,62 @@
/*
* Header file for reservations for dma-buf and ttm
*
* Copyright(C) 2011 Linaro Limited. All rights reserved.
* Copyright (C) 2012-2013 Canonical Ltd
* Copyright (C) 2012 Texas Instruments
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*
* Based on bo.c which bears the following copyright notice,
* but is dual licensed:
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H
#include <linux/ww_mutex.h>
extern struct ww_class reservation_ww_class;
struct reservation_object {
struct ww_mutex lock;
};
static inline void
reservation_object_init(struct reservation_object *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
}
static inline void
reservation_object_fini(struct reservation_object *obj)
{
ww_mutex_destroy(&obj->lock);
}
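/*
 * Minimal usage sketch (an assumption, not mandated by this header): the
 * embedded ww_mutex is taken directly through the ww_mutex API, typically
 * under a ww_acquire_ctx when several buffers are locked at once.
 *
 *	struct reservation_object resv;
 *
 *	reservation_object_init(&resv);
 *	ww_mutex_lock(&resv.lock, NULL);
 *	... access the reserved object ...
 *	ww_mutex_unlock(&resv.lock);
 *	reservation_object_fini(&resv);
 */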
#endif /* _LINUX_RESERVATION_H */

View File

@@ -101,19 +101,6 @@ static inline struct page *sg_page(struct scatterlist *sg)
return (struct page *)((sg)->page_link & ~0x3);
}
/*
* Loop over each sg element, following the pointer to a new list if necessary
*/
@@ -226,10 +213,10 @@ void sg_init_one(struct scatterlist *, const void *, unsigned int);
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
struct scatterlist *, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt,
struct page **pages, unsigned int n_pages,
@@ -241,6 +228,11 @@ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
/*
* Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized.

View File

@@ -3,6 +3,8 @@
#define TASK_UNINTERRUPTIBLE 2
/* Task command name length */
#define TASK_COMM_LEN 16
#define schedule_timeout(x) delay(x)

View File

@@ -51,6 +51,9 @@ extern int strncasecmp(const char *s1, const char *s2, size_t n);
#ifndef __HAVE_ARCH_STRCHR
extern char * strchr(const char *,int);
#endif
#ifndef __HAVE_ARCH_STRCHRNUL
extern char * strchrnul(const char *,int);
#endif
#ifndef __HAVE_ARCH_STRNCHR
extern char * strnchr(const char *, size_t, int);
#endif

View File

@@ -116,10 +116,11 @@ typedef __u8 uint8_t;
typedef __u16 uint16_t;
typedef __u32 uint32_t;
#if defined(__GNUC__)
typedef __u64 uint64_t;
typedef __u64 u_int64_t;
typedef __s64 int64_t;
#endif
/* this is a special 64bit data type that is 8-byte aligned */
#define aligned_u64 __u64 __attribute__((aligned(8)))
@@ -150,6 +151,7 @@ typedef unsigned long blkcnt_t;
#define pgoff_t unsigned long
#endif
/* A dma_addr_t can hold any valid DMA or bus address for the platform */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
@@ -200,6 +202,7 @@ typedef __u32 __bitwise __wsum;
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
typedef unsigned __bitwise__ oom_flags_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;
@@ -209,6 +212,12 @@ typedef u32 phys_addr_t;
typedef phys_addr_t resource_size_t;
/*
* This type is the placeholder for a hardware interrupt number. It has to be
* big enough to enclose whatever representation is used by a given platform.
*/
typedef unsigned long irq_hw_number_t;
typedef struct {
int counter;
} atomic_t;

View File

@@ -39,7 +39,7 @@
#if defined(__KERNEL__) || defined(__linux__)
#include <linux/types.h>
//#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;
#else /* One of the BSDs */
@@ -619,6 +619,17 @@ struct drm_gem_open {
#define DRM_PRIME_CAP_EXPORT 0x2
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/*
* The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
* combination for the hardware cursor. The intention is that a hardware
* agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, eg. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
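/*
 * Illustrative query sketch ("fd" is a hypothetical opened DRM device;
 * struct drm_get_cap is declared just below):
 *
 *	struct drm_get_cap cap = { .capability = DRM_CAP_CURSOR_WIDTH };
 *
 *	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
 *		... cap.value is a valid cursor width ...
 */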
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -635,6 +646,14 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
/**
* DRM_CLIENT_CAP_UNIVERSAL_PLANES
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
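/*
 * Illustrative sketch ("fd" is hypothetical): enabling universal planes
 * for this client, using the drm_set_client_cap argument declared below.
 *
 *	struct drm_set_client_cap cc = {
 *		.capability = DRM_CLIENT_CAP_UNIVERSAL_PLANES,
 *		.value = 1,
 *	};
 *	ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cc);
 */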
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@@ -761,7 +780,7 @@ struct drm_prime_handle {
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0.
*
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and

View File

@@ -0,0 +1,135 @@
/*
* Copyright 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef DRM_FOURCC_H
#define DRM_FOURCC_H
#include <linux/types.h>
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
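/*
 * For example, fourcc_code('X', 'R', '2', '4') evaluates to 0x34325258,
 * i.e. the bytes "XR24" read as a little-endian __u32.
 */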
#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
/* 16 bpp RGB */
#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
/* 24 bpp RGB */
#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
/* 32 bpp RGB */
#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
/*
* 2 plane YCbCr
* index 0 = Y plane, [7:0] Y
* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
* or
* index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
*/
#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
* index 1: Cb plane, [7:0] Cb
* index 2: Cr plane, [7:0] Cr
* or
* index 1: Cr plane, [7:0] Cr
* index 2: Cb plane, [7:0] Cb
*/
#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
#endif /* DRM_FOURCC_H */

View File

@@ -0,0 +1,520 @@
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
* Copyright (c) 2008 Red Hat Inc.
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* Copyright (c) 2007-2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
#include <linux/types.h>
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/*
* When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
* (define not exposed to user space).
*/
#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
#define DRM_MODE_FLAG_3D_NONE (0<<14)
#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
/* Scaling mode options */
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
software can still scale) */
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
/* Picture aspect ratio options */
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
#define DRM_MODE_DITHERING_AUTO 2
/* Dirty info options */
#define DRM_MODE_DIRTY_OFF 0
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
__u32 vrefresh;
__u32 flags;
__u32 type;
char name[DRM_DISPLAY_MODE_LEN];
};
struct drm_mode_card_res {
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
struct drm_mode_crtc {
__u64 set_connectors_ptr;
__u32 count_connectors;
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
	__u32 x, y;   /**< Position on the framebuffer */
__u32 gamma_size;
__u32 mode_valid;
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
/* Planes blend with or override other bits on the CRTC */
struct drm_mode_set_plane {
__u32 plane_id;
__u32 crtc_id;
__u32 fb_id; /* fb object contains surface format type */
__u32 flags; /* see above flags */
/* Signed dest location allows it to be partially off screen */
__s32 crtc_x, crtc_y;
__u32 crtc_w, crtc_h;
/* Source values are 16.16 fixed point */
__u32 src_x, src_y;
__u32 src_h, src_w;
};
struct drm_mode_get_plane {
__u32 plane_id;
__u32 crtc_id;
__u32 fb_id;
__u32 possible_crtcs;
__u32 gamma_size;
__u32 count_format_types;
__u64 format_type_ptr;
};
struct drm_mode_get_plane_res {
__u64 plane_id_ptr;
__u32 count_planes;
};
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
#define DRM_MODE_ENCODER_DSI 6
#define DRM_MODE_ENCODER_DPMST 7
struct drm_mode_get_encoder {
__u32 encoder_id;
__u32 encoder_type;
__u32 crtc_id; /**< Id of crtc */
__u32 possible_crtcs;
__u32 possible_clones;
};
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_SUBCONNECTOR_SCART 9
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16
struct drm_mode_get_connector {
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type;
__u32 connector_type_id;
__u32 connection;
	__u32 mm_width, mm_height; /**< width/height in millimeters */
__u32 subpixel;
__u32 pad;
};
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
/* non-extended types: legacy bitmask, one bit per type: */
#define DRM_MODE_PROP_LEGACY_TYPE ( \
DRM_MODE_PROP_RANGE | \
DRM_MODE_PROP_ENUM | \
DRM_MODE_PROP_BLOB | \
DRM_MODE_PROP_BITMASK)
/* extended-types: rather than continue to consume a bit per type,
* grab a chunk of the bits to use as integer type id.
*/
#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
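/*
 * Worked example (follows directly from the definitions above): a
 * property of extended type DRM_MODE_PROP_OBJECT has
 * (flags & DRM_MODE_PROP_EXTENDED_TYPE) == DRM_MODE_PROP_TYPE(1) == 0x40.
 */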
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
struct drm_mode_get_property {
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
__u32 prop_id;
__u32 flags;
char name[DRM_PROP_NAME_LEN];
__u32 count_values;
__u32 count_enum_blobs;
};
struct drm_mode_connector_set_property {
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
struct drm_mode_obj_get_properties {
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_props;
__u32 obj_id;
__u32 obj_type;
};
struct drm_mode_obj_set_property {
__u64 value;
__u32 prop_id;
__u32 obj_id;
__u32 obj_type;
};
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
__u64 data;
};
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
__u32 handle;
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
struct drm_mode_fb_cmd2 {
__u32 fb_id;
__u32 width, height;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
/*
* In case of planar formats, this ioctl allows up to 4
	 * buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
* YUV 4:2:0 image with a plane of 8 bit Y samples
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
* So it would consist of Y as offset[0] and UV as
	 * offset[1]. Note that offset[0] will generally
* be 0.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
};
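/*
 * Illustrative sketch: describing an NV12 image that lives in a single
 * buffer object ("bo_handle", "w" and "h" are assumptions made for this
 * example; a real driver may additionally require aligned pitches).
 *
 *	struct drm_mode_fb_cmd2 f = {0};
 *
 *	f.width = w;
 *	f.height = h;
 *	f.pixel_format = DRM_FORMAT_NV12;
 *	f.handles[0] = f.handles[1] = bo_handle;
 *	f.pitches[0] = w;		(8-bit Y samples)
 *	f.pitches[1] = w;		(interleaved 2x2 subsampled Cb:Cr)
 *	f.offsets[0] = 0;
 *	f.offsets[1] = w * h;		(CbCr plane follows the Y plane)
 */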
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
/*
* Mark a region of a framebuffer as dirty.
*
* Some hardware does not automatically update display contents
* as a hardware or software draw to a framebuffer. This ioctl
* allows userspace to tell the kernel and the hardware what
* regions of the framebuffer have changed.
*
 * The kernel or hardware is free to update more than just the
* region specified by the clip rects. The kernel or hardware
* may also delay and/or coalesce several calls to dirty into a
* single update.
*
 * Userspace may annotate the updates; the annotations are a
* promise made by the caller that the change is either a copy
* of pixels or a fill of a single color in the region specified.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
 * the number of updated regions is half of num_clips given,
* where the clip rects are paired in src and dst. The width and
* height of each one of the pairs must match.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
 * promises that the region specified by the clip rects is filled
* completely with a single color as given in the color argument.
*/
struct drm_mode_fb_dirty_cmd {
__u32 fb_id;
__u32 flags;
__u32 color;
__u32 num_clips;
__u64 clips_ptr;
};
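/*
 * Illustrative sketch ("fd", "fb" and the clip values are hypothetical;
 * struct drm_clip_rect comes from drm.h):
 *
 *	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
 *	struct drm_mode_fb_dirty_cmd cmd = {
 *		.fb_id = fb,
 *		.num_clips = 1,
 *		.clips_ptr = (__u64)(unsigned long)&clip,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
 */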
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_CURSOR_BO 0x01
#define DRM_MODE_CURSOR_MOVE 0x02
#define DRM_MODE_CURSOR_FLAGS 0x03
/*
* depending on the value in flags different members are used.
*
* CURSOR_BO uses
* crtc_id
* width
* height
* handle - if 0 turns the cursor off
*
* CURSOR_MOVE uses
* crtc_id
* x
* y
*/
struct drm_mode_cursor {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
};
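/*
 * Illustrative sketch ("fd", "crtc" and "bo" are hypothetical): set a
 * 64x64 cursor buffer, then move the cursor.
 *
 *	struct drm_mode_cursor cur = {
 *		.flags = DRM_MODE_CURSOR_BO,
 *		.crtc_id = crtc,
 *		.width = 64, .height = 64,
 *		.handle = bo,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CURSOR, &cur);
 *
 *	cur.flags = DRM_MODE_CURSOR_MOVE;
 *	cur.x = 100;
 *	cur.y = 50;
 *	ioctl(fd, DRM_IOCTL_MODE_CURSOR, &cur);
 */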
struct drm_mode_cursor2 {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
__s32 hot_x;
__s32 hot_y;
};
struct drm_mode_crtc_lut {
__u32 crtc_id;
__u32 gamma_size;
/* pointers to arrays */
__u64 red;
__u64 green;
__u64 blue;
};
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
/*
* Request a page flip on the specified crtc.
*
* This ioctl will ask KMS to schedule a page flip for the specified
* crtc. Once any pending rendering targeting the specified fb (as of
* ioctl time) has completed, the crtc will be reprogrammed to display
* that fb after the next vertical refresh. The ioctl returns
* immediately, but subsequent rendering to the current fb will block
* in the execbuffer ioctl until the page flip happens. If a page
* flip is already pending as the ioctl is called, EBUSY will be
* returned.
*
* Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
* event (see drm.h: struct drm_event_vblank) when the page flip is
* done. The user_data field passed in with this ioctl will be
* returned as the user_data field in the vblank event struct.
*
* Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
 * 'as soon as possible', meaning that it should not delay waiting for vblank.
* This may cause tearing on the screen.
*
* The reserved field must be zero until we figure out something
* clever to use it for.
*/
struct drm_mode_crtc_page_flip {
__u32 crtc_id;
__u32 fb_id;
__u32 flags;
__u32 reserved;
__u64 user_data;
};
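/*
 * Illustrative sketch ("fd", "crtc", "fb" and "cookie" are hypothetical):
 * schedule a flip and request a vblank event carrying user_data back.
 *
 *	struct drm_mode_crtc_page_flip flip = {
 *		.crtc_id = crtc,
 *		.fb_id = fb,
 *		.flags = DRM_MODE_PAGE_FLIP_EVENT,
 *		.user_data = (__u64)(unsigned long)cookie,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
 */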
/* create a dumb scanout buffer */
struct drm_mode_create_dumb {
uint32_t height;
uint32_t width;
uint32_t bpp;
uint32_t flags;
/* handle, pitch, size will be returned */
uint32_t handle;
uint32_t pitch;
uint64_t size;
};
/* set up for mmap of a dumb scanout buffer */
struct drm_mode_map_dumb {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
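/*
 * Illustrative dumb-buffer flow ("fd" is hypothetical, "create" is a
 * previously filled drm_mode_create_dumb): request the fake offset, then
 * mmap() the buffer through the DRM fd.
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 */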
struct drm_mode_destroy_dumb {
uint32_t handle;
};
#endif

View File

@@ -223,6 +223,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -273,6 +274,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -337,6 +339,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_NO_RELOC 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
typedef struct drm_i915_getparam {
int param;
@@ -1049,6 +1052,20 @@ struct drm_i915_reset_stats {
__u32 pad;
};
struct drm_i915_gem_userptr {
__u64 user_ptr;
__u64 user_size;
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
};
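/*
 * Illustrative sketch ("fd", "base" and "sz" are hypothetical): wrap an
 * existing user allocation in a GEM object via the userptr ioctl.
 *
 *	struct drm_i915_gem_userptr up = {
 *		.user_ptr = (__u64)(unsigned long)base,
 *		.user_size = sz,
 *		.flags = I915_USERPTR_READ_ONLY,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &up) == 0)
 *		... up.handle names the new object ...
 */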
struct drm_i915_mask {
__u32 handle;
__u32 width;

File diff suppressed because it is too large

View File

@@ -87,6 +87,16 @@
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
*
*/
enum drm_vmw_handle_type {
DRM_VMW_HANDLE_LEGACY = 0,
DRM_VMW_HANDLE_PRIME = 1
};
/**
* struct drm_vmw_getparam_arg
@@ -176,6 +186,7 @@ struct drm_vmw_surface_create_req {
 * struct drm_vmw_surface_arg
*
* @sid: Surface id of created surface or surface to destroy or reference.
* @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
*
* Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
* Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
@@ -184,7 +195,7 @@ struct drm_vmw_surface_create_req {
struct drm_vmw_surface_arg {
int32_t sid;
enum drm_vmw_handle_type handle_type;
};
/**

View File

@@ -84,7 +84,7 @@ do{ \
#define wait_event(wq, condition) \
do{ \
wait_queue_t __wait = { \
.task_list = LIST_HEAD_INIT(__wait.task_list), \
.evnt = CreateEvent(NULL, MANUAL_DESTROY), \
@@ -109,7 +109,13 @@ do{ \
DestroyEvent(__wait.evnt); \
} while (0)
#define wait_event_interruptible(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
wait_event(wq, condition); \
__ret; \
})
static inline

View File

@@ -0,0 +1,381 @@
/*
* Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
*
* Original mutex implementation started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* Wound/wait implementation:
* Copyright (C) 2013 Canonical Ltd.
*
* This file contains the main data structure and API definitions.
*/
#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H
#include <linux/mutex.h>
#include <syscall.h>
#define current (void*)GetPid()
struct ww_class {
atomic_long_t stamp;
struct lock_class_key acquire_key;
struct lock_class_key mutex_key;
const char *acquire_name;
const char *mutex_name;
};
struct ww_acquire_ctx {
struct task_struct *task;
unsigned long stamp;
unsigned acquired;
#ifdef CONFIG_DEBUG_MUTEXES
unsigned done_acquire;
struct ww_class *ww_class;
struct ww_mutex *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned deadlock_inject_interval;
unsigned deadlock_inject_countdown;
#endif
};
struct ww_mutex {
struct mutex base;
struct ww_acquire_ctx *ctx;
#ifdef CONFIG_DEBUG_MUTEXES
struct ww_class *ww_class;
#endif
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
, .ww_class = &ww_class
#else
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
#endif
#define __WW_CLASS_INITIALIZER(ww_class) \
{ .stamp = ATOMIC_LONG_INIT(0) \
, .acquire_name = #ww_class "_acquire" \
, .mutex_name = #ww_class "_mutex" }
#define __WW_MUTEX_INITIALIZER(lockname, class) \
	{ .base = __MUTEX_INITIALIZER(lockname.base) \
	  __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
#define DEFINE_WW_CLASS(classname) \
struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
#define DEFINE_WW_MUTEX(mutexname, ww_class) \
struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
/**
* ww_mutex_init - initialize the w/w mutex
* @lock: the mutex to be initialized
* @ww_class: the w/w class the mutex should belong to
*
* Initialize the w/w mutex to unlocked state and associate it with the given
* class.
*
* It is not allowed to initialize an already locked mutex.
*/
static inline void ww_mutex_init(struct ww_mutex *lock,
struct ww_class *ww_class)
{
MutexInit(&lock->base);
lock->ctx = NULL;
#ifdef CONFIG_DEBUG_MUTEXES
lock->ww_class = ww_class;
#endif
}
/**
* ww_acquire_init - initialize a w/w acquire context
* @ctx: w/w acquire context to initialize
* @ww_class: w/w class of the context
*
 * Initializes a context to acquire multiple mutexes of the given w/w class.
*
* Context-based w/w mutex acquiring can be done in any order whatsoever within
* a given lock class. Deadlocks will be detected and handled with the
* wait/wound logic.
*
* Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
 * result in undetected deadlocks and is therefore forbidden. Mixing different contexts
* for the same w/w class when acquiring mutexes can also result in undetected
* deadlocks, and is hence also forbidden. Both types of abuse will be caught by
* enabling CONFIG_PROVE_LOCKING.
*
* Nesting of acquire contexts for _different_ w/w classes is possible, subject
* to the usual locking rules between different lock classes.
*
* An acquire context must be released with ww_acquire_fini by the same task
* before the memory is freed. It is recommended to allocate the context itself
* on the stack.
*/
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
{
ctx->task = current;
ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
ctx->ww_class = ww_class;
ctx->done_acquire = 0;
ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
ctx->deadlock_inject_interval = 1;
ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}
/**
* ww_acquire_done - marks the end of the acquire phase
* @ctx: the acquire context
*
* Marks the end of the acquire phase, any further w/w mutex lock calls using
* this context are forbidden.
*
 * Calling this function is optional; it is just useful to document w/w mutex
 * code and clearly separate the acquire phase from actually using the locked
* data structures.
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
lockdep_assert_held(ctx);
DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
ctx->done_acquire = 1;
#endif
}
/**
* ww_acquire_fini - releases a w/w acquire context
* @ctx: the acquire context to free
*
* Releases a w/w acquire context. This must be called _after_ all acquired w/w
* mutexes have been released with ww_mutex_unlock.
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
mutex_release(&ctx->dep_map, 0, _THIS_IP_);
DEBUG_LOCKS_WARN_ON(ctx->acquired);
if (!config_enabled(CONFIG_PROVE_LOCKING))
/*
* lockdep will normally handle this,
		 * but make it fail without lockdep anyway
*/
ctx->done_acquire = 1;
if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
/* ensure ww_acquire_fini will still fail if called twice */
ctx->acquired = ~0U;
#endif
}
extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx);
extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx);
/**
* ww_mutex_lock - acquire the w/w mutex
* @lock: the mutex to be acquired
* @ctx: w/w acquire context, or NULL to acquire only a single lock.
*
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
 * wait/wound algorithm. If the lock isn't immediately available, this function
 * will either sleep until it is (wait case) or select the current context
* for backing off by returning -EDEADLK (wound case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired.
*
* In the wound case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
* lock and proceed with trying to acquire further w/w mutexes (e.g. when
* scanning through lru lists trying to free resources).
*
* The mutex must later on be released by the same task that
* acquired it. The task may not exit without first unlocking the mutex. Also,
* kernel memory where the mutex resides must not be freed with the mutex still
* locked. The mutex must first be initialized (or statically defined) before it
* can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
* of the same w/w lock class as was used to initialize the acquire context.
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
if (ctx)
return __ww_mutex_lock(lock, ctx);
mutex_lock(&lock->base);
return 0;
}
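/*
 * Sketch of the whole wait/wound dance (illustrative only; "lock_a",
 * "lock_b" and "my_class" are assumptions made for this example):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	ret = ww_mutex_lock(&lock_a->lock, &ctx);
 *	ret = ww_mutex_lock(&lock_b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(&lock_a->lock);
 *		ww_mutex_lock_slow(&lock_b->lock, &ctx);
 *		ww_mutex_lock(&lock_a->lock, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *	... use the protected data ...
 *	ww_mutex_unlock(&lock_a->lock);
 *	ww_mutex_unlock(&lock_b->lock);
 *	ww_acquire_fini(&ctx);
 */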
/**
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
 * wait/wound algorithm. If the lock isn't immediately available, this function
 * will either sleep until it is (wait case) or select the current context
* for backing off by returning -EDEADLK (wound case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
* signal arrives while waiting for the lock then this function returns -EINTR.
*
* In the wound case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
* not acquire this lock and proceed with trying to acquire further w/w mutexes
* (e.g. when scanning through lru lists trying to free resources).
*
* The mutex must later on be released by the same task that
* acquired it. The task may not exit without first unlocking the mutex. Also,
* kernel memory where the mutex resides must not be freed with the mutex still
* locked. The mutex must first be initialized (or statically defined) before it
* can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
* of the same w/w lock class as was used to initialize the acquire context.
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
if (ctx)
return __ww_mutex_lock_interruptible(lock, ctx);
else
return mutex_lock_interruptible(&lock->base);
}
/**
* ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
* Acquires a w/w mutex with the given context after a wound case. This function
* will sleep until the lock becomes available.
*
* The caller must have released all w/w mutexes already acquired with the
* context and then call this function on the contended lock.
*
* Afterwards the caller may continue to (re)acquire the other w/w mutexes it
* needs with ww_mutex_lock. Note that the -EALREADY return code from
* ww_mutex_lock can be used to avoid locking this contended mutex twice.
*
* It is forbidden to call this function with any other w/w mutexes associated
* with the context held. It is forbidden to call this on anything else than the
* contending mutex.
*
* Note that the slowpath lock acquiring can also be done by calling
* ww_mutex_lock directly. This function here is simply to help w/w mutex
* locking code readability by clearly denoting the slowpath.
*/
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
ret = ww_mutex_lock(lock, ctx);
(void)ret;
}
/**
* ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
* Acquires a w/w mutex with the given context after a wound case. This function
* will sleep until the lock becomes available and returns 0 when the lock has
* been acquired. If a signal arrives while waiting for the lock then this
* function returns -EINTR.
*
* The caller must have released all w/w mutexes already acquired with the
* context and then call this function on the contended lock.
*
* Afterwards the caller may continue to (re)acquire the other w/w mutexes it
* needs with ww_mutex_lock. Note that the -EALREADY return code from
* ww_mutex_lock can be used to avoid locking this contended mutex twice.
*
* It is forbidden to call this function with any other w/w mutexes associated
* with the given context held. It is forbidden to call this on anything else
* than the contending mutex.
*
* Note that the slowpath lock acquiring can also be done by calling
* ww_mutex_lock_interruptible directly. This function here is simply to help
* w/w mutex locking code readability by clearly denoting the slowpath.
*/
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
return ww_mutex_lock_interruptible(lock, ctx);
}
extern void ww_mutex_unlock(struct ww_mutex *lock);
/**
* ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
* @lock: mutex to lock
*
* Trylocks a mutex without acquire context, so no deadlock detection is
* possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
*/
static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
{
return mutex_trylock(&lock->base);
}
/**
* ww_mutex_destroy - mark a w/w mutex unusable
* @lock: the mutex to be destroyed
*
* This function marks the mutex uninitialized, and any subsequent
* use of the mutex is forbidden. The mutex must not be locked when
* this function is called.
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
mutex_destroy(&lock->base);
}
/**
* ww_mutex_is_locked - is the w/w mutex locked
* @lock: the mutex to be queried
*
* Returns 1 if the mutex is locked, 0 if unlocked.
*/
static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
{
return mutex_is_locked(&lock->base);
}
#endif