forked from KolibriOS/kolibrios
ddk: update to 3.12-rc6
git-svn-id: svn://kolibrios.org@4103 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
parent 46851141b8
commit 4adaeb28fb
@@ -28,6 +28,7 @@ NAME_SRCS:= \
             linux/firmware.c \
             linux/kref.c \
             linux/list_sort.c \
+            linux/rbtree.c \
             linux/dmapool.c \
             linux/ctype.c \
             linux/string.c \
@@ -33,6 +33,11 @@
 #include <linux/idr.h>
 //#include <stdlib.h>

+static inline void * __must_check ERR_PTR(long error)
+{
+    return (void *) error;
+}
+
 unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                  unsigned long offset);

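Note (outside the diff): the ERR_PTR() shim added above lets a function report an errno value through its pointer return, the usual Linux error-pointer idiom. A minimal sketch under that assumption; example_alloc(), example_use() and the -4095 cutoff are invented for the illustration:

    #include <errno.h>

    static inline void *ERR_PTR(long error)        /* same shim as in the hunk above */
    {
        return (void *)error;
    }

    static int the_object;                         /* stands in for a real allocation */

    static void *example_alloc(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : (void *)&the_object;
    }

    static int example_use(void)
    {
        void *p = example_alloc(1);

        /* pointers in the top page of the address space decode back to an errno */
        if ((unsigned long)p >= (unsigned long)-4095)
            return (int)(long)p;                   /* here: -ENOMEM */
        return 0;
    }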
@@ -49,6 +54,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
 static struct idr_layer *idr_preload_head;
 static int idr_preload_cnt;

+static DEFINE_SPINLOCK(simple_ida_lock);

 /* the maximum ID which can be allocated given idr->layers */
 static int idr_max(int layers)
@@ -462,6 +468,33 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(idr_alloc);

+/**
+ * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Essentially the same as idr_alloc, but prefers to allocate progressively
+ * higher ids if it can. If the "cur" counter wraps, then it will start again
+ * at the "start" end of the range and allocate one that has already been used.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
+                     gfp_t gfp_mask)
+{
+    int id;
+
+    id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
+    if (id == -ENOSPC)
+        id = idr_alloc(idr, ptr, start, end, gfp_mask);
+
+    if (likely(id >= 0))
+        idr->cur = id + 1;
+    return id;
+}
+EXPORT_SYMBOL(idr_alloc_cyclic);
+
 static void idr_remove_warning(int id)
 {
     WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
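Note (outside the diff): a quick sketch of how a caller might use the new cyclic allocator; the idr instance, the [1, 256) range and GFP_KERNEL are chosen purely for illustration, and the idr is assumed to have been set up with idr_init():

    #include <linux/idr.h>

    static struct idr example_ctx_idr;      /* initialised elsewhere with idr_init() */

    /* Hand out ids from [1, 256), preferring ids that were not used recently. */
    static int example_register_ctx(void *ctx)
    {
        int id = idr_alloc_cyclic(&example_ctx_idr, ctx, 1, 256, GFP_KERNEL);

        return (id < 0) ? id : 0;           /* -ENOSPC when full, -ENOMEM on allocation failure */
    }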
@@ -632,7 +665,6 @@ void *idr_find_slowpath(struct idr *idp, int id)
 }
 EXPORT_SYMBOL(idr_find_slowpath);

-#if 0
 /**
  * idr_for_each - iterate through all stored pointers
  * @idp: idr handle
@@ -790,10 +822,6 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 }
 EXPORT_SYMBOL(idr_replace);

-
-#endif
-
-
 void __init idr_init_cache(void)
 {
     //idr_layer_cache = kmem_cache_create("idr_layer_cache",
@@ -858,7 +886,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
     /* allocate idr_layers */
-    if (!idr_pre_get(&ida->idr, gfp_mask))
+    if (!__idr_pre_get(&ida->idr, gfp_mask))
        return 0;

     /* allocate free_bitmap */
@@ -1022,6 +1050,74 @@ void ida_destroy(struct ida *ida)
 }
 EXPORT_SYMBOL(ida_destroy);

+/**
+ * ida_simple_get - get a new id.
+ * @ida: the (initialized) ida.
+ * @start: the minimum id (inclusive, < 0x8000000)
+ * @end: the maximum id (exclusive, < 0x8000000 or 0)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocates an id in the range start <= id < end, or returns -ENOSPC.
+ * On memory allocation failure, returns -ENOMEM.
+ *
+ * Use ida_simple_remove() to get rid of an id.
+ */
+int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
+                   gfp_t gfp_mask)
+{
+    int ret, id;
+    unsigned int max;
+    unsigned long flags;
+
+    BUG_ON((int)start < 0);
+    BUG_ON((int)end < 0);
+
+    if (end == 0)
+        max = 0x80000000;
+    else {
+        BUG_ON(end < start);
+        max = end - 1;
+    }
+
+again:
+    if (!ida_pre_get(ida, gfp_mask))
+        return -ENOMEM;
+
+    spin_lock_irqsave(&simple_ida_lock, flags);
+    ret = ida_get_new_above(ida, start, &id);
+    if (!ret) {
+        if (id > max) {
+            ida_remove(ida, id);
+            ret = -ENOSPC;
+        } else {
+            ret = id;
+        }
+    }
+    spin_unlock_irqrestore(&simple_ida_lock, flags);
+
+    if (unlikely(ret == -EAGAIN))
+        goto again;
+
+    return ret;
+}
+EXPORT_SYMBOL(ida_simple_get);
+
+/**
+ * ida_simple_remove - remove an allocated id.
+ * @ida: the (initialized) ida.
+ * @id: the id returned by ida_simple_get.
+ */
+void ida_simple_remove(struct ida *ida, unsigned int id)
+{
+    unsigned long flags;
+
+    BUG_ON((int)id < 0);
+    spin_lock_irqsave(&simple_ida_lock, flags);
+    ida_remove(ida, id);
+    spin_unlock_irqrestore(&simple_ida_lock, flags);
+}
+EXPORT_SYMBOL(ida_simple_remove);
+
 /**
  * ida_init - initialize ida handle
  * @ida: ida handle
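Note (outside the diff): the ida_simple_get()/ida_simple_remove() pair added above is the usual way to hand out small instance numbers such as device minors. A hedged sketch; the ida name and the [0, 64) range are invented for the example, and DEFINE_IDA() is assumed to be available in this ddk:

    #include <linux/idr.h>

    static DEFINE_IDA(example_minor_ida);

    static int example_get_minor(void)
    {
        /* returns an unused id from [0, 64), or -ENOSPC / -ENOMEM */
        return ida_simple_get(&example_minor_ida, 0, 64, GFP_KERNEL);
    }

    static void example_put_minor(int minor)
    {
        ida_simple_remove(&example_minor_ida, minor);
    }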
 drivers/ddk/linux/rbtree.c (new file, 560 lines)
@@ -0,0 +1,560 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
+  (C) 2012  Michel Lespinasse <walken@google.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/lib/rbtree.c
+*/
+
+#include <linux/rbtree_augmented.h>
+#include <linux/export.h>
+
+/*
+ * red-black trees properties:  http://en.wikipedia.org/wiki/Rbtree
+ *
+ *  1) A node is either red or black
+ *  2) The root is black
+ *  3) All leaves (NULL) are black
+ *  4) Both children of every red node are black
+ *  5) Every simple path from root to leaves contains the same number
+ *     of black nodes.
+ *
+ *  4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
+ *  consecutive red nodes in a path and every red node is therefore followed by
+ *  a black. So if B is the number of black nodes on every simple path (as per
+ *  5), then the longest possible path due to 4 is 2B.
+ *
+ *  We shall indicate color with case, where black nodes are uppercase and red
+ *  nodes will be lowercase. Unknown color nodes shall be drawn as red within
+ *  parentheses and have some accompanying text comment.
+ */
+
+static inline void rb_set_black(struct rb_node *rb)
+{
+    rb->__rb_parent_color |= RB_BLACK;
+}
+
+static inline struct rb_node *rb_red_parent(struct rb_node *red)
+{
+    return (struct rb_node *)red->__rb_parent_color;
+}
+
+/*
+ * Helper function for rotations:
+ * - old's parent and color get assigned to new
+ * - old gets assigned new as a parent and 'color' as a color.
+ */
+static inline void
+__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
+                        struct rb_root *root, int color)
+{
+    struct rb_node *parent = rb_parent(old);
+    new->__rb_parent_color = old->__rb_parent_color;
+    rb_set_parent_color(old, new, color);
+    __rb_change_child(old, new, parent, root);
+}
+
+static __always_inline void
+__rb_insert(struct rb_node *node, struct rb_root *root,
+            void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+    struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+
+    while (true) {
+        /*
+         * Loop invariant: node is red
+         *
+         * If there is a black parent, we are done.
+         * Otherwise, take some corrective action as we don't
+         * want a red root or two consecutive red nodes.
+         */
+        if (!parent) {
+            rb_set_parent_color(node, NULL, RB_BLACK);
+            break;
+        } else if (rb_is_black(parent))
+            break;
+
+        gparent = rb_red_parent(parent);
+
+        tmp = gparent->rb_right;
+        if (parent != tmp) {    /* parent == gparent->rb_left */
+            if (tmp && rb_is_red(tmp)) {
+                /*
+                 * Case 1 - color flips
+                 *
+                 *       G            g
+                 *      / \          / \
+                 *     p   u  -->   P   U
+                 *    /            /
+                 *   n            N
+                 *
+                 * However, since g's parent might be red, and
+                 * 4) does not allow this, we need to recurse
+                 * at g.
+                 */
+                rb_set_parent_color(tmp, gparent, RB_BLACK);
+                rb_set_parent_color(parent, gparent, RB_BLACK);
+                node = gparent;
+                parent = rb_parent(node);
+                rb_set_parent_color(node, parent, RB_RED);
+                continue;
+            }
+
+            tmp = parent->rb_right;
+            if (node == tmp) {
+                /*
+                 * Case 2 - left rotate at parent
+                 *
+                 *      G             G
+                 *     / \           / \
+                 *    p   U  -->    n   U
+                 *     \           /
+                 *      n         p
+                 *
+                 * This still leaves us in violation of 4), the
+                 * continuation into Case 3 will fix that.
+                 */
+                parent->rb_right = tmp = node->rb_left;
+                node->rb_left = parent;
+                if (tmp)
+                    rb_set_parent_color(tmp, parent,
+                                        RB_BLACK);
+                rb_set_parent_color(parent, node, RB_RED);
+                augment_rotate(parent, node);
+                parent = node;
+                tmp = node->rb_right;
+            }
+
+            /*
+             * Case 3 - right rotate at gparent
+             *
+             *        G           P
+             *       / \         / \
+             *      p   U  -->  n   g
+             *     /                 \
+             *    n                   U
+             */
+            gparent->rb_left = tmp;  /* == parent->rb_right */
+            parent->rb_right = gparent;
+            if (tmp)
+                rb_set_parent_color(tmp, gparent, RB_BLACK);
+            __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+            augment_rotate(gparent, parent);
+            break;
+        } else {
+            tmp = gparent->rb_left;
+            if (tmp && rb_is_red(tmp)) {
+                /* Case 1 - color flips */
+                rb_set_parent_color(tmp, gparent, RB_BLACK);
+                rb_set_parent_color(parent, gparent, RB_BLACK);
+                node = gparent;
+                parent = rb_parent(node);
+                rb_set_parent_color(node, parent, RB_RED);
+                continue;
+            }
+
+            tmp = parent->rb_left;
+            if (node == tmp) {
+                /* Case 2 - right rotate at parent */
+                parent->rb_left = tmp = node->rb_right;
+                node->rb_right = parent;
+                if (tmp)
+                    rb_set_parent_color(tmp, parent,
+                                        RB_BLACK);
+                rb_set_parent_color(parent, node, RB_RED);
+                augment_rotate(parent, node);
+                parent = node;
+                tmp = node->rb_left;
+            }
+
+            /* Case 3 - left rotate at gparent */
+            gparent->rb_right = tmp;  /* == parent->rb_left */
+            parent->rb_left = gparent;
+            if (tmp)
+                rb_set_parent_color(tmp, gparent, RB_BLACK);
+            __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+            augment_rotate(gparent, parent);
+            break;
+        }
+    }
+}
+
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
+    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+    struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
+
+    while (true) {
+        /*
+         * Loop invariants:
+         * - node is black (or NULL on first iteration)
+         * - node is not the root (parent is not NULL)
+         * - All leaf paths going through parent and node have a
+         *   black node count that is 1 lower than other leaf paths.
+         */
+        sibling = parent->rb_right;
+        if (node != sibling) {  /* node == parent->rb_left */
+            if (rb_is_red(sibling)) {
+                /*
+                 * Case 1 - left rotate at parent
+                 *
+                 *     P               S
+                 *    / \             / \
+                 *   N   s    -->    p   Sr
+                 *      / \         / \
+                 *     Sl  Sr      N   Sl
+                 */
+                parent->rb_right = tmp1 = sibling->rb_left;
+                sibling->rb_left = parent;
+                rb_set_parent_color(tmp1, parent, RB_BLACK);
+                __rb_rotate_set_parents(parent, sibling, root,
+                                        RB_RED);
+                augment_rotate(parent, sibling);
+                sibling = tmp1;
+            }
+            tmp1 = sibling->rb_right;
+            if (!tmp1 || rb_is_black(tmp1)) {
+                tmp2 = sibling->rb_left;
+                if (!tmp2 || rb_is_black(tmp2)) {
+                    /*
+                     * Case 2 - sibling color flip
+                     * (p could be either color here)
+                     *
+                     *    (p)           (p)
+                     *    / \           / \
+                     *   N   S    -->  N   s
+                     *      / \           / \
+                     *     Sl  Sr        Sl  Sr
+                     *
+                     * This leaves us violating 5) which
+                     * can be fixed by flipping p to black
+                     * if it was red, or by recursing at p.
+                     * p is red when coming from Case 1.
+                     */
+                    rb_set_parent_color(sibling, parent,
+                                        RB_RED);
+                    if (rb_is_red(parent))
+                        rb_set_black(parent);
+                    else {
+                        node = parent;
+                        parent = rb_parent(node);
+                        if (parent)
+                            continue;
+                    }
+                    break;
+                }
+                /*
+                 * Case 3 - right rotate at sibling
+                 * (p could be either color here)
+                 *
+                 *   (p)           (p)
+                 *   / \           / \
+                 *  N   S    -->  N   Sl
+                 *     / \             \
+                 *    sl  Sr            s
+                 *                       \
+                 *                        Sr
+                 */
+                sibling->rb_left = tmp1 = tmp2->rb_right;
+                tmp2->rb_right = sibling;
+                parent->rb_right = tmp2;
+                if (tmp1)
+                    rb_set_parent_color(tmp1, sibling,
+                                        RB_BLACK);
+                augment_rotate(sibling, tmp2);
+                tmp1 = sibling;
+                sibling = tmp2;
+            }
+            /*
+             * Case 4 - left rotate at parent + color flips
+             * (p and sl could be either color here.
+             *  After rotation, p becomes black, s acquires
+             *  p's color, and sl keeps its color)
+             *
+             *      (p)             (s)
+             *      / \             / \
+             *     N   S     -->   P   Sr
+             *        / \         / \
+             *      (sl) sr      N  (sl)
+             */
+            parent->rb_right = tmp2 = sibling->rb_left;
+            sibling->rb_left = parent;
+            rb_set_parent_color(tmp1, sibling, RB_BLACK);
+            if (tmp2)
+                rb_set_parent(tmp2, parent);
+            __rb_rotate_set_parents(parent, sibling, root,
+                                    RB_BLACK);
+            augment_rotate(parent, sibling);
+            break;
+        } else {
+            sibling = parent->rb_left;
+            if (rb_is_red(sibling)) {
+                /* Case 1 - right rotate at parent */
+                parent->rb_left = tmp1 = sibling->rb_right;
+                sibling->rb_right = parent;
+                rb_set_parent_color(tmp1, parent, RB_BLACK);
+                __rb_rotate_set_parents(parent, sibling, root,
+                                        RB_RED);
+                augment_rotate(parent, sibling);
+                sibling = tmp1;
+            }
+            tmp1 = sibling->rb_left;
+            if (!tmp1 || rb_is_black(tmp1)) {
+                tmp2 = sibling->rb_right;
+                if (!tmp2 || rb_is_black(tmp2)) {
+                    /* Case 2 - sibling color flip */
+                    rb_set_parent_color(sibling, parent,
+                                        RB_RED);
+                    if (rb_is_red(parent))
+                        rb_set_black(parent);
+                    else {
+                        node = parent;
+                        parent = rb_parent(node);
+                        if (parent)
+                            continue;
+                    }
+                    break;
+                }
+                /* Case 3 - right rotate at sibling */
+                sibling->rb_right = tmp1 = tmp2->rb_left;
+                tmp2->rb_left = sibling;
+                parent->rb_left = tmp2;
+                if (tmp1)
+                    rb_set_parent_color(tmp1, sibling,
+                                        RB_BLACK);
+                augment_rotate(sibling, tmp2);
+                tmp1 = sibling;
+                sibling = tmp2;
+            }
+            /* Case 4 - left rotate at parent + color flips */
+            parent->rb_left = tmp2 = sibling->rb_right;
+            sibling->rb_right = parent;
+            rb_set_parent_color(tmp1, sibling, RB_BLACK);
+            if (tmp2)
+                rb_set_parent(tmp2, parent);
+            __rb_rotate_set_parents(parent, sibling, root,
+                                    RB_BLACK);
+            augment_rotate(parent, sibling);
+            break;
+        }
+    }
+}
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+    ____rb_erase_color(parent, root, augment_rotate);
+}
+EXPORT_SYMBOL(__rb_erase_color);
+
+/*
+ * Non-augmented rbtree manipulation functions.
+ *
+ * We use dummy augmented callbacks here, and have the compiler optimize them
+ * out of the rb_insert_color() and rb_erase() function definitions.
+ */
+
+static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
+static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
+static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
+
+static const struct rb_augment_callbacks dummy_callbacks = {
+    dummy_propagate, dummy_copy, dummy_rotate
+};
+
+void rb_insert_color(struct rb_node *node, struct rb_root *root)
+{
+    __rb_insert(node, root, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_insert_color);
+
+void rb_erase(struct rb_node *node, struct rb_root *root)
+{
+    struct rb_node *rebalance;
+    rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+    if (rebalance)
+        ____rb_erase_color(rebalance, root, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_erase);
+
+/*
+ * Augmented rbtree manipulation functions.
+ *
+ * This instantiates the same __always_inline functions as in the non-augmented
+ * case, but this time with user-defined callbacks.
+ */
+
+void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+    __rb_insert(node, root, augment_rotate);
+}
+EXPORT_SYMBOL(__rb_insert_augmented);
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+struct rb_node *rb_first(const struct rb_root *root)
+{
+    struct rb_node  *n;
+
+    n = root->rb_node;
+    if (!n)
+        return NULL;
+    while (n->rb_left)
+        n = n->rb_left;
+    return n;
+}
+EXPORT_SYMBOL(rb_first);
+
+struct rb_node *rb_last(const struct rb_root *root)
+{
+    struct rb_node  *n;
+
+    n = root->rb_node;
+    if (!n)
+        return NULL;
+    while (n->rb_right)
+        n = n->rb_right;
+    return n;
+}
+EXPORT_SYMBOL(rb_last);
+
+struct rb_node *rb_next(const struct rb_node *node)
+{
+    struct rb_node *parent;
+
+    if (RB_EMPTY_NODE(node))
+        return NULL;
+
+    /*
+     * If we have a right-hand child, go down and then left as far
+     * as we can.
+     */
+    if (node->rb_right) {
+        node = node->rb_right;
+        while (node->rb_left)
+            node = node->rb_left;
+        return (struct rb_node *)node;
+    }
+
+    /*
+     * No right-hand children. Everything down and left is smaller than us,
+     * so any 'next' node must be in the general direction of our parent.
+     * Go up the tree; any time the ancestor is a right-hand child of its
+     * parent, keep going up. First time it's a left-hand child of its
+     * parent, said parent is our 'next' node.
+     */
+    while ((parent = rb_parent(node)) && node == parent->rb_right)
+        node = parent;
+
+    return parent;
+}
+EXPORT_SYMBOL(rb_next);
+
+struct rb_node *rb_prev(const struct rb_node *node)
+{
+    struct rb_node *parent;
+
+    if (RB_EMPTY_NODE(node))
+        return NULL;
+
+    /*
+     * If we have a left-hand child, go down and then right as far
+     * as we can.
+     */
+    if (node->rb_left) {
+        node = node->rb_left;
+        while (node->rb_right)
+            node = node->rb_right;
+        return (struct rb_node *)node;
+    }
+
+    /*
+     * No left-hand children. Go up till we find an ancestor which
+     * is a right-hand child of its parent.
+     */
+    while ((parent = rb_parent(node)) && node == parent->rb_left)
+        node = parent;
+
+    return parent;
+}
+EXPORT_SYMBOL(rb_prev);
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                     struct rb_root *root)
+{
+    struct rb_node *parent = rb_parent(victim);
+
+    /* Set the surrounding nodes to point to the replacement */
+    __rb_change_child(victim, new, parent, root);
+    if (victim->rb_left)
+        rb_set_parent(victim->rb_left, new);
+    if (victim->rb_right)
+        rb_set_parent(victim->rb_right, new);
+
+    /* Copy the pointers/colour from the victim to the replacement */
+    *new = *victim;
+}
+EXPORT_SYMBOL(rb_replace_node);
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+    for (;;) {
+        if (node->rb_left)
+            node = node->rb_left;
+        else if (node->rb_right)
+            node = node->rb_right;
+        else
+            return (struct rb_node *)node;
+    }
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+    const struct rb_node *parent;
+    if (!node)
+        return NULL;
+    parent = rb_parent(node);
+
+    /* If we're sitting on node, we've already seen our children */
+    if (parent && node == parent->rb_left && parent->rb_right) {
+        /* If we are the parent's left node, go to the parent's right
+         * node then all the way down to the left */
+        return rb_left_deepest_node(parent->rb_right);
+    } else
+        /* Otherwise we are the parent's right node, and the parent
+         * should be next */
+        return (struct rb_node *)parent;
+}
+EXPORT_SYMBOL(rb_next_postorder);
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+    if (!root->rb_node)
+        return NULL;
+
+    return rb_left_deepest_node(root->rb_node);
+}
+EXPORT_SYMBOL(rb_first_postorder);
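Note (outside the diff): rbtree.c supplies only the rebalancing core; users embed struct rb_node in their own structure and walk the tree themselves on insert and lookup. A minimal sketch of that pattern, assuming the usual rb_link_node()/rb_entry() helpers and RB_ROOT from <linux/rbtree.h> are also present in this ddk; struct item and its key field are invented for the example:

    #include <linux/rbtree.h>

    struct item {
        struct rb_node node;
        unsigned long key;
    };

    static struct rb_root example_tree = RB_ROOT;

    static void example_insert(struct item *new)
    {
        struct rb_node **link = &example_tree.rb_node, *parent = NULL;

        /* ordered descent to find the insertion point */
        while (*link) {
            struct item *cur = rb_entry(*link, struct item, node);

            parent = *link;
            link = (new->key < cur->key) ? &(*link)->rb_left : &(*link)->rb_right;
        }
        rb_link_node(&new->node, parent, link);      /* link as a red leaf...          */
        rb_insert_color(&new->node, &example_tree);  /* ...then let the core rebalance */
    }

    static struct item *example_find(unsigned long key)
    {
        struct rb_node *n = example_tree.rb_node;

        while (n) {
            struct item *cur = rb_entry(n, struct item, node);

            if (key == cur->key)
                return cur;
            n = (key < cur->key) ? n->rb_left : n->rb_right;
        }
        return NULL;
    }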
@@ -81,12 +81,12 @@
 #include <linux/workqueue.h>


-#include "drm.h"
+#include <drm/drm.h>
+#include <drm/drm_vma_manager.h>

 #include <linux/idr.h>

 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
-#define __OS_HAS_MTRR (defined(CONFIG_MTRR))

 struct module;

@@ -95,6 +95,14 @@ struct drm_device;
 struct device_node;
 struct videomode;

+struct inode;
+struct poll_table_struct;
+struct drm_lock_data;
+
+struct sg_table;
+struct dma_buf;
+
 //#include <drm/drm_os_linux.h>
 #include <drm/drm_hashtab.h>
 #include <drm/drm_mm.h>
@@ -159,19 +167,15 @@ int drm_err(const char *func, const char *format, ...);
 /* driver capabilities and requirements mask */
 #define DRIVER_USE_AGP     0x1
 #define DRIVER_REQUIRE_AGP 0x2
-#define DRIVER_USE_MTRR    0x4
 #define DRIVER_PCI_DMA     0x8
 #define DRIVER_SG          0x10
 #define DRIVER_HAVE_DMA    0x20
 #define DRIVER_HAVE_IRQ    0x40
 #define DRIVER_IRQ_SHARED  0x80
-#define DRIVER_IRQ_VBL     0x100
-#define DRIVER_DMA_QUEUE   0x200
-#define DRIVER_FB_DMA      0x400
-#define DRIVER_IRQ_VBL2    0x800
 #define DRIVER_GEM         0x1000
 #define DRIVER_MODESET     0x2000
 #define DRIVER_PRIME       0x4000
+#define DRIVER_RENDER      0x8000

 #define DRIVER_BUS_PCI       0x1
 #define DRIVER_BUS_PLATFORM  0x2
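Note (outside the diff): drivers advertise these capabilities by OR-ing the flags into the driver_features field of their struct drm_driver, and the core gates the corresponding paths with drm_core_check_feature(). The initializer below is purely illustrative (all other fields omitted):

    static struct drm_driver example_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER,
        /* callbacks, fops, ioctl table, etc. would be filled in here */
    };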
@@ -274,9 +278,6 @@ int drm_err(const char *func, const char *format, ...);

 #define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)

-#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
-#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
-
 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)

 /**
@@ -319,6 +320,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
 #define DRM_ROOT_ONLY      0x4
 #define DRM_CONTROL_ALLOW  0x8
 #define DRM_UNLOCKED       0x10
+#define DRM_RENDER_ALLOW   0x20

 struct drm_ioctl_desc {
     unsigned int cmd;
@@ -574,7 +576,6 @@ struct drm_local_map {

 typedef struct drm_local_map drm_local_map_t;

-#if 0
 /**
  * Mappings list
  */
@@ -584,7 +585,6 @@ struct drm_map_list {
     struct drm_local_map *map;          /**< mapping */
     uint64_t user_token;
     struct drm_master *master;
-    struct drm_mm_node *file_offset_node;   /**< fake offset */
 };

 /**
@@ -619,12 +619,9 @@ struct drm_ati_pcigart_info {
  * GEM specific mm private for tracking GEM objects
  */
 struct drm_gem_mm {
-    struct drm_mm offset_manager;       /**< Offset mgmt for buffer objects */
-    struct drm_open_hash offset_hash;   /**< User token hash table for maps */
+    struct drm_vma_offset_manager vma_manager;
 };

-#endif
-
 /**
  * This structure defines the drm_mm memory object, which will be used by the
  * DRM for its buffer objects.
@@ -633,8 +630,16 @@ struct drm_gem_object {
     /** Reference count of this object */
     struct kref refcount;

-    /** Handle count of this object. Each handle also holds a reference */
-    atomic_t handle_count; /* number of handles on this object */
+    /**
+     * handle_count - gem file_priv handle count of this object
+     *
+     * Each handle also holds a reference. Note that when the handle_count
+     * drops to 0 any global names (e.g. the id in the flink namespace) will
+     * be cleared.
+     *
+     * Protected by dev->object_name_lock.
+     * */
+    unsigned handle_count;

     /** Related drm device */
     struct drm_device *dev;
@@ -643,6 +648,7 @@ struct drm_gem_object {
     struct file *filp;

     /* Mapping info for this object */
+    struct drm_vma_offset_node vma_node;

     /**
      * Size of the object, in bytes.  Immutable over the object's
@@ -731,6 +737,7 @@ struct drm_bus {
     int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
     /* hooks that are for PCI */
     int (*agp_init)(struct drm_device *dev);
+    void (*agp_destroy)(struct drm_device *dev);

 };

@@ -879,8 +886,6 @@ struct drm_driver {
     void (*irq_preinstall) (struct drm_device *dev);
     int (*irq_postinstall) (struct drm_device *dev);
     void (*irq_uninstall) (struct drm_device *dev);
-    void (*set_version) (struct drm_device *dev,
-                         struct drm_set_version *sv);

     /* Master routines */
     int (*master_create)(struct drm_device *dev, struct drm_master *master);
@@ -949,7 +954,7 @@ struct drm_driver {

     u32 driver_features;
     int dev_priv_size;
-    struct drm_ioctl_desc *ioctls;
+    const struct drm_ioctl_desc *ioctls;
     int num_ioctls;
     const struct file_operations *fops;
     union {
@@ -965,9 +970,10 @@ struct drm_driver {

 #endif

 #define DRM_IRQ_ARGS        int irq, void *arg

 struct drm_driver {
+    int (*load) (struct drm_device *, unsigned long flags);
     int (*open) (struct drm_device *, struct drm_file *);

     irqreturn_t (*irq_handler) (DRM_IRQ_ARGS);
@@ -979,7 +985,7 @@ struct drm_driver {
     int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
     void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
     u32 driver_features;
 };


 #define DRM_MINOR_UNASSIGNED 0
@@ -1126,12 +1132,7 @@ struct drm_device {
     /*@{ */
     int irq_enabled;                /**< True if irq handler is enabled */
     __volatile__ long context_flag; /**< Context swapping flag */
-    __volatile__ long interrupt_flag; /**< Interruption handler flag */
-    __volatile__ long dma_flag;     /**< DMA dispatch flag */
-//  wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
-    int last_checked;               /**< Last context checked for DMA */
     int last_context;               /**< Last current context */
-    unsigned long last_switch;      /**< jiffies at last context switch */
     /*@} */

 //  struct work_struct work;
@@ -1169,12 +1170,6 @@ struct drm_device {
     spinlock_t event_lock;

     /*@} */
-//  cycles_t ctx_start;
-//  cycles_t lck_start;
-
-//  struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
-//  wait_queue_head_t buf_readers;  /**< Processes waiting to read */
-//  wait_queue_head_t buf_writers;  /**< Processes waiting to ctx switch */

 //  struct drm_agp_head *agp;       /**< AGP data */

@@ -1199,7 +1194,7 @@ struct drm_device {

     /** \name GEM information */
     /*@{ */
-    spinlock_t object_name_lock;
+    struct mutex object_name_lock;
     struct idr object_name_idr;
     /*@} */
     int switch_power_state;
@@ -1210,6 +1205,7 @@ struct drm_device {
 #define DRM_SWITCH_POWER_ON 0
 #define DRM_SWITCH_POWER_OFF 1
 #define DRM_SWITCH_POWER_CHANGING 2
+#define DRM_SWITCH_POWER_DYNAMIC_OFF 3

 static __inline__ int drm_core_check_feature(struct drm_device *dev,
                                              int feature)
@@ -1222,26 +1218,6 @@ static inline int drm_dev_to_irq(struct drm_device *dev)
     return dev->pdev->irq;
 }

-#if 0
-
-#if __OS_HAS_AGP
-static inline int drm_core_has_AGP(struct drm_device *dev)
-{
-    return drm_core_check_feature(dev, DRIVER_USE_AGP);
-}
-#else
-#define drm_core_has_AGP(dev) (0)
-#endif
-
-#if __OS_HAS_MTRR
-static inline int drm_core_has_MTRR(struct drm_device *dev)
-{
-    return drm_core_check_feature(dev, DRIVER_USE_MTRR);
-}
-#else
-#define drm_core_has_MTRR(dev) (0)
-#endif
-
 static inline void drm_device_set_unplugged(struct drm_device *dev)
 {
     smp_wmb();
@@ -1275,7 +1251,6 @@ extern int drm_lastclose(struct drm_device *dev);
 extern struct mutex drm_global_mutex;
 extern int drm_open(struct inode *inode, struct file *filp);
 extern int drm_stub_open(struct inode *inode, struct file *filp);
-extern int drm_fasync(int fd, struct file *filp, int on);
 extern ssize_t drm_read(struct file *filp, char __user *buffer,
                         size_t count, loff_t *offset);
 extern int drm_release(struct inode *inode, struct file *filp);
@@ -1289,14 +1264,6 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);

                                 /* Memory management support (drm_memory.h) */
 #include <drm/drm_memory.h>
-extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
-extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
-extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
-                                       struct page **pages,
-                                       unsigned long num_pages,
-                                       uint32_t gtt_offset,
-                                       uint32_t type);
-extern int drm_unbind_agp(DRM_AGP_MEM * handle);

                                 /* Misc. IOCTL support (drm_ioctl.h) */
 extern int drm_irq_by_busid(struct drm_device *dev, void *data,
@@ -1323,8 +1290,6 @@ extern int drm_resctx(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 extern int drm_addctx(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
-extern int drm_modctx(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv);
 extern int drm_getctx(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 extern int drm_switchctx(struct drm_device *dev, void *data,
@@ -1396,8 +1361,8 @@ extern int drm_mapbufs(struct drm_device *dev, void *data,
 extern int drm_order(unsigned long size);

                                 /* DMA support (drm_dma.h) */
-extern int drm_dma_setup(struct drm_device *dev);
-extern void drm_dma_takedown(struct drm_device *dev);
+extern int drm_legacy_dma_setup(struct drm_device *dev);
+extern void drm_legacy_dma_takedown(struct drm_device *dev);
 extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
 extern void drm_core_reclaim_buffers(struct drm_device *dev,
                                      struct drm_file *filp);
@@ -1411,7 +1376,6 @@ extern int drm_irq_uninstall(struct drm_device *dev);
 extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
                            struct drm_file *filp);
-extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                                      struct timeval *vblanktime);
@@ -1453,31 +1417,6 @@ extern int drm_modeset_ctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);

                                 /* AGP/GART support (drm_agpsupport.h) */
-extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-extern int drm_agp_acquire(struct drm_device *dev);
-extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv);
-extern int drm_agp_release(struct drm_device *dev);
-extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv);
-extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
-                                struct drm_file *file_priv);
-extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
-                              struct drm_file *file_priv);
-extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *file_priv);
-extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
-                              struct drm_file *file_priv);
-extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
-                                struct drm_file *file_priv);
-extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
-                              struct drm_file *file_priv);

                                 /* Stub support (drm_stub.h) */
 extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1491,9 +1430,8 @@ extern void drm_master_put(struct drm_master **master);
 extern void drm_put_dev(struct drm_device *dev);
 extern int drm_put_minor(struct drm_minor **minor);
 extern void drm_unplug_dev(struct drm_device *dev);
-#endif

 extern unsigned int drm_debug;
+extern unsigned int drm_rnodes;

 #if 0
 extern unsigned int drm_vblank_offdelay;
@@ -1501,17 +1439,12 @@ extern unsigned int drm_timestamp_precision;
 extern unsigned int drm_timestamp_monotonic;

 extern struct class *drm_class;
-extern struct proc_dir_entry *drm_proc_root;
 extern struct dentry *drm_debugfs_root;

 extern struct idr drm_minors_idr;

 extern struct drm_local_map *drm_getsarea(struct drm_device *dev);

-                                /* Proc support (drm_proc.h) */
-extern int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root);
-extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
-
                                 /* Debugfs support */
 #if defined(CONFIG_DEBUG_FS)
 extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
@@ -1536,10 +1469,9 @@ extern int drm_vma_info(struct seq_file *m, void *data);
 #endif

                                 /* Scatter Gather Support (drm_scatter.h) */
-extern void drm_sg_cleanup(struct drm_sg_mem * entry);
-extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+extern void drm_legacy_sg_cleanup(struct drm_device *dev);
+extern int drm_sg_alloc(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
-extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
 extern int drm_sg_free(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);

@@ -1563,14 +1495,8 @@ extern void drm_sysfs_destroy(void);
 extern int drm_sysfs_device_add(struct drm_minor *minor);
 extern void drm_sysfs_hotplug_event(struct drm_device *dev);
 extern void drm_sysfs_device_remove(struct drm_minor *minor);
-extern char *drm_get_connector_status_name(enum drm_connector_status status);
-static inline int drm_sysfs_connector_add(struct drm_connector *connector)
-{ return 0; };
-
-static inline void drm_sysfs_connector_remove(struct drm_connector *connector)
-{ };
+extern int drm_sysfs_connector_add(struct drm_connector *connector);
+extern void drm_sysfs_connector_remove(struct drm_connector *connector);

 #endif

 /* Graphics Execution Manager library functions (drm_gem.c) */
@@ -1582,13 +1508,15 @@ struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                             size_t size);
 int drm_gem_object_init(struct drm_device *dev,
                         struct drm_gem_object *obj, size_t size);
-int drm_gem_private_object_init(struct drm_device *dev,
+void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+                     struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

+#include <drm/drm_global.h>

 static inline void
 drm_gem_object_reference(struct drm_gem_object *obj)
@@ -1606,66 +1534,32 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-    if (obj != NULL) {
+    if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
         struct drm_device *dev = obj->dev;

         mutex_lock(&dev->struct_mutex);
-        kref_put(&obj->refcount, drm_gem_object_free);
+        if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
+            drm_gem_object_free(&obj->refcount);
         mutex_unlock(&dev->struct_mutex);
     }
 }

+int drm_gem_handle_create_tail(struct drm_file *file_priv,
+                               struct drm_gem_object *obj,
+                               u32 *handlep);
 int drm_gem_handle_create(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep);
 int drm_gem_handle_delete(struct drm_file *filp, u32 handle);

-static inline void
-drm_gem_object_handle_reference(struct drm_gem_object *obj)
-{
-    drm_gem_object_reference(obj);
-    atomic_inc(&obj->handle_count);
-}
-
-static inline void
-drm_gem_object_handle_unreference(struct drm_gem_object *obj)
-{
-    if (obj == NULL)
-        return;
-
-    if (atomic_read(&obj->handle_count) == 0)
-        return;
-    /*
-     * Must bump handle count first as this may be the last
-     * ref, in which case the object would disappear before we
-     * checked for a name
-     */
-    if (atomic_dec_and_test(&obj->handle_count))
-        drm_gem_object_handle_free(obj);
-    drm_gem_object_unreference(obj);
-}
-
-static inline void
-drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
-{
-    if (obj == NULL)
-        return;
-
-    if (atomic_read(&obj->handle_count) == 0)
-        return;
-
-    /*
-     * Must bump handle count first as this may be the last
-     * ref, in which case the object would disappear before we
-     * checked for a name
-     */
-
-    if (atomic_dec_and_test(&obj->handle_count))
-        drm_gem_object_handle_free(obj);
-    drm_gem_object_unreference_unlocked(obj);
-}
-
 void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+                       bool dirty, bool accessed);
+
 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
                                              struct drm_file *filp,
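Note (outside the diff): the rewritten drm_gem_object_unreference_unlocked() above uses a common two-phase pattern: drop the reference with atomic_add_unless() as long as the count is provably above 1, and only take dev->struct_mutex when the drop may be the final one. A self-contained illustration of the same pattern with C11 atomics; the object type and function names are invented for the sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct object {
        atomic_int refcount;        /* > 0 while the object is live */
    };

    /* Returns true when the caller dropped the last reference and must free. */
    static bool object_put(struct object *obj)
    {
        int old = atomic_load(&obj->refcount);

        /* Fast path: decrement while we are provably not the last holder. */
        while (old > 1) {
            if (atomic_compare_exchange_weak(&obj->refcount, &old, old - 1))
                return false;
        }

        /* Slow path: a real implementation takes the lock here (elided),
         * then performs the final decrement and frees on reaching zero. */
        return atomic_fetch_sub(&obj->refcount, 1) == 1;
    }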
@@ -1683,8 +1577,6 @@ extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
 extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);

-#if 0
-
 static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
                                                          unsigned int token)
 {
@@ -1699,29 +1591,19 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
 {
 }

+//#include <drm/drm_mem_util.h>
+
+extern int drm_fill_in_dev(struct drm_device *dev,
+                           const struct pci_device_id *ent,
+                           struct drm_driver *driver);
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
+/*@}*/
+

-static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
-{
-    if (size * nmemb <= PAGE_SIZE)
-        return kcalloc(nmemb, size, GFP_KERNEL);
-
-    if (size != 0 && nmemb > ULONG_MAX / size)
-        return NULL;
-
-    return __vmalloc(size * nmemb,
-                     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-}
-
-static __inline void drm_free_large(void *ptr)
-{
-    if (!is_vmalloc_addr(ptr))
-        return kfree(ptr);
-
-    vfree(ptr);
-}
-
-#endif
+extern int drm_get_pci_dev(struct pci_dev *pdev,
+                           const struct pci_device_id *ent,
+                           struct drm_driver *driver);

 #define DRM_PCIE_SPEED_25 1
 #define DRM_PCIE_SPEED_50 2
@@ -1744,5 +1626,7 @@ static __inline__ int drm_device_is_pcie(struct drm_device *dev)
 #define drm_sysfs_connector_remove(connector)

 #define LFB_SIZE 0xC00000
+extern struct drm_device *main_device;
+extern struct drm_file *drm_file_handlers[256];

 #endif
 drivers/include/drm/drm_core.h (new file, 34 lines)
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#define CORE_AUTHOR     "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
+
+#define CORE_NAME       "drm"
+#define CORE_DESC       "DRM shared core routines"
+#define CORE_DATE       "20060810"
+
+#define DRM_IF_MAJOR    1
+#define DRM_IF_MINOR    4
+
+#define CORE_MAJOR      1
+#define CORE_MINOR      1
+#define CORE_PATCHLEVEL 0
@@ -49,6 +49,7 @@ struct drm_clip_rect;
 #define DRM_MODE_OBJECT_FB 0xfbfbfbfb
 #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
 #define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
 
 struct drm_mode_object {
 	uint32_t id;
@@ -305,6 +306,7 @@ struct drm_connector;
 struct drm_encoder;
 struct drm_pending_vblank_event;
 struct drm_plane;
+struct drm_bridge;
 
 /**
  * drm_crtc_funcs - control CRTCs for a given device
@@ -363,7 +365,8 @@ struct drm_crtc_funcs {
 	 */
 	int (*page_flip)(struct drm_crtc *crtc,
 			struct drm_framebuffer *fb,
-			struct drm_pending_vblank_event *event);
+			struct drm_pending_vblank_event *event,
+			uint32_t flags);
 
 	int (*set_property)(struct drm_crtc *crtc,
 			struct drm_property *property, uint64_t val);
@@ -494,8 +497,6 @@ struct drm_encoder_funcs {
 	void (*destroy)(struct drm_encoder *encoder);
 };
 
-#define DRM_CONNECTOR_MAX_UMODES 16
-#define DRM_CONNECTOR_LEN 32
 #define DRM_CONNECTOR_MAX_ENCODER 3
 
 /**
@@ -507,6 +508,7 @@ struct drm_encoder_funcs {
  * @possible_crtcs: bitmask of potential CRTC bindings
  * @possible_clones: bitmask of potential sibling encoders for cloning
  * @crtc: currently bound CRTC
+ * @bridge: bridge associated to the encoder
  * @funcs: control functions
  * @helper_private: mid-layer private data
  *
@@ -523,6 +525,7 @@ struct drm_encoder {
 	uint32_t possible_clones;
 
 	struct drm_crtc *crtc;
+	struct drm_bridge *bridge;
 	const struct drm_encoder_funcs *funcs;
 	void *helper_private;
 };
@@ -682,6 +685,48 @@ struct drm_plane {
 	struct drm_object_properties properties;
 };
 
+/**
+ * drm_bridge_funcs - drm_bridge control functions
+ * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
+ * @disable: Called right before encoder prepare, disables the bridge
+ * @post_disable: Called right after encoder prepare, for lockstepped disable
+ * @mode_set: Set this mode to the bridge
+ * @pre_enable: Called right before encoder commit, for lockstepped commit
+ * @enable: Called right after encoder commit, enables the bridge
+ * @destroy: make object go away
+ */
+struct drm_bridge_funcs {
+	bool (*mode_fixup)(struct drm_bridge *bridge,
+			const struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*disable)(struct drm_bridge *bridge);
+	void (*post_disable)(struct drm_bridge *bridge);
+	void (*mode_set)(struct drm_bridge *bridge,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*pre_enable)(struct drm_bridge *bridge);
+	void (*enable)(struct drm_bridge *bridge);
+	void (*destroy)(struct drm_bridge *bridge);
+};
+
+/**
+ * drm_bridge - central DRM bridge control structure
+ * @dev: DRM device this bridge belongs to
+ * @head: list management
+ * @base: base mode object
+ * @funcs: control functions
+ * @driver_private: pointer to the bridge driver's internal context
+ */
+struct drm_bridge {
+	struct drm_device *dev;
+	struct list_head head;
+
+	struct drm_mode_object base;
+
+	const struct drm_bridge_funcs *funcs;
+	void *driver_private;
+};
+
 /**
  * drm_mode_set - new values for a CRTC config change
  * @head: list management
@@ -742,6 +787,7 @@ struct drm_mode_group {
 	uint32_t num_crtcs;
 	uint32_t num_encoders;
 	uint32_t num_connectors;
+	uint32_t num_bridges;
 
 	/* list of object IDs for this group */
 	uint32_t *id_list;
@@ -756,6 +802,8 @@ struct drm_mode_group {
  * @fb_list: list of framebuffers available
 * @num_connector: number of connectors on this device
 * @connector_list: list of connector objects
+ * @num_bridge: number of bridges on this device
+ * @bridge_list: list of bridge objects
 * @num_encoder: number of encoders on this device
 * @encoder_list: list of encoder objects
 * @num_crtc: number of CRTCs on this device
@@ -793,6 +841,8 @@ struct drm_mode_config {
 
 	int num_connector;
 	struct list_head connector_list;
+	int num_bridge;
+	struct list_head bridge_list;
 	int num_encoder;
 	struct list_head encoder_list;
 	int num_plane;
@@ -839,11 +889,13 @@ struct drm_mode_config {
 
 	/* Optional properties */
 	struct drm_property *scaling_mode_property;
-	struct drm_property *dithering_mode_property;
 	struct drm_property *dirty_info_property;
 
 	/* dumb ioctl parameters */
 	uint32_t preferred_depth, prefer_shadow;
 
+	/* whether async page flip is supported or not */
+	bool async_page_flip;
 };
 
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -869,6 +921,8 @@ extern int drm_crtc_init(struct drm_device *dev,
 			const struct drm_crtc_funcs *funcs);
 extern void drm_crtc_cleanup(struct drm_crtc *crtc);
 
+extern void drm_connector_ida_init(void);
+extern void drm_connector_ida_destroy(void);
 extern int drm_connector_init(struct drm_device *dev,
 			struct drm_connector *connector,
 			const struct drm_connector_funcs *funcs,
@@ -878,6 +932,10 @@ extern void drm_connector_cleanup(struct drm_connector *connector);
 /* helper to unplug all connectors from sysfs for device */
 extern void drm_connector_unplug_all(struct drm_device *dev);
 
+extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
+			const struct drm_bridge_funcs *funcs);
+extern void drm_bridge_cleanup(struct drm_bridge *bridge);
+
 extern int drm_encoder_init(struct drm_device *dev,
 			struct drm_encoder *encoder,
 			const struct drm_encoder_funcs *funcs,
@@ -908,7 +966,6 @@ extern struct edid *drm_get_edid(struct drm_connector *connector,
 			struct i2c_adapter *adapter);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
-extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
 extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
 extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
 			const struct drm_display_mode *mode);
@@ -925,14 +982,9 @@ extern int drm_mode_height(const struct drm_display_mode *mode);
 /* for us by fb module */
 extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
 extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
-extern void drm_mode_list_concat(struct list_head *head,
-			struct list_head *new);
 extern void drm_mode_validate_size(struct drm_device *dev,
 			struct list_head *mode_list,
 			int maxX, int maxY, int maxPitch);
-extern void drm_mode_validate_clocks(struct drm_device *dev,
-			struct list_head *mode_list,
-			int *min, int *max, int n_ranges);
 extern void drm_mode_prune_invalid(struct drm_device *dev,
 			struct list_head *mode_list, bool verbose);
 extern void drm_mode_sort(struct list_head *mode_list);
@@ -949,9 +1001,6 @@ extern int drm_object_property_set_value(struct drm_mode_object *obj,
 extern int drm_object_property_get_value(struct drm_mode_object *obj,
 			struct drm_property *property,
 			uint64_t *value);
-extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
-extern void drm_framebuffer_set_object(struct drm_device *dev,
-			unsigned long handle);
 extern int drm_framebuffer_init(struct drm_device *dev,
 			struct drm_framebuffer *fb,
 			const struct drm_framebuffer_funcs *funcs);
@@ -962,10 +1011,6 @@ extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
 extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
 extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
 extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
-extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
-extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
-extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
 extern void drm_object_attach_property(struct drm_mode_object *obj,
 			struct drm_property *property,
@@ -990,7 +1035,6 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
 extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
 			char *formats[]);
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
-extern int drm_mode_create_dithering_property(struct drm_device *dev);
 extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
 extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
 
@@ -1040,17 +1084,12 @@ extern int drm_mode_getblob_ioctl(struct drm_device *dev,
 			void *data, struct drm_file *file_priv);
 extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
 			void *data, struct drm_file *file_priv);
-extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
-			void *data, struct drm_file *file_priv);
-extern int drm_mode_replacefb(struct drm_device *dev,
-			void *data, struct drm_file *file_priv);
 extern int drm_mode_getencoder(struct drm_device *dev,
 			void *data, struct drm_file *file_priv);
 extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 			void *data, struct drm_file *file_priv);
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 			void *data, struct drm_file *file_priv);
-extern u8 *drm_find_cea_extension(struct edid *edid);
 extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
 extern bool drm_detect_monitor_audio(struct edid *edid);
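The drm_crtc.h hunks above introduce the drm_bridge object that sits between an encoder and its connector. A minimal sketch of what a bridge driver built on these declarations could look like; every panel_bridge_* name is hypothetical, and all callbacks are filled in here on the assumption that the helpers of this era call them unconditionally:

    static struct drm_bridge panel_bridge;	/* hypothetical bridge instance */

    static bool panel_bridge_mode_fixup(struct drm_bridge *bridge,
    		const struct drm_display_mode *mode,
    		struct drm_display_mode *adjusted_mode)
    {
    	return true;	/* accept every mode unchanged */
    }

    static void panel_bridge_disable(struct drm_bridge *bridge)      { /* blank the panel */ }
    static void panel_bridge_post_disable(struct drm_bridge *bridge) { /* cut power */ }
    static void panel_bridge_mode_set(struct drm_bridge *bridge,
    		struct drm_display_mode *mode,
    		struct drm_display_mode *adjusted_mode)              { /* program timings */ }
    static void panel_bridge_pre_enable(struct drm_bridge *bridge)   { /* power up */ }
    static void panel_bridge_enable(struct drm_bridge *bridge)       { /* unblank */ }
    static void panel_bridge_destroy(struct drm_bridge *bridge)      { drm_bridge_cleanup(bridge); }

    static const struct drm_bridge_funcs panel_bridge_funcs = {
    	.mode_fixup   = panel_bridge_mode_fixup,
    	.disable      = panel_bridge_disable,
    	.post_disable = panel_bridge_post_disable,
    	.mode_set     = panel_bridge_mode_set,
    	.pre_enable   = panel_bridge_pre_enable,
    	.enable       = panel_bridge_enable,
    	.destroy      = panel_bridge_destroy,
    };

    /* At driver init, once the encoder exists: */
    if (drm_bridge_init(dev, &panel_bridge, &panel_bridge_funcs) == 0)
    	encoder->bridge = &panel_bridge;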
@@ -343,12 +343,41 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
 				int lane);
 
 #define DP_RECEIVER_CAP_SIZE		0xf
+#define EDP_PSR_RECEIVER_CAP_SIZE	2
 
 void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 
 u8 drm_dp_link_rate_to_bw_code(int link_rate);
 int drm_dp_bw_code_to_link_rate(u8 link_bw);
 
+struct edp_sdp_header {
+	u8 HB0; /* Secondary Data Packet ID */
+	u8 HB1; /* Secondary Data Packet Type */
+	u8 HB2; /* 7:5 reserved, 4:0 revision number */
+	u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
+} __packed;
+
+#define EDP_SDP_HEADER_REVISION_MASK		0x1F
+#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES	0x1F
+
+struct edp_vsc_psr {
+	struct edp_sdp_header sdp_header;
+	u8 DB0; /* Stereo Interface */
+	u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
+	u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
+	u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
+	u8 DB4; /* CRC value bits 7:0 of the G or Y component */
+	u8 DB5; /* CRC value bits 15:8 of the G or Y component */
+	u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
+	u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
+	u8 DB8_31[24]; /* Reserved */
+} __packed;
+
+#define EDP_VSC_PSR_STATE_ACTIVE	(1<<0)
+#define EDP_VSC_PSR_UPDATE_RFB		(1<<1)
+#define EDP_VSC_PSR_CRC_VALUES_VALID	(1<<2)
+
 static inline int
 drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
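For reference, the existing link-rate helpers declared in this hunk convert between DPCD bandwidth codes and per-lane rates in kHz. A small sketch of the conversion, assuming the DP_MAX_LINK_RATE define (DPCD offset 0x001) from the same header and a DPCD buffer filled by an AUX read elsewhere:

    u8 dpcd[DP_RECEIVER_CAP_SIZE];
    /* ... dpcd filled by reading the sink's receiver capabilities over AUX ... */

    int max_khz = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
    /* e.g. a bandwidth code of 0x0a comes back as 270000 kHz (the 2.7 Gbps rate) */

    u8 code = drm_dp_link_rate_to_bw_code(max_khz);	/* round-trips back to 0x0a */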
@@ -256,9 +256,11 @@ struct drm_encoder;
 struct drm_connector;
 struct drm_display_mode;
 struct hdmi_avi_infoframe;
+struct hdmi_vendor_infoframe;
 
 void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
 int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
+int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
 int drm_av_sync_delay(struct drm_connector *connector,
 			struct drm_display_mode *mode);
 struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
@@ -268,5 +270,8 @@ int drm_load_edid_firmware(struct drm_connector *connector);
 int
 drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 			const struct drm_display_mode *mode);
+int
+drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+			const struct drm_display_mode *mode);
+
 #endif /* __DRM_EDID_H__ */
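A hedged sketch of how the new vendor-infoframe helper is meant to pair with the existing AVI helper when preparing HDMI infoframes for a mode; the packing/writing step lives in linux/hdmi.h and the driver, neither of which is part of this diff:

    struct hdmi_avi_infoframe avi;
    struct hdmi_vendor_infoframe vendor;

    drm_hdmi_avi_infoframe_from_display_mode(&avi, mode);

    /* Returns a negative error for modes with no HDMI 1.4 vendor payload
     * (neither 4k nor stereo 3D); treat that as "skip the frame". */
    if (drm_hdmi_vendor_infoframe_from_display_mode(&vendor, mode) == 0) {
    	/* pack the frame and write it to the hardware */
    }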
@@ -1,160 +1,162 @@
 /*
 * Copyright 2009 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
- *          Christian Konig
+ *          Christian König
 */
 #ifndef DRM_FIXED_H
 #define DRM_FIXED_H
 
+#include <linux/math64.h>
+
 typedef union dfixed {
 	u32 full;
 } fixed20_12;
 
 #define dfixed_const(A) (u32)(((A) << 12))/*  + ((B + 0.000122)*4096)) */
 #define dfixed_const_half(A) (u32)(((A) << 12) + 2048)
 #define dfixed_const_666(A) (u32)(((A) << 12) + 2731)
 #define dfixed_const_8(A) (u32)(((A) << 12) + 3277)
 #define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
 #define dfixed_init(A) { .full = dfixed_const((A)) }
 #define dfixed_init_half(A) { .full = dfixed_const_half((A)) }
 #define dfixed_trunc(A) ((A).full >> 12)
 #define dfixed_frac(A) ((A).full & ((1 << 12) - 1))
 
 static inline u32 dfixed_floor(fixed20_12 A)
 {
 	u32 non_frac = dfixed_trunc(A);
 
 	return dfixed_const(non_frac);
 }
 
 static inline u32 dfixed_ceil(fixed20_12 A)
 {
 	u32 non_frac = dfixed_trunc(A);
 
 	if (A.full > dfixed_const(non_frac))
 		return dfixed_const(non_frac + 1);
 	else
 		return dfixed_const(non_frac);
 }
 
 static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
 {
 	u64 tmp = ((u64)A.full << 13);
 
 	do_div(tmp, B.full);
 	tmp += 1;
 	tmp /= 2;
 	return lower_32_bits(tmp);
 }
 
 #define DRM_FIXED_POINT		32
 #define DRM_FIXED_ONE		(1ULL << DRM_FIXED_POINT)
 #define DRM_FIXED_DECIMAL_MASK	(DRM_FIXED_ONE - 1)
 #define DRM_FIXED_DIGITS_MASK	(~DRM_FIXED_DECIMAL_MASK)
 
 static inline s64 drm_int2fixp(int a)
 {
 	return ((s64)a) << DRM_FIXED_POINT;
 }
 
 static inline int drm_fixp2int(int64_t a)
 {
 	return ((s64)a) >> DRM_FIXED_POINT;
 }
 
 static inline unsigned drm_fixp_msbset(int64_t a)
 {
 	unsigned shift, sign = (a >> 63) & 1;
 
 	for (shift = 62; shift > 0; --shift)
 		if (((a >> shift) & 1) != sign)
 			return shift;
 
 	return 0;
 }
 
 static inline s64 drm_fixp_mul(s64 a, s64 b)
 {
 	unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b);
 	s64 result;
 
 	if (shift > 61) {
 		shift = shift - 61;
 		a >>= (shift >> 1) + (shift & 1);
 		b >>= shift >> 1;
 	} else
 		shift = 0;
 
 	result = a * b;
 
 	if (shift > DRM_FIXED_POINT)
 		return result << (shift - DRM_FIXED_POINT);
 
 	if (shift < DRM_FIXED_POINT)
 		return result >> (DRM_FIXED_POINT - shift);
 
 	return result;
 }
 
 static inline s64 drm_fixp_div(s64 a, s64 b)
 {
 	unsigned shift = 62 - drm_fixp_msbset(a);
 	s64 result;
 
 	a <<= shift;
 
 	if (shift < DRM_FIXED_POINT)
 		b >>= (DRM_FIXED_POINT - shift);
 
 	result = div64_s64(a, b);
 
 	if (shift > DRM_FIXED_POINT)
 		return result >> (shift - DRM_FIXED_POINT);
 
 	return result;
 }
 
 static inline s64 drm_fixp_exp(s64 x)
 {
 	s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
 	s64 sum = DRM_FIXED_ONE, term, y = x;
 	u64 count = 1;
 
 	if (x < 0)
 		y = -1 * x;
 
 	term = y;
 
 	while (term >= tolerance) {
 		sum = sum + term;
 		count = count + 1;
 		term = drm_fixp_mul(term, div64_s64(y, count));
 	}
 
 	if (x < 0)
 		sum = drm_fixp_div(DRM_FIXED_ONE, sum);
 
 	return sum;
 }
 
 #endif
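The dfixed values above are 20.12 fixed point: the low 12 bits hold the fraction, so 1.0 is stored as 1 << 12 = 4096. A small worked sketch using only the macros from this header (values verified by hand; imagine it inside any function of a driver that includes drm_fixed.h):

    fixed20_12 a = dfixed_init(3);		/* 3.0  -> 0x3000 */
    fixed20_12 b = dfixed_init_half(2);		/* 2.5  -> 0x2800 */
    fixed20_12 c;

    c.full = dfixed_mul(a, b);			/* 3.0 * 2.5 = 7.5 -> 0x7800 */

    u32 whole = dfixed_trunc(c);		/* 7 */
    u32 frac  = dfixed_frac(c);			/* 0x800, i.e. 2048/4096 = 0.5 */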
@@ -41,6 +41,11 @@
 #include <linux/seq_file.h>
 #endif
 
+enum drm_mm_search_flags {
+	DRM_MM_SEARCH_DEFAULT =	0,
+	DRM_MM_SEARCH_BEST =	1 << 0,
+};
+
 struct drm_mm_node {
 	struct list_head node_list;
 	struct list_head hole_stack;
@@ -62,9 +67,6 @@ struct drm_mm {
 	/* head_node.node_list is the list of all memory nodes, ordered
 	 * according to the (increasing) start address of the memory node. */
 	struct drm_mm_node head_node;
-	struct list_head unused_nodes;
-	int num_unused;
-	spinlock_t unused_lock;
 	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_color;
@@ -115,13 +117,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 						&(mm)->head_node.node_list, \
 						node_list)
-#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
-	for (entry = (mm)->prev_scanned_node, \
-		next = entry ? list_entry(entry->node_list.next, \
-			struct drm_mm_node, node_list) : NULL; \
-	     entry != NULL; entry = next, \
-		next = entry ? list_entry(entry->node_list.next, \
-			struct drm_mm_node, node_list) : NULL) \
 
 /* Note that we need to unroll list_for_each_entry in order to inline
  * setting hole_start and hole_end on each iteration and keep the
@@ -138,124 +133,50 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 /*
  * Basic range manager support (drm_mm.c)
  */
-extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-			unsigned long start,
-			unsigned long size,
-			bool atomic);
-extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long color,
-			int atomic);
-extern struct drm_mm_node *drm_mm_get_block_range_generic(
-			struct drm_mm_node *node,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long color,
-			unsigned long start,
-			unsigned long end,
-			int atomic);
-static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
-			unsigned long size,
-			unsigned alignment)
-{
-	return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
-			unsigned long size,
-			unsigned alignment)
-{
-	return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
-}
-static inline struct drm_mm_node *drm_mm_get_block_range(
-			struct drm_mm_node *parent,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long start,
-			unsigned long end)
-{
-	return drm_mm_get_block_range_generic(parent, size, alignment, 0,
-						start, end, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
-			struct drm_mm_node *parent,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long start,
-			unsigned long end)
-{
-	return drm_mm_get_block_range_generic(parent, size, alignment, 0,
-						start, end, 1);
-}
-
-extern int drm_mm_insert_node(struct drm_mm *mm,
-			struct drm_mm_node *node,
-			unsigned long size,
-			unsigned alignment);
-extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
-			struct drm_mm_node *node,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long start,
-			unsigned long end);
-extern int drm_mm_insert_node_generic(struct drm_mm *mm,
-			struct drm_mm_node *node,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long color);
-extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
-			struct drm_mm_node *node,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long color,
-			unsigned long start,
-			unsigned long end);
-extern void drm_mm_put_block(struct drm_mm_node *cur);
-extern void drm_mm_remove_node(struct drm_mm_node *node);
-extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long color,
-			bool best_match);
-extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
-			const struct drm_mm *mm,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long color,
-			unsigned long start,
-			unsigned long end,
-			bool best_match);
-static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
-			unsigned long size,
-			unsigned alignment,
-			bool best_match)
-{
-	return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
-}
-static inline struct drm_mm_node *drm_mm_search_free_in_range(
-			const struct drm_mm *mm,
-			unsigned long size,
-			unsigned alignment,
-			unsigned long start,
-			unsigned long end,
-			bool best_match)
-{
-	return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
-						start, end, best_match);
-}
+extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
+
+extern int drm_mm_insert_node_generic(struct drm_mm *mm,
+			struct drm_mm_node *node,
+			unsigned long size,
+			unsigned alignment,
+			unsigned long color,
+			enum drm_mm_search_flags flags);
+static inline int drm_mm_insert_node(struct drm_mm *mm,
+			struct drm_mm_node *node,
+			unsigned long size,
+			unsigned alignment,
+			enum drm_mm_search_flags flags)
+{
+	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
+}
+
+extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+			struct drm_mm_node *node,
+			unsigned long size,
+			unsigned alignment,
+			unsigned long color,
+			unsigned long start,
+			unsigned long end,
+			enum drm_mm_search_flags flags);
+static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
+			struct drm_mm_node *node,
+			unsigned long size,
+			unsigned alignment,
+			unsigned long start,
+			unsigned long end,
+			enum drm_mm_search_flags flags)
+{
+	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
+						0, start, end, flags);
+}
+
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 extern void drm_mm_init(struct drm_mm *mm,
 			unsigned long start,
 			unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
 extern int drm_mm_clean(struct drm_mm *mm);
-extern int drm_mm_pre_get(struct drm_mm *mm);
-
-static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
-{
-	return block->mm;
-}
 
 void drm_mm_init_scan(struct drm_mm *mm,
 			unsigned long size,
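With this drm_mm rework the allocator no longer hands out its own nodes: callers embed a struct drm_mm_node and ask for it to be filled in. A minimal sketch of the new calling convention, using only the declarations in the hunk above (the 256 MiB range and 4 KiB size/alignment are illustrative):

    struct drm_mm mm;
    struct drm_mm_node node;	/* usually embedded in the driver's buffer object */
    int ret;

    drm_mm_init(&mm, 0, 256 * 1024 * 1024);	/* manage a 256 MiB address range */

    memset(&node, 0, sizeof(node));
    ret = drm_mm_insert_node_generic(&mm, &node,
    			4096,			/* size */
    			4096,			/* alignment */
    			0,			/* color */
    			DRM_MM_SEARCH_BEST);
    if (ret == 0) {
    	/* node.start .. node.start + node.size now belongs to this caller */
    	drm_mm_remove_node(&node);
    }

    drm_mm_takedown(&mm);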
@@ -1,4 +1,25 @@
 #define radeon_PCI_IDS \
+	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
drivers/include/drm/drm_vma_manager.h (new file, 257 lines)
@@ -0,0 +1,257 @@
+#ifndef __DRM_VMA_MANAGER_H__
+#define __DRM_VMA_MANAGER_H__
+
+/*
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mm.h>
+//#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct drm_vma_offset_file {
+	struct rb_node vm_rb;
+	struct file *vm_filp;
+	unsigned long vm_count;
+};
+
+struct drm_vma_offset_node {
+	rwlock_t vm_lock;
+	struct drm_mm_node vm_node;
+	struct rb_node vm_rb;
+	struct rb_root vm_files;
+};
+
+struct drm_vma_offset_manager {
+	rwlock_t vm_lock;
+	struct rb_root vm_addr_space_rb;
+	struct drm_mm vm_addr_space_mm;
+};
+
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+				 unsigned long page_offset, unsigned long size);
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
+
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+						  unsigned long start,
+						  unsigned long pages);
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+							  unsigned long start,
+							  unsigned long pages);
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+		       struct drm_vma_offset_node *node, unsigned long pages);
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+			   struct drm_vma_offset_node *node);
+
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+			     struct file *filp);
+
+/**
+ * drm_vma_offset_exact_lookup() - Look up node by exact address
+ * @mgr: Manager object
+ * @start: Start address (page-based, not byte-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
+ * It only returns the exact object with the given start address.
+ *
+ * RETURNS:
+ * Node at exact start address @start.
+ */
+static inline struct drm_vma_offset_node *
+drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
+			    unsigned long start,
+			    unsigned long pages)
+{
+	struct drm_vma_offset_node *node;
+
+	node = drm_vma_offset_lookup(mgr, start, pages);
+	return (node && node->vm_node.start == start) ? node : NULL;
+}
+
+/**
+ * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
+ * are allowed while holding this lock. All other contexts are blocked from VMA
+ * until the lock is released via drm_vma_offset_unlock_lookup().
+ *
+ * Use this if you need to take a reference to the objects returned by
+ * drm_vma_offset_lookup_locked() before releasing this lock again.
+ *
+ * This lock must not be used for anything else than extended lookups. You must
+ * not call any other VMA helpers while holding this lock.
+ *
+ * Note: You're in atomic-context while holding this lock!
+ *
+ * Example:
+ *   drm_vma_offset_lock_lookup(mgr);
+ *   node = drm_vma_offset_lookup_locked(mgr);
+ *   if (node)
+ *       kref_get_unless_zero(container_of(node, sth, entr));
+ *   drm_vma_offset_unlock_lookup(mgr);
+ */
+static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
+{
+	read_lock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
+ */
+static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
+{
+	read_unlock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_node_reset() - Initialize or reset node object
+ * @node: Node to initialize or reset
+ *
+ * Reset a node to its initial state. This must be called before using it with
+ * any VMA offset manager.
+ *
+ * This must not be called on an already allocated node, or you will leak
+ * memory.
+ */
+static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
+{
+	memset(node, 0, sizeof(*node));
+	node->vm_files = RB_ROOT;
+	rwlock_init(&node->vm_lock);
+}
+
+/**
+ * drm_vma_node_start() - Return start address for page-based addressing
+ * @node: Node to inspect
+ *
+ * Return the start address of the given node. This can be used as offset into
+ * the linear VM space that is provided by the VMA offset manager. Note that
+ * this can only be used for page-based addressing. If you need a proper offset
+ * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
+ * drm_vma_node_offset_addr() helper instead.
+ *
+ * RETURNS:
+ * Start address of @node for page-based addressing. 0 if the node does not
+ * have an offset allocated.
+ */
+static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
+{
+	return node->vm_node.start;
+}
+
+/**
+ * drm_vma_node_size() - Return size (page-based)
+ * @node: Node to inspect
+ *
+ * Return the size as number of pages for the given node. This is the same size
+ * that was passed to drm_vma_offset_add(). If no offset is allocated for the
+ * node, this is 0.
+ *
+ * RETURNS:
+ * Size of @node as number of pages. 0 if the node does not have an offset
+ * allocated.
+ */
+static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
+{
+	return node->vm_node.size;
+}
+
+/**
+ * drm_vma_node_has_offset() - Check whether node is added to offset manager
+ * @node: Node to be checked
+ *
+ * RETURNS:
+ * true iff the node was previously allocated an offset and added to
+ * an vma offset manager.
+ */
+static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
+{
+	return drm_mm_node_allocated(&node->vm_node);
+}
+
+/**
+ * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
+ * @node: Linked offset node
+ *
+ * Same as drm_vma_node_start() but returns the address as a valid offset that
+ * can be used for user-space mappings during mmap().
+ * This must not be called on unlinked nodes.
+ *
+ * RETURNS:
+ * Offset of @node for byte-based addressing. 0 if the node does not have an
+ * object allocated.
+ */
+static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+	return ((__u64)node->vm_node.start) << PAGE_SHIFT;
+}
+
+/**
+ * drm_vma_node_unmap() - Unmap offset node
+ * @node: Offset node
+ * @file_mapping: Address space to unmap @node from
+ *
+ * Unmap all userspace mappings for a given offset node. The mappings must be
+ * associated with the @file_mapping address-space. If no offset exists or
+ * the address-space is invalid, nothing is done.
+ *
+ * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
+ * is not called on this node concurrently.
+ */
+static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
+				      struct address_space *file_mapping)
+{
+	if (file_mapping && drm_vma_node_has_offset(node))
+		unmap_mapping_range(file_mapping,
+				    drm_vma_node_offset_addr(node),
+				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
+}
+
+/**
+ * drm_vma_node_verify_access() - Access verification helper for TTM
+ * @node: Offset node
+ * @filp: Open-file
+ *
+ * This checks whether @filp is granted access to @node. It is the same as
+ * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
+ * verify_access() callbacks.
+ *
+ * RETURNS:
+ * 0 if access is granted, -EACCES otherwise.
+ */
+static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
+					     struct file *filp)
+{
+	return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
+}
+
+#endif /* __DRM_VMA_MANAGER_H__ */
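A sketch of the intended flow for GEM mmap offsets with the new manager, loosely following how the kernel's GEM core uses it; the manager normally lives in the drm_device and the node inside each GEM object, and DRM_FILE_PAGE_OFFSET_START/SIZE and obj_size are assumptions, not names from this header:

    struct drm_vma_offset_manager mgr;
    struct drm_vma_offset_node node;
    unsigned long npages = obj_size >> PAGE_SHIFT;	/* obj_size: assumed */

    drm_vma_offset_manager_init(&mgr, DRM_FILE_PAGE_OFFSET_START,
    			DRM_FILE_PAGE_OFFSET_SIZE);	/* both constants assumed */

    drm_vma_node_reset(&node);
    if (drm_vma_offset_add(&mgr, &node, npages) == 0) {
    	/* value handed to user space as the mmap() offset for this object */
    	u64 mmap_offset = drm_vma_node_offset_addr(&node);

    	/* ... later, when the object is destroyed ... */
    	drm_vma_offset_remove(&mgr, &node);
    }
    drm_vma_offset_manager_destroy(&mgr);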
@@ -26,6 +26,7 @@
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
 
+#include <drm/i915_pciids.h>
 #include <uapi/drm/i915_drm.h>
 
 /* For use by IPS driver */
@@ -34,4 +35,37 @@ extern bool i915_gpu_raise(void);
 extern bool i915_gpu_lower(void);
 extern bool i915_gpu_busy(void);
 extern bool i915_gpu_turbo_disable(void);
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga rbiter.
+ */
+#define INTEL_GMCH_CTRL		0x52
+#define INTEL_GMCH_VGA_DISABLE	(1 << 1)
+#define SNB_GMCH_CTRL		0x50
+#define SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK	0x3
+#define SNB_GMCH_GMS_SHIFT	3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK	0x1f
+
+#define I830_GMCH_CTRL		0x52
+
+#define I855_GMCH_GMS_MASK		0xF0
+#define I855_GMCH_GMS_STOLEN_0M		0x0
+#define I855_GMCH_GMS_STOLEN_1M		(0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M		(0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M		(0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M	(0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M	(0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M	(0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M	(0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
+
 #endif /* _I915_DRM_H_ */
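The GMCH defines above describe bits in the bridge device's PCI config space. A hedged sketch of decoding the Sandy Bridge GTT and stolen-memory sizes from SNB_GMCH_CTRL; the pci_read_config_word() call, dev->pdev, and the 1 MiB / 32 MiB unit scaling follow how i915 has traditionally interpreted these fields and are assumptions, not part of this diff:

    u16 snb_gmch_ctl;
    pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

    /* GGMS: size of the GTT itself */
    unsigned int ggms = (snb_gmch_ctl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;

    /* GMS: main memory stolen for graphics */
    unsigned int gms = (snb_gmch_ctl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;

    unsigned long gtt_bytes    = (unsigned long)ggms << 20;	/* 1 MiB units (assumption) */
    unsigned long stolen_bytes = (unsigned long)gms << 25;	/* 32 MiB units (assumption) */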
 drivers/include/drm/i915_pciids.h (new file, 211 lines)
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _I915_PCIIDS_H
+#define _I915_PCIIDS_H
+
+/*
+ * A pci_device_id struct {
+ *	__u32 vendor, device;
+ *	__u32 subvendor, subdevice;
+ *	__u32 class, class_mask;
+ *	kernel_ulong_t driver_data;
+ * };
+ * Don't use C99 here because "class" is reserved and we want to
+ * give userspace flexibility.
+ */
+#define INTEL_VGA_DEVICE(id, info) {		\
+	0x8086,	id,				\
+	~0, ~0,					\
+	0x030000, 0xff0000,			\
+	(unsigned long) info }
+
+#define INTEL_QUANTA_VGA_DEVICE(info) {		\
+	0x8086,	0x16a,				\
+	0x152d,	0x8990,				\
+	0x030000, 0xff0000,			\
+	(unsigned long) info }
+
+#define INTEL_I830_IDS(info) \
+	INTEL_VGA_DEVICE(0x3577, info)
+
+#define INTEL_I845G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2562, info)
+
+#define INTEL_I85X_IDS(info) \
+	INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
+	INTEL_VGA_DEVICE(0x358e, info)
+
+#define INTEL_I865G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
+
+#define INTEL_I915G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
+	INTEL_VGA_DEVICE(0x258a, info)  /* E7221_G */
+
+#define INTEL_I915GM_IDS(info) \
+	INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
+
+#define INTEL_I945G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
+
+#define INTEL_I945GM_IDS(info) \
+	INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
+	INTEL_VGA_DEVICE(0x27ae, info)  /* I945_GME */
+
+#define INTEL_I965G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
+	INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
+	INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
+	INTEL_VGA_DEVICE(0x29a2, info)  /* I965_G */
+
+#define INTEL_G33_IDS(info) \
+	INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
+	INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
+	INTEL_VGA_DEVICE(0x29d2, info)  /* Q33_G */
+
+#define INTEL_I965GM_IDS(info) \
+	INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
+	INTEL_VGA_DEVICE(0x2a12, info)  /* I965_GME */
+
+#define INTEL_GM45_IDS(info) \
+	INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
+
+#define INTEL_G45_IDS(info) \
+	INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
+	INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
+	INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
+	INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
+	INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
+	INTEL_VGA_DEVICE(0x2e92, info)  /* B43_G.1 */
+
+#define INTEL_PINEVIEW_IDS(info) \
+	INTEL_VGA_DEVICE(0xa001, info), \
+	INTEL_VGA_DEVICE(0xa011, info)
+
+#define INTEL_IRONLAKE_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0042, info)
+
+#define INTEL_IRONLAKE_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0046, info)
+
+#define INTEL_SNB_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0102, info), \
+	INTEL_VGA_DEVICE(0x0112, info), \
+	INTEL_VGA_DEVICE(0x0122, info), \
+	INTEL_VGA_DEVICE(0x010A, info)
+
+#define INTEL_SNB_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0106, info), \
+	INTEL_VGA_DEVICE(0x0116, info), \
+	INTEL_VGA_DEVICE(0x0126, info)
+
+#define INTEL_IVB_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0166, info)  /* GT2 mobile */
+
+#define INTEL_IVB_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
+	INTEL_VGA_DEVICE(0x016a, info)  /* GT2 server */
+
+#define INTEL_IVB_Q_IDS(info) \
+	INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
+
+#define INTEL_HSW_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
+	INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
+	INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
+	INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
+	INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
+	INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
+	INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
+	INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
+	INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
+	INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
+	INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
+	INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
+	INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0D2E, info)  /* CRW GT3 reserved */ \
+
+#define INTEL_HSW_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
+	INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
+	INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0D26, info)  /* CRW GT3 mobile */
+
+#define INTEL_VLV_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0f30, info), \
+	INTEL_VGA_DEVICE(0x0f31, info), \
+	INTEL_VGA_DEVICE(0x0f32, info), \
+	INTEL_VGA_DEVICE(0x0f33, info), \
+	INTEL_VGA_DEVICE(0x0157, info)
+
+#define INTEL_VLV_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0155, info)
+
+#endif /* _I915_PCIIDS_H */
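A short sketch of the intended use: a driver builds its struct pci_device_id table from these macros, one family per line. The example_*_info driver_data pointers are placeholders, not part of this header:

static const struct pci_device_id example_pciidlist[] = {
	INTEL_I915G_IDS(&example_i915g_info),
	INTEL_SNB_D_IDS(&example_snb_d_info),
	INTEL_IVB_M_IDS(&example_ivb_m_info),
	INTEL_VLV_D_IDS(&example_vlv_d_info),
	{0, 0, 0}
};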
 drivers/include/drm/i915_powerwell.h (new file, 36 lines)
@@ -0,0 +1,36 @@
+/**************************************************************************
+ *
+ * Copyright 2013 Intel Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+#ifndef _I915_POWERWELL_H_
+#define _I915_POWERWELL_H_
+
+/* For use by hda_i915 driver */
+extern void i915_request_power_well(void);
+extern void i915_release_power_well(void);
+
+#endif /* _I915_POWERWELL_H_ */
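The intended calling pattern (a hedged sketch of how the HDA display-audio glue is expected to use this pair) is to bracket any access to the display-audio hardware:

	i915_request_power_well();	/* keep the display power well powered */
	/* ... program HDMI/DP audio registers ... */
	i915_release_power_well();	/* allow the well to power down again */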
@@ -148,7 +148,6 @@ struct ttm_tt;
 * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
-* @addr_space_offset: Address space offset.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
@@ -169,8 +168,7 @@ struct ttm_tt;
 * @swap: List head for swap LRU list.
 * @sync_obj: Pointer to a synchronization object.
 * @priv_flags: Flags describing buffer object internal state.
-* @vm_rb: Rb node for the vm rb tree.
-* @vm_node: Address space manager node.
+* @vma_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
@@ -520,7 +520,7 @@ struct ttm_bo_global {
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
-* @addr_space_mm: Range manager for the device address space.
+* @vma_manager: Address space manager
 * lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
@@ -774,6 +774,56 @@ extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
 extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

+extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_reserve_nolru:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_ticket: If @bo is already reserved, Only sleep waiting for
+ * it to become unreserved if @ticket->stamp is older.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EDEADLK: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @use_ticket is set to true.
+ */
+static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
+				       bool interruptible,
+				       bool no_wait, bool use_ticket,
+				       struct ww_acquire_ctx *ticket)
+{
+	int ret = 0;
+/*
+	if (no_wait) {
+		bool success;
+		if (WARN_ON(ticket))
+			return -EBUSY;
+
+		success = ww_mutex_trylock(&bo->resv->lock);
+		return success ? 0 : -EBUSY;
+	}
+
+	if (interruptible)
+		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
+	else
+		ret = ww_mutex_lock(&bo->resv->lock, ticket);
+	if (ret == -EINTR)
+		return -ERESTARTSYS;
+*/
+	return ret;
+}
+
 /**
 * ttm_bo_reserve:
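A hedged usage sketch of the _nolru variant (eviction-style code; the surrounding locking is only indicative of how upstream TTM uses it): callers that already hold the LRU lock use it as a try-lock so the buffer is not moved on the LRU lists:

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, NULL);
	if (ret == -EBUSY) {
		/* somebody else holds the reservation, skip this bo */
		spin_unlock(&glob->lru_lock);
		return ret;
	}
	ttm_bo_del_sub_from_lru(bo);	/* safe: reservation is now held */
	spin_unlock(&glob->lru_lock);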
@@ -884,7 +934,7 @@ static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}
-	ww_mutex_unlock(&bo->resv->lock);
+//	ww_mutex_unlock(&bo->resv->lock);
 }

 /**
@@ -65,6 +65,21 @@
 #define __visible __attribute__((externally_visible))
 #endif

+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Fixed in GCC 4.8.2 and later versions.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#if GCC_VERSION <= 40801
+# define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+#else
+# define asm_volatile_goto(x...)	do { asm goto(x); } while (0)
+#endif
+
 #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
 #if GCC_VERSION >= 40400
@@ -351,4 +351,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 */
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

+/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
+#ifdef CONFIG_KPROBES
+# define __kprobes	__attribute__((__section__(".kprobes.text")))
+#else
+# define __kprobes
+#endif
+
 #endif /* __LINUX_COMPILER_H */
@@ -52,7 +52,7 @@ static inline void * __must_check ERR_CAST(__force const void *ptr)
	return (void *) ptr;
 }

-static inline int __must_check PTR_RET(__force const void *ptr)
+static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
 {
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
@@ -60,6 +60,9 @@ static inline int __must_check PTR_RET(__force const void *ptr)
	return 0;
 }

+/* Deprecated */
+#define PTR_RET(p) PTR_ERR_OR_ZERO(p)
+
 #endif

 #endif /* _LINUX_ERR_H */
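A small hedged example of the renamed helper; the example_get_resource() call is made up to show the pattern:

	struct resource *res = example_get_resource(dev);

	/* 0 for a valid pointer, or the encoded errno for an ERR_PTR value */
	return PTR_ERR_OR_ZERO(res);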
@@ -5,8 +5,8 @@
 * to reduce the amount of pointless cruft we feed to gcc when only
 * exporting a simple symbol or two.
 *
-* If you feel the need to add #include <linux/foo.h> to this file
-* then you are doing something wrong and should go away silently.
+* Try not to add #includes here. It slows compilation and makes kernel
+* hackers place grumpy comments in header files.
 */
 #define EXPORT_SYMBOL(sym)
 #define EXPORT_SYMBOL_GPL(sym)
@@ -18,11 +18,21 @@ enum hdmi_infoframe_type {
	HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
 };

+#define HDMI_IEEE_OUI 0x000c03
 #define HDMI_INFOFRAME_HEADER_SIZE  4
 #define HDMI_AVI_INFOFRAME_SIZE    13
 #define HDMI_SPD_INFOFRAME_SIZE    25
 #define HDMI_AUDIO_INFOFRAME_SIZE  10
+
+#define HDMI_INFOFRAME_SIZE(type)	\
+	(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
+
+struct hdmi_any_infoframe {
+	enum hdmi_infoframe_type type;
+	unsigned char version;
+	unsigned char length;
+};

 enum hdmi_colorspace {
	HDMI_COLORSPACE_RGB,
	HDMI_COLORSPACE_YUV422,
@@ -100,9 +110,6 @@ struct hdmi_avi_infoframe {
	unsigned char version;
	unsigned char length;
	enum hdmi_colorspace colorspace;
-	bool active_info_valid;
-	bool horizontal_bar_valid;
-	bool vertical_bar_valid;
	enum hdmi_scan_mode scan_mode;
	enum hdmi_colorimetry colorimetry;
	enum hdmi_picture_aspect picture_aspect;
@@ -218,14 +225,52 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
 ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
				   void *buffer, size_t size);

+enum hdmi_3d_structure {
+	HDMI_3D_STRUCTURE_INVALID = -1,
+	HDMI_3D_STRUCTURE_FRAME_PACKING = 0,
+	HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE,
+	HDMI_3D_STRUCTURE_LINE_ALTERNATIVE,
+	HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL,
+	HDMI_3D_STRUCTURE_L_DEPTH,
+	HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH,
+	HDMI_3D_STRUCTURE_TOP_AND_BOTTOM,
+	HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8,
+};
+
+
 struct hdmi_vendor_infoframe {
	enum hdmi_infoframe_type type;
	unsigned char version;
	unsigned char length;
-	u8 data[27];
+	unsigned int oui;
+	u8 vic;
+	enum hdmi_3d_structure s3d_struct;
+	unsigned int s3d_ext_data;
 };

+int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
 ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
				    void *buffer, size_t size);

+union hdmi_vendor_any_infoframe {
+	struct {
+		enum hdmi_infoframe_type type;
+		unsigned char version;
+		unsigned char length;
+		unsigned int oui;
+	} any;
+	struct hdmi_vendor_infoframe hdmi;
+};
+
+union hdmi_infoframe {
+	struct hdmi_any_infoframe any;
+	struct hdmi_avi_infoframe avi;
+	struct hdmi_spd_infoframe spd;
+	union hdmi_vendor_any_infoframe vendor;
+	struct hdmi_audio_infoframe audio;
+};
+
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
+
 #endif /* _DRM_HDMI_H */
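A hedged sketch of how the reworked vendor infoframe is meant to be filled and packed; the buffer size and field choices are illustrative only:

	struct hdmi_vendor_infoframe frame;
	u8 buffer[32];
	ssize_t len;

	hdmi_vendor_infoframe_init(&frame);
	frame.s3d_struct = HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;	/* signal a 3D mode */

	len = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (len < 0)
		return len;	/* e.g. buffer too small or invalid frame */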
@@ -1,10 +1,10 @@
 #ifndef _LINUX_JIFFIES_H
 #define _LINUX_JIFFIES_H

-//#include <linux/math64.h>
+#include <linux/math64.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
-//#include <linux/time.h>
+#include <linux/time.h>
 //#include <linux/timex.h>
 //#include <asm/param.h>	/* for HZ */

@@ -43,9 +43,6 @@
 # error Invalid value of HZ.
 #endif

-/* LATCH is used in the interval timer and ftape setup. */
-#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ)	/* For divider */
-
 /* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
 * improve accuracy by shifting LSH bits, hence calculating:
 *     (NOM << LSH) / DEN
@@ -58,19 +55,27 @@
 #define SH_DIV(NOM,DEN,LSH) (   (((NOM) / (DEN)) << (LSH))              \
                             + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))

-/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
-#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
+/* LATCH is used in the interval timer and ftape setup. */
+#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ)	/* For divider */

-/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
-#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
+extern int register_refined_jiffies(long clock_tick_rate);
+
+/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
+#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)

 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)

-/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
-/* a value TUSEC for TICK_USEC (can be set bij adjtimex)		*/
-#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
+/* some arch's have a small-data section that can be accessed register-relative
+ * but that can only take up to, say, 4-byte variables. jiffies being part of
+ * an 8-byte variable may not be correctly accessed unless we force the issue
+ */
+#define __jiffy_data  __attribute__((section(".data")))
+
+/*
+ * The 64-bit value is not atomic - you MUST NOT read it
+ * without sampling the sequence number in jiffies_lock.
+ * get_jiffies_64() will do this for you as appropriate.
+ */
 static inline u64 get_jiffies_64(void)
 {
	return (u64)GetTimerTicks();
@@ -92,13 +97,13 @@ static inline u64 get_jiffies_64(void)
 #define time_after(a,b)		\
	(typecheck(unsigned long, a) && \
	 typecheck(unsigned long, b) && \
-	 ((long)(b) - (long)(a) < 0))
+	 ((long)((b) - (a)) < 0))
 #define time_before(a,b)	time_after(b,a)

 #define time_after_eq(a,b)	\
	(typecheck(unsigned long, a) && \
	 typecheck(unsigned long, b) && \
-	 ((long)(a) - (long)(b) >= 0))
+	 ((long)((a) - (b)) >= 0))
 #define time_before_eq(a,b)	time_after_eq(b,a)

 /*
@@ -121,13 +126,13 @@ static inline u64 get_jiffies_64(void)
 #define time_after64(a,b)	\
	(typecheck(__u64, a) &&	\
	 typecheck(__u64, b) && \
-	 ((__s64)(b) - (__s64)(a) < 0))
+	 ((__s64)((b) - (a)) < 0))
 #define time_before64(a,b)	time_after64(b,a)

 #define time_after_eq64(a,b)	\
	(typecheck(__u64, a) && \
	 typecheck(__u64, b) && \
-	 ((__s64)(a) - (__s64)(b) >= 0))
+	 ((__s64)((a) - (b)) >= 0))
 #define time_before_eq64(a,b)	time_after_eq64(b,a)

 #define time_in_range64(a, b, c) \
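A hedged example of the wrap-safe comparison these macros provide; GetTimerTicks() stands in for jiffies here, matching get_jiffies_64() above, and example_hw_ready() is illustrative:

	unsigned long timeout = GetTimerTicks() + 2 * HZ;	/* ~2 seconds from now */

	for (;;) {
		if (example_hw_ready(dev))
			break;
		if (time_after(GetTimerTicks(), timeout))
			return -ETIMEDOUT;	/* correct even across tick wrap-around */
	}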
@@ -49,6 +49,8 @@
 )

 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP_ULL(ll,d) \
+	({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
 #define DIV_ROUND_CLOSEST(x, divisor)(			\
 {							\
	typeof(divisor) __divisor = divisor;		\
@@ -56,6 +58,16 @@
 }							\
 )

+
+#define clamp_t(type, val, min, max) ({		\
+	type __val = (val);			\
+	type __min = (min);			\
+	type __max = (max);			\
+	__val = __val < __min ? __min: __val;	\
+	__val > __max ? __max: __val; })
+
+
+
 /**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
@@ -253,11 +265,6 @@ static inline void *dev_get_drvdata(struct device *dev)

 #define HZ 100

-#define time_after(a,b)		\
-	(typecheck(unsigned long, a) && \
-	 typecheck(unsigned long, b) && \
-	 ((long)(b) - (long)(a) < 0))
-
 struct tvec_base;

 struct timer_list {
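A small hedged example of the two helpers added above; the values are arbitrary:

	/* 64-bit value divided by a 32-bit divisor, rounded up, via do_div() */
	unsigned long long chunks = DIV_ROUND_UP_ULL(5000000000ULL, 3);

	/* force a signed value into the range [0, 255] */
	int level = clamp_t(int, requested_level, 0, 255);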
|
@ -30,6 +30,15 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
|
|||||||
return dividend / divisor;
|
return dividend / divisor;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
|
||||||
|
*/
|
||||||
|
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
|
||||||
|
{
|
||||||
|
*remainder = dividend % divisor;
|
||||||
|
return dividend / divisor;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* div64_u64 - unsigned 64bit divide with 64bit divisor
|
* div64_u64 - unsigned 64bit divide with 64bit divisor
|
||||||
*/
|
*/
|
||||||
@ -63,6 +72,10 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
|
|||||||
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
|
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifndef div64_u64_rem
|
||||||
|
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
|
||||||
|
#endif
|
||||||
|
|
||||||
#ifndef div64_u64
|
#ifndef div64_u64
|
||||||
extern u64 div64_u64(u64 dividend, u64 divisor);
|
extern u64 div64_u64(u64 dividend, u64 divisor);
|
||||||
#endif
|
#endif
|
||||||
|
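A hedged one-liner showing the new remainder variant (total_ns and the use of NSEC_PER_SEC are assumptions for illustration):

	u64 rem;
	u64 secs = div64_u64_rem(total_ns, NSEC_PER_SEC, &rem);
	/* secs now holds whole seconds, rem the leftover nanoseconds */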
@@ -9,8 +9,8 @@
 #include <linux/list.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
-#include <linux/export.h>
 #include <linux/moduleparam.h>
+#include <linux/export.h>


 #define MODULE_FIRMWARE(x)
@@ -68,6 +68,10 @@ extern struct rb_node *rb_prev(const struct rb_node *);
 extern struct rb_node *rb_first(const struct rb_root *);
 extern struct rb_node *rb_last(const struct rb_root *);

+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);
@@ -81,4 +85,22 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
	*rb_link = node;
 }

+/**
+ * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
+ * given type safe against removal of rb_node entry
+ *
+ * @pos:	the 'type *' to use as a loop cursor.
+ * @n:		another 'type *' to use as temporary storage
+ * @root:	'rb_root *' of the rbtree.
+ * @field:	the name of the rb_node field within 'type'.
+ */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+	for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\
+		n = rb_entry(rb_next_postorder(&pos->field), \
+			typeof(*pos), field); \
+	     &pos->field; \
+	     pos = n, \
+		n = rb_entry(rb_next_postorder(&pos->field), \
+			typeof(*pos), field))
+
 #endif /* _LINUX_RBTREE_H */
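A hedged example of the new postorder helper: because children are visited before their parent, every node can be freed while walking. The example_node type and example_tree root are illustrative:

	struct example_node {
		struct rb_node rb;
		/* ... payload ... */
	};

	struct example_node *pos, *n;

	/* tear down the whole tree without rebalancing on each removal */
	rbtree_postorder_for_each_entry_safe(pos, n, &example_tree, rb)
		kfree(pos);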
 drivers/include/linux/rbtree_augmented.h (new file, 232 lines)
@@ -0,0 +1,232 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
+  (C) 2012  Michel Lespinasse <walken@google.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/include/linux/rbtree_augmented.h
+*/
+
+#ifndef _LINUX_RBTREE_AUGMENTED_H
+#define _LINUX_RBTREE_AUGMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+
+/*
+ * Please note - only struct rb_augment_callbacks and the prototypes for
+ * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
+ * The rest are implementation details you are not expected to depend on.
+ *
+ * See Documentation/rbtree.txt for documentation and samples.
+ */
+
+struct rb_augment_callbacks {
+	void (*propagate)(struct rb_node *node, struct rb_node *stop);
+	void (*copy)(struct rb_node *old, struct rb_node *new);
+	void (*rotate)(struct rb_node *old, struct rb_node *new);
+};
+
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+static inline void
+rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+		    const struct rb_augment_callbacks *augment)
+{
+	__rb_insert_augmented(node, root, augment->rotate);
+}
+
+#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield,	\
+			     rbtype, rbaugmented, rbcompute)		\
+static inline void							\
+rbname ## _propagate(struct rb_node *rb, struct rb_node *stop)		\
+{									\
+	while (rb != stop) {						\
+		rbstruct *node = rb_entry(rb, rbstruct, rbfield);	\
+		rbtype augmented = rbcompute(node);			\
+		if (node->rbaugmented == augmented)			\
+			break;						\
+		node->rbaugmented = augmented;				\
+		rb = rb_parent(&node->rbfield);				\
+	}								\
+}									\
+static inline void							\
+rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)		\
+{									\
+	rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);		\
+	rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);		\
+	new->rbaugmented = old->rbaugmented;				\
+}									\
+static void								\
+rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)	\
+{									\
+	rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);		\
+	rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);		\
+	new->rbaugmented = old->rbaugmented;				\
+	old->rbaugmented = rbcompute(old);				\
+}									\
+rbstatic const struct rb_augment_callbacks rbname = {			\
+	rbname ## _propagate, rbname ## _copy, rbname ## _rotate	\
+};
+
+
+#define	RB_RED		0
+#define	RB_BLACK	1
+
+#define __rb_parent(pc)    ((struct rb_node *)(pc & ~3))
+
+#define __rb_color(pc)     ((pc) & 1)
+#define __rb_is_black(pc)  __rb_color(pc)
+#define __rb_is_red(pc)    (!__rb_color(pc))
+#define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
+#define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
+#define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)
+
+static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
+{
+	rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+}
+
+static inline void rb_set_parent_color(struct rb_node *rb,
+				       struct rb_node *p, int color)
+{
+	rb->__rb_parent_color = (unsigned long)p | color;
+}
+
+static inline void
+__rb_change_child(struct rb_node *old, struct rb_node *new,
+		  struct rb_node *parent, struct rb_root *root)
+{
+	if (parent) {
+		if (parent->rb_left == old)
+			parent->rb_left = new;
+		else
+			parent->rb_right = new;
+	} else
+		root->rb_node = new;
+}
+
+extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+		     const struct rb_augment_callbacks *augment)
+{
+	struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+	struct rb_node *parent, *rebalance;
+	unsigned long pc;
+
+	if (!tmp) {
+		/*
+		 * Case 1: node to erase has no more than 1 child (easy!)
+		 *
+		 * Note that if there is one child it must be red due to 5)
+		 * and node must be black due to 4). We adjust colors locally
+		 * so as to bypass __rb_erase_color() later on.
+		 */
+		pc = node->__rb_parent_color;
+		parent = __rb_parent(pc);
+		__rb_change_child(node, child, parent, root);
+		if (child) {
+			child->__rb_parent_color = pc;
+			rebalance = NULL;
+		} else
+			rebalance = __rb_is_black(pc) ? parent : NULL;
+		tmp = parent;
+	} else if (!child) {
+		/* Still case 1, but this time the child is node->rb_left */
+		tmp->__rb_parent_color = pc = node->__rb_parent_color;
+		parent = __rb_parent(pc);
+		__rb_change_child(node, tmp, parent, root);
+		rebalance = NULL;
+		tmp = parent;
+	} else {
+		struct rb_node *successor = child, *child2;
+		tmp = child->rb_left;
+		if (!tmp) {
+			/*
+			 * Case 2: node's successor is its right child
+			 *
+			 *    (n)          (s)
+			 *    / \          / \
+			 *  (x) (s)  ->  (x) (c)
+			 *        \
+			 *        (c)
+			 */
+			parent = successor;
+			child2 = successor->rb_right;
+			augment->copy(node, successor);
+		} else {
+			/*
+			 * Case 3: node's successor is leftmost under
+			 * node's right child subtree
+			 *
+			 *    (n)          (s)
+			 *    / \          / \
+			 *  (x) (y)  ->  (x) (y)
+			 *      /            /
+			 *    (p)          (p)
+			 *    /            /
+			 *  (s)          (c)
+			 *    \
+			 *    (c)
+			 */
+			do {
+				parent = successor;
+				successor = tmp;
+				tmp = tmp->rb_left;
+			} while (tmp);
+			parent->rb_left = child2 = successor->rb_right;
+			successor->rb_right = child;
+			rb_set_parent(child, successor);
+			augment->copy(node, successor);
+			augment->propagate(parent, successor);
+		}
+
+		successor->rb_left = tmp = node->rb_left;
+		rb_set_parent(tmp, successor);
+
+		pc = node->__rb_parent_color;
+		tmp = __rb_parent(pc);
+		__rb_change_child(node, successor, tmp, root);
+		if (child2) {
+			successor->__rb_parent_color = pc;
+			rb_set_parent_color(child2, parent, RB_BLACK);
+			rebalance = NULL;
+		} else {
+			unsigned long pc2 = successor->__rb_parent_color;
+			successor->__rb_parent_color = pc;
+			rebalance = __rb_is_black(pc2) ? parent : NULL;
+		}
+		tmp = successor;
+	}
+
+	augment->propagate(tmp, NULL);
+	return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+		   const struct rb_augment_callbacks *augment)
+{
+	struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+	if (rebalance)
+		__rb_erase_color(rebalance, root, augment->rotate);
+}
+
+#endif	/* _LINUX_RBTREE_AUGMENTED_H */
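A hedged sketch of how the callback-generating macro is meant to be used: an interval-tree-style "largest value in subtree" augmentation. The node type and compute helper below are illustrative, not part of this header:

	struct example_node {
		struct rb_node rb;
		unsigned long value;
		unsigned long max_in_subtree;	/* the augmented field */
	};

	static unsigned long example_compute_max(struct example_node *node)
	{
		unsigned long max = node->value;
		struct example_node *child;

		if (node->rb.rb_left) {
			child = rb_entry(node->rb.rb_left, struct example_node, rb);
			if (child->max_in_subtree > max)
				max = child->max_in_subtree;
		}
		if (node->rb.rb_right) {
			child = rb_entry(node->rb.rb_right, struct example_node, rb);
			if (child->max_in_subtree > max)
				max = child->max_in_subtree;
		}
		return max;
	}

	RB_DECLARE_CALLBACKS(static, example_cb, struct example_node, rb,
			     unsigned long, max_in_subtree, example_compute_max)

	/* insert: link the node, then call rb_insert_augmented(&node->rb, root, &example_cb);
	 * erase:  rb_erase_augmented(&node->rb, root, &example_cb); */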
@@ -267,8 +267,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
 */
 #define list_first_or_null_rcu(ptr, type, member) \
	({struct list_head *__ptr = (ptr); \
-	  struct list_head __rcu *__next = list_next_rcu(__ptr); \
-	  likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
+	  struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+	  likely(__ptr != __next) ? \
+		list_entry_rcu(__next, type, member) : NULL; \
	})

 /**
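A hedged reader-side usage example of the corrected macro; the list head, entry type, and handler are illustrative:

	rcu_read_lock();
	first = list_first_or_null_rcu(&example_list, struct example_entry, link);
	if (first)
		example_handle(first);	/* first may be NULL-checked only once */
	rcu_read_unlock();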
@@ -4,6 +4,8 @@
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
+* (C) Linux Foundation 2008-2013
+*	Unified interface for all slab allocators
 */

 #ifndef _LINUX_SLAB_H
@@ -116,11 +116,10 @@ typedef		__u8		uint8_t;
 typedef		__u16		uint16_t;
 typedef		__u32		uint32_t;

-#if defined(__GNUC__)
 typedef		__u64		uint64_t;
 typedef		__u64		u_int64_t;
-typedef		__s64		int64_t;
-#endif
+typedef		__signed__ long long	int64_t;

 /* this is a special 64bit data type that is 8-byte aligned */
 #define aligned_u64 __u64 __attribute__((aligned(8)))
@@ -33,6 +33,30 @@
 * subject to backwards-compatibility constraints.
 */

+/**
+ * DOC: uevents generated by i915 on its device node
+ *
+ * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
+ *	event from the gpu l3 cache. Additional information supplied is ROW,
+ *	BANK, SUBBANK of the affected cacheline. Userspace should keep track of
+ *	these events and if a specific cache-line seems to have a persistent
+ *	error remap it with the l3 remapping tool supplied in intel-gpu-tools.
+ *	The value supplied with the event is always 1.
+ *
+ * I915_ERROR_UEVENT - Generated upon error detection, currently only via
+ *	hangcheck. The error detection event is a good indicator of when things
+ *	began to go badly. The value supplied with the event is a 1 upon error
+ *	detection, and a 0 upon reset completion, signifying no more error
+ *	exists. NOTE: Disabling hangcheck or reset via module parameter will
+ *	cause the related events to not be seen.
+ *
+ * I915_RESET_UEVENT - Event is generated just before an attempt to reset
+ *	the GPU. The value supplied with the event is always 1. NOTE: Disabling
+ *	reset via module parameter will cause this event to not be seen.
+ */
+#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
+#define I915_ERROR_UEVENT		"ERROR"
+#define I915_RESET_UEVENT		"RESET"
+
 /* Each region is a minimum of 16k, and there are at most 255 of them.
 */
@@ -305,11 +329,12 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_WAIT_TIMEOUT	 19
 #define I915_PARAM_HAS_SEMAPHORES	 20
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
-#define I915_PARAM_RSVD_FOR_FUTURE_USE	 22
+#define I915_PARAM_HAS_VEBOX		 22
 #define I915_PARAM_HAS_SECURE_BATCHES	 23
 #define I915_PARAM_HAS_PINNED_BATCHES	 24
 #define I915_PARAM_HAS_EXEC_NO_RELOC	 25
 #define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
+#define I915_PARAM_HAS_WT		 27

 typedef struct drm_i915_getparam {
	int param;
@@ -660,6 +685,7 @@ struct drm_i915_gem_execbuffer2 {
 #define I915_EXEC_RENDER                 (1<<0)
 #define I915_EXEC_BSD                    (2<<0)
 #define I915_EXEC_BLT                    (3<<0)
+#define I915_EXEC_VEBOX                  (4<<0)

 /* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
@@ -743,8 +769,32 @@ struct drm_i915_gem_busy {
	__u32 busy;
 };

+/**
+ * I915_CACHING_NONE
+ *
+ * GPU access is not coherent with cpu caches. Default for machines without an
+ * LLC.
+ */
 #define I915_CACHING_NONE		0
+/**
+ * I915_CACHING_CACHED
+ *
+ * GPU access is coherent with cpu caches and furthermore the data is cached in
+ * last-level caches shared between cpu cores and the gpu GT. Default on
+ * machines with HAS_LLC.
+ */
 #define I915_CACHING_CACHED		1
+/**
+ * I915_CACHING_DISPLAY
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no special
+ * cache mode (like write-through or gfdt flushing) is available. The kernel
+ * automatically sets this mode when using a buffer as a scanout target.
+ * Userspace can manually set this mode to avoid a costly stall and clflush in
+ * the hotpath of drawing the first frame.
+ */
+#define I915_CACHING_DISPLAY		2

 struct drm_i915_gem_caching {
	/**
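A hedged userspace-style sketch of probing the new parameter; the drmIoctl() wrapper is assumed to come from libdrm and is not defined by this header:

	drm_i915_getparam_t gp;
	int has_vebox = 0;

	gp.param = I915_PARAM_HAS_VEBOX;
	gp.value = &has_vebox;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_vebox) {
		/* the video enhancement ring can be targeted with I915_EXEC_VEBOX */
	}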