forked from KolibriOS/kolibrios
ddk: update
git-svn-id: svn://kolibrios.org@3391 a494cfbc-eb01-0410-851d-a64ba20cac60
parent 6e80a53e5e
commit 4a4da537e6
@@ -22,6 +22,7 @@ NAME_SRCS:= \
 io/finfo.c \
 io/ssize.c \
 io/write.c \
+linux/bitmap.c \
 linux/idr.c \
 linux/firmware.c \
 linux/kref.c \
@@ -65,6 +65,7 @@
 .global _UserFree
 
 .global _WaitEvent
+.global _WaitEventTimeout
 
 
 .def _AllocKernelSpace; .scl 2; .type 32; .endef
@@ -127,6 +128,7 @@
 .def _UserFree; .scl 2; .type 32; .endef
 
 .def _WaitEvent; .scl 2; .type 32; .endef
+.def _WaitEventTimeout; .scl 2; .type 32; .endef
 
 
 _AllocKernelSpace:
@@ -189,6 +191,7 @@ _TimerHs:
 _UserAlloc:
 _UserFree:
 _WaitEvent:
+_WaitEventTimeout:
 
     ret
 
@@ -258,4 +261,5 @@ _WaitEvent:
 .ascii " -export:UserFree"         # stdcall
 
 .ascii " -export:WaitEvent"        # stdcall
+.ascii " -export:WaitEventTimeout" # stdcall
 
848  drivers/ddk/linux/bitmap.c  Normal file
@@ -0,0 +1,848 @@
/*
 * lib/bitmap.c
 * Helper functions for bitmap.h.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <syscall.h>
#include <linux/export.h>
//#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
//#include <asm/uaccess.h>

/*
 * bitmaps provide an array of bits, implemented using an
 * array of unsigned longs.  The number of valid bits in a
 * given bitmap does _not_ need to be an exact multiple of
 * BITS_PER_LONG.
 *
 * The possible unused bits in the last, partially used word
 * of a bitmap are 'don't care'.  The implementation makes
 * no particular effort to keep them zero.  It ensures that
 * their value will not affect the results of any operation.
 * The bitmap operations that return Boolean (bitmap_empty,
 * for example) or scalar (bitmap_weight, for example) results
 * carefully filter out these unused bits from impacting their
 * results.
 *
 * These operations actually hold to a slightly stronger rule:
 * if you don't input any bitmaps to these ops that have some
 * unused bits set, then they won't output any set unused bits
 * in output bitmaps.
 *
 * The byte ordering of bitmaps is more natural on little
 * endian architectures.  See the big-endian headers
 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
 * for the best explanations of this ordering.
 */

int __bitmap_empty(const unsigned long *bitmap, int bits)
{
    int k, lim = bits/BITS_PER_LONG;
    for (k = 0; k < lim; ++k)
        if (bitmap[k])
            return 0;

    if (bits % BITS_PER_LONG)
        if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
            return 0;

    return 1;
}
EXPORT_SYMBOL(__bitmap_empty);

int __bitmap_full(const unsigned long *bitmap, int bits)
{
    int k, lim = bits/BITS_PER_LONG;
    for (k = 0; k < lim; ++k)
        if (~bitmap[k])
            return 0;

    if (bits % BITS_PER_LONG)
        if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
            return 0;

    return 1;
}
EXPORT_SYMBOL(__bitmap_full);

int __bitmap_equal(const unsigned long *bitmap1,
        const unsigned long *bitmap2, int bits)
{
    int k, lim = bits/BITS_PER_LONG;
    for (k = 0; k < lim; ++k)
        if (bitmap1[k] != bitmap2[k])
            return 0;

    if (bits % BITS_PER_LONG)
        if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
            return 0;

    return 1;
}
EXPORT_SYMBOL(__bitmap_equal);

void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
{
    int k, lim = bits/BITS_PER_LONG;
    for (k = 0; k < lim; ++k)
        dst[k] = ~src[k];

    if (bits % BITS_PER_LONG)
        dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
}
EXPORT_SYMBOL(__bitmap_complement);

/**
 * __bitmap_shift_right - logical right shift of the bits in a bitmap
 *   @dst : destination bitmap
 *   @src : source bitmap
 *   @shift : shift by this many bits
 *   @bits : bitmap size, in bits
 *
 * Shifting right (dividing) means moving bits in the MS -> LS bit
 * direction.  Zeros are fed into the vacated MS positions and the
 * LS bits shifted off the bottom are lost.
 */
void __bitmap_shift_right(unsigned long *dst,
        const unsigned long *src, int shift, int bits)
{
    int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
    int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
    unsigned long mask = (1UL << left) - 1;
    for (k = 0; off + k < lim; ++k) {
        unsigned long upper, lower;

        /*
         * If shift is not word aligned, take lower rem bits of
         * word above and make them the top rem bits of result.
         */
        if (!rem || off + k + 1 >= lim)
            upper = 0;
        else {
            upper = src[off + k + 1];
            if (off + k + 1 == lim - 1 && left)
                upper &= mask;
        }
        lower = src[off + k];
        if (left && off + k == lim - 1)
            lower &= mask;
        dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
        if (left && k == lim - 1)
            dst[k] &= mask;
    }
    if (off)
        memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_right);


/**
 * __bitmap_shift_left - logical left shift of the bits in a bitmap
 *   @dst : destination bitmap
 *   @src : source bitmap
 *   @shift : shift by this many bits
 *   @bits : bitmap size, in bits
 *
 * Shifting left (multiplying) means moving bits in the LS -> MS
 * direction.  Zeros are fed into the vacated LS bit positions
 * and those MS bits shifted off the top are lost.
 */

void __bitmap_shift_left(unsigned long *dst,
        const unsigned long *src, int shift, int bits)
{
    int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
    int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
    for (k = lim - off - 1; k >= 0; --k) {
        unsigned long upper, lower;

        /*
         * If shift is not word aligned, take upper rem bits of
         * word below and make them the bottom rem bits of result.
         */
        if (rem && k > 0)
            lower = src[k - 1];
        else
            lower = 0;
        upper = src[k];
        if (left && k == lim - 1)
            upper &= (1UL << left) - 1;
        dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
        if (left && k + off == lim - 1)
            dst[k + off] &= (1UL << left) - 1;
    }
    if (off)
        memset(dst, 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_left);

int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
        const unsigned long *bitmap2, int bits)
{
    int k;
    int nr = BITS_TO_LONGS(bits);
    unsigned long result = 0;

    for (k = 0; k < nr; k++)
        result |= (dst[k] = bitmap1[k] & bitmap2[k]);
    return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);

void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
        const unsigned long *bitmap2, int bits)
{
    int k;
    int nr = BITS_TO_LONGS(bits);

    for (k = 0; k < nr; k++)
        dst[k] = bitmap1[k] | bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_or);

void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
        const unsigned long *bitmap2, int bits)
{
    int k;
    int nr = BITS_TO_LONGS(bits);

    for (k = 0; k < nr; k++)
        dst[k] = bitmap1[k] ^ bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_xor);

int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
        const unsigned long *bitmap2, int bits)
{
    int k;
    int nr = BITS_TO_LONGS(bits);
    unsigned long result = 0;

    for (k = 0; k < nr; k++)
        result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
    return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);

int __bitmap_intersects(const unsigned long *bitmap1,
        const unsigned long *bitmap2, int bits)
{
    int k, lim = bits/BITS_PER_LONG;
    for (k = 0; k < lim; ++k)
        if (bitmap1[k] & bitmap2[k])
            return 1;

    if (bits % BITS_PER_LONG)
        if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
            return 1;
    return 0;
}
EXPORT_SYMBOL(__bitmap_intersects);

int __bitmap_subset(const unsigned long *bitmap1,
        const unsigned long *bitmap2, int bits)
{
    int k, lim = bits/BITS_PER_LONG;
    for (k = 0; k < lim; ++k)
        if (bitmap1[k] & ~bitmap2[k])
            return 0;

    if (bits % BITS_PER_LONG)
        if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
            return 0;
    return 1;
}
EXPORT_SYMBOL(__bitmap_subset);

int __bitmap_weight(const unsigned long *bitmap, int bits)
{
    int k, w = 0, lim = bits/BITS_PER_LONG;

    for (k = 0; k < lim; k++)
        w += hweight_long(bitmap[k]);

    if (bits % BITS_PER_LONG)
        w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));

    return w;
}
EXPORT_SYMBOL(__bitmap_weight);

void bitmap_set(unsigned long *map, int start, int nr)
{
    unsigned long *p = map + BIT_WORD(start);
    const int size = start + nr;
    int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
    unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

    while (nr - bits_to_set >= 0) {
        *p |= mask_to_set;
        nr -= bits_to_set;
        bits_to_set = BITS_PER_LONG;
        mask_to_set = ~0UL;
        p++;
    }
    if (nr) {
        mask_to_set &= BITMAP_LAST_WORD_MASK(size);
        *p |= mask_to_set;
    }
}
EXPORT_SYMBOL(bitmap_set);

void bitmap_clear(unsigned long *map, int start, int nr)
{
    unsigned long *p = map + BIT_WORD(start);
    const int size = start + nr;
    int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
    unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

    while (nr - bits_to_clear >= 0) {
        *p &= ~mask_to_clear;
        nr -= bits_to_clear;
        bits_to_clear = BITS_PER_LONG;
        mask_to_clear = ~0UL;
        p++;
    }
    if (nr) {
        mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
        *p &= ~mask_to_clear;
    }
}
EXPORT_SYMBOL(bitmap_clear);

/*
 * bitmap_find_next_zero_area - find a contiguous aligned zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @align_mask: Alignment mask for zero area
 *
 * The @align_mask should be one less than a power of 2; the effect is that
 * the bit offset of all zero areas this function finds is multiples of that
 * power of 2. A @align_mask of 0 means no alignment is required.
 */
unsigned long bitmap_find_next_zero_area(unsigned long *map,
                     unsigned long size,
                     unsigned long start,
                     unsigned int nr,
                     unsigned long align_mask)
{
    unsigned long index, end, i;
again:
    index = find_next_zero_bit(map, size, start);

    /* Align allocation */
    index = __ALIGN_MASK(index, align_mask);

    end = index + nr;
    if (end > size)
        return end;
    i = find_next_bit(map, end, index);
    if (i < end) {
        start = i + 1;
        goto again;
    }
    return index;
}
EXPORT_SYMBOL(bitmap_find_next_zero_area);

/*
 * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
 * second version by Paul Jackson, third by Joe Korty.
 */

#define CHUNKSZ                         32
#define nbits_to_hold_value(val)        fls(val)
#define BASEDEC 10      /* fancier cpuset lists input in decimal */




/**
 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
 *  @buf: pointer to a bitmap
 *  @pos: a bit position in @buf (0 <= @pos < @bits)
 *  @bits: number of valid bit positions in @buf
 *
 * Map the bit at position @pos in @buf (of length @bits) to the
 * ordinal of which set bit it is.  If it is not set or if @pos
 * is not a valid bit position, map to -1.
 *
 * If for example, just bits 4 through 7 are set in @buf, then @pos
 * values 4 through 7 will get mapped to 0 through 3, respectively,
 * and other @pos values will get mapped to 0.  When @pos value 7
 * gets mapped to (returns) @ord value 3 in this example, that means
 * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
 *
 * The bit positions 0 through @bits are valid positions in @buf.
 */
static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
{
    int i, ord;

    if (pos < 0 || pos >= bits || !test_bit(pos, buf))
        return -1;

    i = find_first_bit(buf, bits);
    ord = 0;
    while (i < pos) {
        i = find_next_bit(buf, bits, i + 1);
        ord++;
    }
    BUG_ON(i != pos);

    return ord;
}

/**
 * bitmap_ord_to_pos - find position of n-th set bit in bitmap
 *  @buf: pointer to bitmap
 *  @ord: ordinal bit position (n-th set bit, n >= 0)
 *  @bits: number of valid bit positions in @buf
 *
 * Map the ordinal offset of bit @ord in @buf to its position in @buf.
 * Value of @ord should be in range 0 <= @ord < weight(buf), else
 * results are undefined.
 *
 * If for example, just bits 4 through 7 are set in @buf, then @ord
 * values 0 through 3 will get mapped to 4 through 7, respectively,
 * and all other @ord values return undefined values.  When @ord value 3
 * gets mapped to (returns) @pos value 7 in this example, that means
 * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
 *
 * The bit positions 0 through @bits are valid positions in @buf.
 */
int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
{
    int pos = 0;

    if (ord >= 0 && ord < bits) {
        int i;

        for (i = find_first_bit(buf, bits);
             i < bits && ord > 0;
             i = find_next_bit(buf, bits, i + 1))
            ord--;
        if (i < bits && ord == 0)
            pos = i;
    }

    return pos;
}

/**
 * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
 *  @dst: remapped result
 *  @src: subset to be remapped
 *  @old: defines domain of map
 *  @new: defines range of map
 *  @bits: number of bits in each of these bitmaps
 *
 * Let @old and @new define a mapping of bit positions, such that
 * whatever position is held by the n-th set bit in @old is mapped
 * to the n-th set bit in @new.  In the more general case, allowing
 * for the possibility that the weight 'w' of @new is less than the
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
 * If either of the @old and @new bitmaps are empty, or if @src and
 * @dst point to the same location, then this routine copies @src
 * to @dst.
 *
 * The positions of unset bits in @old are mapped to themselves
 * (the identity map).
 *
 * Apply the above specified mapping to @src, placing the result in
 * @dst, clearing any bits previously set in @dst.
 *
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 * bit positions unchanged.  So if say @src comes into this routine
 * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
 * 13 and 15 set.
 */
void bitmap_remap(unsigned long *dst, const unsigned long *src,
        const unsigned long *old, const unsigned long *new,
        int bits)
{
    int oldbit, w;

    if (dst == src)     /* following doesn't handle inplace remaps */
        return;
    bitmap_zero(dst, bits);

    w = bitmap_weight(new, bits);
    for_each_set_bit(oldbit, src, bits) {
        int n = bitmap_pos_to_ord(old, oldbit, bits);

        if (n < 0 || w == 0)
            set_bit(oldbit, dst);   /* identity map */
        else
            set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
    }
}
EXPORT_SYMBOL(bitmap_remap);

/**
 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
 *  @oldbit: bit position to be mapped
 *  @old: defines domain of map
 *  @new: defines range of map
 *  @bits: number of bits in each of these bitmaps
 *
 * Let @old and @new define a mapping of bit positions, such that
 * whatever position is held by the n-th set bit in @old is mapped
 * to the n-th set bit in @new.  In the more general case, allowing
 * for the possibility that the weight 'w' of @new is less than the
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
 * The positions of unset bits in @old are mapped to themselves
 * (the identity map).
 *
 * Apply the above specified mapping to bit position @oldbit, returning
 * the new bit position.
 *
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 * bit positions unchanged.  So if say @oldbit is 5, then this routine
 * returns 13.
 */
int bitmap_bitremap(int oldbit, const unsigned long *old,
                const unsigned long *new, int bits)
{
    int w = bitmap_weight(new, bits);
    int n = bitmap_pos_to_ord(old, oldbit, bits);
    if (n < 0 || w == 0)
        return oldbit;
    else
        return bitmap_ord_to_pos(new, n % w, bits);
}
EXPORT_SYMBOL(bitmap_bitremap);

/**
 * bitmap_onto - translate one bitmap relative to another
 *  @dst: resulting translated bitmap
 *  @orig: original untranslated bitmap
 *  @relmap: bitmap relative to which translated
 *  @bits: number of bits in each of these bitmaps
 *
 * Set the n-th bit of @dst iff there exists some m such that the
 * n-th bit of @relmap is set, the m-th bit of @orig is set, and
 * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
 * (If you understood the previous sentence the first time your
 * read it, you're overqualified for your current job.)
 *
 * In other words, @orig is mapped onto (surjectively) @dst,
 * using the map { <n, m> | the n-th bit of @relmap is the
 * m-th set bit of @relmap }.
 *
 * Any set bits in @orig above bit number W, where W is the
 * weight of (number of set bits in) @relmap are mapped nowhere.
 * In particular, if for all bits m set in @orig, m >= W, then
 * @dst will end up empty.  In situations where the possibility
 * of such an empty result is not desired, one way to avoid it is
 * to use the bitmap_fold() operator, below, to first fold the
 * @orig bitmap over itself so that all its set bits x are in the
 * range 0 <= x < W.  The bitmap_fold() operator does this by
 * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
 *
 * Example [1] for bitmap_onto():
 *  Let's say @relmap has bits 30-39 set, and @orig has bits
 *  1, 3, 5, 7, 9 and 11 set.  Then on return from this routine,
 *  @dst will have bits 31, 33, 35, 37 and 39 set.
 *
 *  When bit 0 is set in @orig, it means turn on the bit in
 *  @dst corresponding to whatever is the first bit (if any)
 *  that is turned on in @relmap.  Since bit 0 was off in the
 *  above example, we leave off that bit (bit 30) in @dst.
 *
 *  When bit 1 is set in @orig (as in the above example), it
 *  means turn on the bit in @dst corresponding to whatever
 *  is the second bit that is turned on in @relmap.  The second
 *  bit in @relmap that was turned on in the above example was
 *  bit 31, so we turned on bit 31 in @dst.
 *
 *  Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
 *  because they were the 4th, 6th, 8th and 10th set bits
 *  set in @relmap, and the 4th, 6th, 8th and 10th bits of
 *  @orig (i.e. bits 3, 5, 7 and 9) were also set.
 *
 *  When bit 11 is set in @orig, it means turn on the bit in
 *  @dst corresponding to whatever is the twelfth bit that is
 *  turned on in @relmap.  In the above example, there were
 *  only ten bits turned on in @relmap (30..39), so that bit
 *  11 was set in @orig had no effect on @dst.
 *
 * Example [2] for bitmap_fold() + bitmap_onto():
 *  Let's say @relmap has these ten bits set:
 *      40 41 42 43 45 48 53 61 74 95
 *  (for the curious, that's 40 plus the first ten terms of the
 *  Fibonacci sequence.)
 *
 *  Further lets say we use the following code, invoking
 *  bitmap_fold() then bitmap_onto, as suggested above to
 *  avoid the possibility of an empty @dst result:
 *
 *      unsigned long *tmp;     // a temporary bitmap's bits
 *
 *      bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
 *      bitmap_onto(dst, tmp, relmap, bits);
 *
 *  Then this table shows what various values of @dst would be, for
 *  various @orig's.  I list the zero-based positions of each set bit.
 *  The tmp column shows the intermediate result, as computed by
 *  using bitmap_fold() to fold the @orig bitmap modulo ten
 *  (the weight of @relmap).
 *
 *      @orig           tmp             @dst
 *      0               0               40
 *      1               1               41
 *      9               9               95
 *      10              0               40 (*)
 *      1 3 5 7         1 3 5 7         41 43 48 61
 *      0 1 2 3 4       0 1 2 3 4       40 41 42 43 45
 *      0 9 18 27       0 9 8 7         40 61 74 95
 *      0 10 20 30      0               40
 *      0 11 22 33      0 1 2 3         40 41 42 43
 *      0 12 24 36      0 2 4 6         40 42 45 53
 *      78 102 211      1 2 8           41 42 74 (*)
 *
 * (*) For these marked lines, if we hadn't first done bitmap_fold()
 *     into tmp, then the @dst result would have been empty.
 *
 * If either of @orig or @relmap is empty (no set bits), then @dst
 * will be returned empty.
 *
 * If (as explained above) the only set bits in @orig are in positions
 * m where m >= W, (where W is the weight of @relmap) then @dst will
 * once again be returned empty.
 *
 * All bits in @dst not set by the above rule are cleared.
 */
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
            const unsigned long *relmap, int bits)
{
    int n, m;       /* same meaning as in above comment */

    if (dst == orig)    /* following doesn't handle inplace mappings */
        return;
    bitmap_zero(dst, bits);

    /*
     * The following code is a more efficient, but less
     * obvious, equivalent to the loop:
     *  for (m = 0; m < bitmap_weight(relmap, bits); m++) {
     *      n = bitmap_ord_to_pos(orig, m, bits);
     *      if (test_bit(m, orig))
     *          set_bit(n, dst);
     *  }
     */

    m = 0;
    for_each_set_bit(n, relmap, bits) {
        /* m == bitmap_pos_to_ord(relmap, n, bits) */
        if (test_bit(m, orig))
            set_bit(n, dst);
        m++;
    }
}
EXPORT_SYMBOL(bitmap_onto);

/**
 * bitmap_fold - fold larger bitmap into smaller, modulo specified size
 *  @dst: resulting smaller bitmap
 *  @orig: original larger bitmap
 *  @sz: specified size
 *  @bits: number of bits in each of these bitmaps
 *
 * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
 * Clear all other bits in @dst.  See further the comment and
 * Example [2] for bitmap_onto() for why and how to use this.
 */
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
            int sz, int bits)
{
    int oldbit;

    if (dst == orig)    /* following doesn't handle inplace mappings */
        return;
    bitmap_zero(dst, bits);

    for_each_set_bit(oldbit, orig, bits)
        set_bit(oldbit % sz, dst);
}
EXPORT_SYMBOL(bitmap_fold);

/*
 * Common code for bitmap_*_region() routines.
 *  bitmap: array of unsigned longs corresponding to the bitmap
 *  pos: the beginning of the region
 *  order: region size (log base 2 of number of bits)
 *  reg_op: operation(s) to perform on that region of bitmap
 *
 * Can set, verify and/or release a region of bits in a bitmap,
 * depending on which combination of REG_OP_* flag bits is set.
 *
 * A region of a bitmap is a sequence of bits in the bitmap, of
 * some size '1 << order' (a power of two), aligned to that same
 * '1 << order' power of two.
 *
 * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
 * Returns 0 in all other cases and reg_ops.
 */

enum {
    REG_OP_ISFREE,      /* true if region is all zero bits */
    REG_OP_ALLOC,       /* set all bits in region */
    REG_OP_RELEASE,     /* clear all bits in region */
};

static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
    int nbits_reg;      /* number of bits in region */
    int index;          /* index first long of region in bitmap */
    int offset;         /* bit offset region in bitmap[index] */
    int nlongs_reg;     /* num longs spanned by region in bitmap */
    int nbitsinlong;    /* num bits of region in each spanned long */
    unsigned long mask; /* bitmask for one long of region */
    int i;              /* scans bitmap by longs */
    int ret = 0;        /* return value */

    /*
     * Either nlongs_reg == 1 (for small orders that fit in one long)
     * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
     */
    nbits_reg = 1 << order;
    index = pos / BITS_PER_LONG;
    offset = pos - (index * BITS_PER_LONG);
    nlongs_reg = BITS_TO_LONGS(nbits_reg);
    nbitsinlong = min(nbits_reg, BITS_PER_LONG);

    /*
     * Can't do "mask = (1UL << nbitsinlong) - 1", as that
     * overflows if nbitsinlong == BITS_PER_LONG.
     */
    mask = (1UL << (nbitsinlong - 1));
    mask += mask - 1;
    mask <<= offset;

    switch (reg_op) {
    case REG_OP_ISFREE:
        for (i = 0; i < nlongs_reg; i++) {
            if (bitmap[index + i] & mask)
                goto done;
        }
        ret = 1;    /* all bits in region free (zero) */
        break;

    case REG_OP_ALLOC:
        for (i = 0; i < nlongs_reg; i++)
            bitmap[index + i] |= mask;
        break;

    case REG_OP_RELEASE:
        for (i = 0; i < nlongs_reg; i++)
            bitmap[index + i] &= ~mask;
        break;
    }
done:
    return ret;
}

/**
 * bitmap_find_free_region - find a contiguous aligned mem region
 *  @bitmap: array of unsigned longs corresponding to the bitmap
 *  @bits: number of bits in the bitmap
 *  @order: region size (log base 2 of number of bits) to find
 *
 * Find a region of free (zero) bits in a @bitmap of @bits bits and
 * allocate them (set them to one).  Only consider regions of length
 * a power (@order) of two, aligned to that power of two, which
 * makes the search algorithm much faster.
 *
 * Return the bit offset in bitmap of the allocated region,
 * or -errno on failure.
 */
int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
{
    int pos, end;       /* scans bitmap by regions of size order */

    for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
        if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
            continue;
        __reg_op(bitmap, pos, order, REG_OP_ALLOC);
        return pos;
    }
    return -ENOMEM;
}
EXPORT_SYMBOL(bitmap_find_free_region);

/**
 * bitmap_release_region - release allocated bitmap region
 *  @bitmap: array of unsigned longs corresponding to the bitmap
 *  @pos: beginning of bit region to release
 *  @order: region size (log base 2 of number of bits) to release
 *
 * This is the complement to __bitmap_find_free_region() and releases
 * the found region (by clearing it in the bitmap).
 *
 * No return value.
 */
void bitmap_release_region(unsigned long *bitmap, int pos, int order)
{
    __reg_op(bitmap, pos, order, REG_OP_RELEASE);
}
EXPORT_SYMBOL(bitmap_release_region);

/**
 * bitmap_allocate_region - allocate bitmap region
 *  @bitmap: array of unsigned longs corresponding to the bitmap
 *  @pos: beginning of bit region to allocate
 *  @order: region size (log base 2 of number of bits) to allocate
 *
 * Allocate (set bits in) a specified region of a bitmap.
 *
 * Return 0 on success, or %-EBUSY if specified region wasn't
 * free (not all bits were zero).
 */
int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
{
    if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
        return -EBUSY;
    __reg_op(bitmap, pos, order, REG_OP_ALLOC);
    return 0;
}
EXPORT_SYMBOL(bitmap_allocate_region);

/**
 * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
 * @dst:   destination buffer
 * @src:   bitmap to copy
 * @nbits: number of bits in the bitmap
 *
 * Require nbits % BITS_PER_LONG == 0.
 */
void bitmap_copy_le(void *dst, const unsigned long *src, int nbits)
{
    unsigned long *d = dst;
    int i;

    for (i = 0; i < nbits/BITS_PER_LONG; i++) {
        if (BITS_PER_LONG == 64)
            d[i] = cpu_to_le64(src[i]);
        else
            d[i] = cpu_to_le32(src[i]);
    }
}
EXPORT_SYMBOL(bitmap_copy_le);
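
The region allocator above (bitmap_find_free_region / bitmap_release_region) is the most likely entry point for DDK callers. A minimal usage sketch follows, assuming the usual DECLARE_BITMAP and bitmap_zero helpers from <linux/bitmap.h> are available in the DDK headers; the map name, slot count and function name are made up for illustration and are not part of this commit:

    #include <linux/bitmap.h>

    /* Hypothetical caller: manage 64 slots and grab an order-1 (two-slot),
     * two-slot-aligned region. */
    static DECLARE_BITMAP(slot_map, 64);

    static int grab_two_slots(void)
    {
        int pos;

        bitmap_zero(slot_map, 64);                      /* all slots free  */
        pos = bitmap_find_free_region(slot_map, 64, 1); /* index or -ENOMEM */
        if (pos < 0)
            return pos;
        /* ... slots pos and pos + 1 are now reserved ... */
        bitmap_release_region(slot_map, pos, 1);        /* give them back  */
        return 0;
    }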
File diff suppressed because it is too large
@@ -2215,7 +2215,22 @@ static void do_check_malloc_state(mstate m) {
   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
 
 
+/* Relays to internal calls to malloc/free from realloc, memalign etc */
+
+#if ONLY_MSPACES
+#define internal_malloc(m, b) mspace_malloc(m, b)
+#define internal_free(m, mem) mspace_free(m,mem);
+#else /* ONLY_MSPACES */
+#if MSPACES
+#define internal_malloc(m, b)\
+  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
+#define internal_free(m, mem)\
+  if (m == gm) dlfree(mem); else mspace_free(m,mem);
+#else /* MSPACES */
+#define internal_malloc(m, b) malloc(b)
+#define internal_free(m, mem) free(mem)
+#endif /* MSPACES */
+#endif /* ONLY_MSPACES */
+
 
 static inline void* os_mmap(size_t size)
@@ -2231,7 +2246,6 @@ static inline int os_munmap(void* ptr, size_t size)
 }
 
 
-
 #define MMAP_DEFAULT(s)        os_mmap(s)
 #define MUNMAP_DEFAULT(a, s)   os_munmap((a), (s))
 #define DIRECT_MMAP_DEFAULT(s) os_mmap(s)
@@ -3090,8 +3104,7 @@ void* malloc(size_t bytes)
 
 /* ---------------------------- free --------------------------- */
 
-void free(void* mem)
-{
+void free(void* mem){
 /*
   Consolidate freed chunks with preceeding or succeeding bordering
   free chunks, if they exist, and then place in a bin.  Intermixed
@@ -3206,4 +3219,149 @@ void free(void* mem)
 #endif /* FOOTERS */
 }
+
+void* calloc(size_t n_elements, size_t elem_size) {
+  void* mem;
+  size_t req = 0;
+  if (n_elements != 0) {
+    req = n_elements * elem_size;
+    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
+        (req / n_elements != elem_size))
+      req = MAX_SIZE_T; /* force downstream failure on overflow */
+  }
+  mem = malloc(req);
+  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
+    memset(mem, 0, req);
+  return mem;
+}
+
+/* ------------ Internal support for realloc, memalign, etc -------------- */
+
+/* Try to realloc; only in-place unless can_move true */
+static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
+                                   int can_move) {
+  mchunkptr newp = 0;
+  size_t oldsize = chunksize(p);
+  mchunkptr next = chunk_plus_offset(p, oldsize);
+  if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
+              ok_next(p, next) && ok_pinuse(next))) {
+    if (is_mmapped(p)) {
+      newp = mmap_resize(m, p, nb, can_move);
+    }
+    else if (oldsize >= nb) {             /* already big enough */
+      size_t rsize = oldsize - nb;
+      if (rsize >= MIN_CHUNK_SIZE) {      /* split off remainder */
+        mchunkptr r = chunk_plus_offset(p, nb);
+        set_inuse(m, p, nb);
+        set_inuse(m, r, rsize);
+        dispose_chunk(m, r, rsize);
+      }
+      newp = p;
+    }
+    else if (next == m->top) {  /* extend into top */
+      if (oldsize + m->topsize > nb) {
+        size_t newsize = oldsize + m->topsize;
+        size_t newtopsize = newsize - nb;
+        mchunkptr newtop = chunk_plus_offset(p, nb);
+        set_inuse(m, p, nb);
+        newtop->head = newtopsize |PINUSE_BIT;
+        m->top = newtop;
+        m->topsize = newtopsize;
+        newp = p;
+      }
+    }
+    else if (next == m->dv) { /* extend into dv */
+      size_t dvs = m->dvsize;
+      if (oldsize + dvs >= nb) {
+        size_t dsize = oldsize + dvs - nb;
+        if (dsize >= MIN_CHUNK_SIZE) {
+          mchunkptr r = chunk_plus_offset(p, nb);
+          mchunkptr n = chunk_plus_offset(r, dsize);
+          set_inuse(m, p, nb);
+          set_size_and_pinuse_of_free_chunk(r, dsize);
+          clear_pinuse(n);
+          m->dvsize = dsize;
+          m->dv = r;
+        }
+        else { /* exhaust dv */
+          size_t newsize = oldsize + dvs;
+          set_inuse(m, p, newsize);
+          m->dvsize = 0;
+          m->dv = 0;
+        }
+        newp = p;
+      }
+    }
+    else if (!cinuse(next)) { /* extend into next free chunk */
+      size_t nextsize = chunksize(next);
+      if (oldsize + nextsize >= nb) {
+        size_t rsize = oldsize + nextsize - nb;
+        unlink_chunk(m, next, nextsize);
+        if (rsize < MIN_CHUNK_SIZE) {
+          size_t newsize = oldsize + nextsize;
+          set_inuse(m, p, newsize);
+        }
+        else {
+          mchunkptr r = chunk_plus_offset(p, nb);
+          set_inuse(m, p, nb);
+          set_inuse(m, r, rsize);
+          dispose_chunk(m, r, rsize);
+        }
+        newp = p;
+      }
+    }
+  }
+  else {
+    USAGE_ERROR_ACTION(m, chunk2mem(p));
+  }
+  return newp;
+}
+
+
+void* realloc(void* oldmem, size_t bytes) {
+  void* mem = 0;
+  if (oldmem == 0) {
+    mem = malloc(bytes);
+  }
+  else if (bytes >= MAX_REQUEST) {
+//    MALLOC_FAILURE_ACTION;
+  }
+#ifdef REALLOC_ZERO_BYTES_FREES
+  else if (bytes == 0) {
+    free(oldmem);
+  }
+#endif /* REALLOC_ZERO_BYTES_FREES */
+  else {
+    size_t nb = request2size(bytes);
+    mchunkptr oldp = mem2chunk(oldmem);
+#if ! FOOTERS
+    mstate m = gm;
+#else /* FOOTERS */
+    mstate m = get_mstate_for(oldp);
+    if (!ok_magic(m)) {
+      USAGE_ERROR_ACTION(m, oldmem);
+      return 0;
+    }
+#endif /* FOOTERS */
+    PREACTION(m); {
+      mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
+      POSTACTION(m);
+      if (newp != 0) {
+        check_inuse_chunk(m, newp);
+        mem = chunk2mem(newp);
+      }
+      else {
+        mem = internal_malloc(m, bytes);
+        if (mem != 0) {
+          size_t oc = chunksize(oldp) - overhead_for(oldp);
+          memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
+          internal_free(m, oldmem);
+        }
+      }
+    }
+  }
+  return mem;
+}
+
+
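
The calloc() added above guards the n_elements * elem_size multiplication: only when at least one factor has bits above the low 16 can the product wrap, and in that case a division round-trip exposes the overflow and the request is forced to MAX_SIZE_T. A standalone sketch of the same guard; the helper name is illustrative and not taken from the commit:

    #include <stddef.h>
    #include <stdint.h>

    /* Returns n * size, or SIZE_MAX if the product would overflow size_t.
     * If both factors fit in 16 bits the product cannot overflow a 32-bit
     * (or wider) size_t, so the division only runs in the rare large case. */
    static size_t checked_mul(size_t n, size_t size)
    {
        size_t req = n * size;

        if (((n | size) & ~(size_t)0xffff) && n != 0 && req / n != size)
            return SIZE_MAX;    /* force downstream allocation failure */
        return req;
    }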
@@ -14,7 +14,7 @@
 
 #define PG_SW 0x003
 #define PG_UW 0x007
-#define PG_NOCACHE 0x018
+#define PG_NOCACHE 0x010
 #define PG_SHARED 0x200
 
 
@@ -63,17 +63,6 @@ int ddk_init(struct ddk_params *params);
 u32_t drvEntry(int, char *)__asm__("_drvEntry");
 
 
-#define __WARN() dbgprintf(__FILE__, __LINE__)
-
-#ifndef WARN_ON
-#define WARN_ON(condition) ({ \
-    int __ret_warn_on = !!(condition); \
-    if (unlikely(__ret_warn_on)) \
-        __WARN(); \
-    unlikely(__ret_warn_on); \
-})
-#endif
-
 
 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
@@ -52,6 +52,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/bug.h>
+#include <linux/sched.h>
 
 //#include <linux/miscdevice.h>
 //#include <linux/fs.h>
@@ -85,6 +86,8 @@ struct module;
 struct drm_file;
 struct drm_device;
 
+struct device_node;
+struct videomode;
 //#include <drm/drm_os_linux.h>
 #include <drm/drm_hashtab.h>
 #include <drm/drm_mm.h>
@@ -171,7 +174,7 @@ int drm_err(const char *func, const char *format, ...);
 /** \name Begin the DRM... */
 /*@{*/
 
-#define DRM_DEBUG_CODE 0     /**< Include debugging code if > 1, then
+#define DRM_DEBUG_CODE 2     /**< Include debugging code if > 1, then
                                   also include looping detection. */
 
 #define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be power of 2. */
@@ -1417,6 +1420,8 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                                      struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+                                  struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
@@ -1440,6 +1445,12 @@ extern struct drm_display_mode *
 drm_mode_create_from_cmdline_mode(struct drm_device *dev,
                                   struct drm_cmdline_mode *cmd);
 
+extern int drm_display_mode_from_videomode(const struct videomode *vm,
+                                           struct drm_display_mode *dmode);
+extern int of_get_drm_display_mode(struct device_node *np,
+                                   struct drm_display_mode *dmode,
+                                   int index);
+
 /* Modesetting support */
 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
 extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
@@ -1738,4 +1749,6 @@ static __inline__ int drm_device_is_pcie(struct drm_device *dev)
 #define drm_sysfs_connector_add(connector)
 #define drm_sysfs_connector_remove(connector)
 
+#define LFB_SIZE 0xC00000
+
 #endif
@@ -38,7 +38,8 @@ struct drm_device;
 struct drm_mode_set;
 struct drm_framebuffer;
 struct drm_object_properties;
+struct drm_file;
+struct drm_clip_rect;
 
 #define DRM_MODE_OBJECT_CRTC 0xcccccccc
 #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -254,6 +255,10 @@ struct drm_framebuffer {
      * userspace perspective.
      */
     struct kref refcount;
+    /*
+     * Place on the dev->mode_config.fb_list, access protected by
+     * dev->mode_config.fb_lock.
+     */
     struct list_head head;
     struct drm_mode_object base;
     const struct drm_framebuffer_funcs *funcs;
@@ -390,6 +395,15 @@ struct drm_crtc {
     struct drm_device *dev;
     struct list_head head;
 
+    /**
+     * crtc mutex
+     *
+     * This provides a read lock for the overall crtc state (mode, dpms
+     * state, ...) and a write lock for everything which can be update
+     * without a full modeset (fb, cursor data, ...)
+     */
+    struct mutex mutex;
+
     struct drm_mode_object base;
 
     /* framebuffer the connector is currently bound to */
@@ -429,12 +443,12 @@ struct drm_crtc {
  * @dpms: set power state (see drm_crtc_funcs above)
  * @save: save connector state
  * @restore: restore connector state
- * @reset: reset connector after state has been invalidate (e.g. resume)
+ * @reset: reset connector after state has been invalidated (e.g. resume)
  * @detect: is this connector active?
  * @fill_modes: fill mode list for this connector
- * @set_property: property for this connector may need update
+ * @set_property: property for this connector may need an update
 * @destroy: make object go away
- * @force: notify the driver the connector is forced on
+ * @force: notify the driver that the connector is forced on
  *
  * Each CRTC may have one or more connectors attached to it. The functions
  * below allow the core DRM code to control connectors, enumerate available modes,
@@ -771,8 +785,18 @@ struct drm_mode_config {
     struct mutex idr_mutex; /* for IDR management */
     struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
     /* this is limited to one for now */
+
+
+    /**
+     * fb_lock - mutex to protect fb state
+     *
+     * Besides the global fb list his also protects the fbs list in the
+     * file_priv
+     */
+    struct mutex fb_lock;
     int num_fb;
     struct list_head fb_list;
+
     int num_connector;
     struct list_head connector_list;
     int num_encoder;
@@ -842,6 +866,10 @@ struct drm_prop_enum_list {
     char *name;
 };
 
+extern void drm_modeset_lock_all(struct drm_device *dev);
+extern void drm_modeset_unlock_all(struct drm_device *dev);
+extern void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
+
 extern int drm_crtc_init(struct drm_device *dev,
                          struct drm_crtc *crtc,
                          const struct drm_crtc_funcs *funcs);
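
The hunk above publishes drm_modeset_lock_all()/drm_modeset_unlock_all(). A hedged sketch of how driver code might bracket a read of mode-config state with them; the helper and the crtc->enabled walk are illustrative only and assume the usual drmP.h/drm_crtc.h environment, they are not added by this commit:

    /* Hypothetical helper: count enabled CRTCs while every modeset lock is held. */
    static int count_enabled_crtcs(struct drm_device *dev)
    {
        struct drm_crtc *crtc;
        int n = 0;

        drm_modeset_lock_all(dev);
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
            if (crtc->enabled)
                n++;
        drm_modeset_unlock_all(dev);

        return n;
    }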
@@ -932,10 +960,13 @@ extern void drm_framebuffer_set_object(struct drm_device *dev,
 extern int drm_framebuffer_init(struct drm_device *dev,
                                 struct drm_framebuffer *fb,
                                 const struct drm_framebuffer_funcs *funcs);
+extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+                                                      uint32_t id);
 extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
 extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
 extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
 extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
 extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
 extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
@@ -985,6 +1016,7 @@ extern int drm_mode_getcrtc(struct drm_device *dev,
                             void *data, struct drm_file *file_priv);
 extern int drm_mode_getconnector(struct drm_device *dev,
                                  void *data, struct drm_file *file_priv);
+extern int drm_mode_set_config_internal(struct drm_mode_set *set);
 extern int drm_mode_setcrtc(struct drm_device *dev,
                             void *data, struct drm_file *file_priv);
 extern int drm_mode_getplane(struct drm_device *dev,
@@ -1030,9 +1062,10 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *file_priv);
 extern u8 *drm_find_cea_extension(struct edid *edid);
-extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
+extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
 extern bool drm_detect_monitor_audio(struct edid *edid);
+extern bool drm_rgb_quant_range_selectable(struct edid *edid);
 extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *file_priv);
 extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
@@ -1047,7 +1080,6 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
                                                      int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
                                 int hdisplay, int vdisplay);
-extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
 
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
@@ -247,6 +247,8 @@ struct edid {
 struct drm_encoder;
 struct drm_connector;
 struct drm_display_mode;
+struct hdmi_avi_infoframe;
+
 void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
 int drm_av_sync_delay(struct drm_connector *connector,
                       struct drm_display_mode *mode);
@ -254,4 +256,8 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
|
|||||||
struct drm_display_mode *mode);
|
struct drm_display_mode *mode);
|
||||||
int drm_load_edid_firmware(struct drm_connector *connector);
|
int drm_load_edid_firmware(struct drm_connector *connector);
|
||||||
|
|
||||||
|
int
|
||||||
|
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
|
||||||
|
const struct drm_display_mode *mode);
|
||||||
|
|
||||||
#endif /* __DRM_EDID_H__ */
|
#endif /* __DRM_EDID_H__ */
|
||||||
|
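A short, hedged sketch of the new drm_hdmi_avi_infoframe_from_display_mode() helper declared above. The struct hdmi_avi_infoframe type and hdmi_avi_infoframe_pack() are assumed to come from a linux/hdmi.h port; if this tree does not carry that header yet, treat the packing call as a placeholder for the driver's own infoframe write.

#include <drm/drm_edid.h>
#include <linux/hdmi.h>		/* assumed: hdmi_avi_infoframe / *_pack() */

/* Build and pack the AVI infoframe describing the mode being set, so the
 * encoder code can write it into its hardware infoframe registers. */
static int sketch_fill_avi_infoframe(const struct drm_display_mode *mode,
				     u8 *buf, size_t len)
{
	struct hdmi_avi_infoframe frame;
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (ret < 0)
		return ret;	/* mode cannot be described by an AVI infoframe */

	return hdmi_avi_infoframe_pack(&frame, buf, len);
}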
@@ -47,6 +47,18 @@ struct drm_fb_helper_surface_size {
 	u32 surface_depth;
 };
 
+/**
+ * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
+ * @gamma_set: - Set the given gamma lut register on the given crtc.
+ * @gamma_get: - Read the given gamma lut register on the given crtc, used to
+ * 		save the current lut when force-restoring the fbdev for e.g.
+ * 		kdbg.
+ * @fb_probe: - Driver callback to allocate and initialize the fbdev info
+ * 		structure. Futhermore it also needs to allocate the drm
+ * 		framebuffer used to back the fbdev.
+ *
+ * Driver callbacks used by the fbdev emulation helper library.
+ */
 struct drm_fb_helper_funcs {
 	void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
 			  u16 blue, int regno);
@@ -64,9 +76,7 @@ struct drm_fb_helper_connector {
 
 struct drm_fb_helper {
 	struct drm_framebuffer *fb;
-	struct drm_framebuffer *saved_fb;
 	struct drm_device *dev;
-	struct drm_display_mode *mode;
 	int crtc_count;
 	struct drm_fb_helper_crtc *crtc_info;
 	int connector_count;
@@ -81,9 +91,6 @@ struct drm_fb_helper {
 	bool delayed_hotplug;
 };
 
-int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
-				  int preferred_bpp);
-
 int drm_fb_helper_init(struct drm_device *dev,
 		       struct drm_fb_helper *helper, int crtc_count,
 		       int max_conn);
@@ -102,7 +109,6 @@ int drm_fb_helper_setcolreg(unsigned regno,
 			    struct fb_info *info);
 
 bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
-void drm_fb_helper_restore(void);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height);
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
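To connect the kerneldoc added above with driver code, a hypothetical callback table might look like the sketch below. Only the gamma_set prototype is taken from the hunk; @gamma_get and @fb_probe would be filled in the same way with their prototypes from drm_fb_helper.h, and my_hw_write_lut() is an invented stand-in for the hardware access.

#include <drm/drm_fb_helper.h>

/* Driver-side gamma_set callback matching the prototype shown above. */
static void sketch_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			     u16 blue, int regno)
{
	/* my_hw_write_lut(crtc, regno, red, green, blue);  (hypothetical) */
}

/* The fbdev emulation consumes the callbacks through this table. */
static struct drm_fb_helper_funcs sketch_fb_helper_funcs = {
	.gamma_set = sketch_gamma_set,
	/* .gamma_get and .fb_probe go here as documented above */
};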
@@ -89,6 +89,29 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 {
 	return mm->hole_stack.next;
 }
+
+static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+	return hole_node->start + hole_node->size;
+}
+
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+	BUG_ON(!hole_node->hole_follows);
+	return __drm_mm_hole_node_start(hole_node);
+}
+
+static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	return list_entry(hole_node->node_list.next,
+			  struct drm_mm_node, node_list)->start;
+}
+
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	return __drm_mm_hole_node_end(hole_node);
+}
+
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 						&(mm)->head_node.node_list, \
 						node_list)
@@ -99,9 +122,26 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 	     entry != NULL; entry = next, \
 		next = entry ? list_entry(entry->node_list.next, \
 			struct drm_mm_node, node_list) : NULL) \
+
+/* Note that we need to unroll list_for_each_entry in order to inline
+ * setting hole_start and hole_end on each iteration and keep the
+ * macro sane.
+ */
+#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+	for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+	     &entry->hole_stack != &(mm)->hole_stack ? \
+	     hole_start = drm_mm_hole_node_start(entry), \
+	     hole_end = drm_mm_hole_node_end(entry), \
+	     1 : 0; \
+	     entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
 /*
  * Basic range manager support (drm_mm.c)
  */
+extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+					       unsigned long start,
+					       unsigned long size,
+					       bool atomic);
 extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 						    unsigned long size,
 						    unsigned alignment,
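The new hole iterator is easiest to see in use. The sketch below walks all holes of a drm_mm range manager and returns the size of the largest one; it relies only on drm_mm_for_each_hole() and the hole_node helpers introduced in this hunk.

#include <drm/drm_mm.h>

/* Minimal sketch: report the largest gap currently tracked by @mm. */
static unsigned long sketch_largest_hole(struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long hole_start, hole_end;
	unsigned long largest = 0;

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		/* hole_start and hole_end are refreshed by the iterator on
		 * every pass, so the body only inspects the size. */
		if (hole_end - hole_start > largest)
			largest = hole_end - hole_start;
	}

	return largest;
}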
@@ -139,6 +139,19 @@
 	{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
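For context, each entry added above follows the struct pci_device_id field order (vendor, device, subvendor, subdevice, class, class_mask, driver_data), with the chip family and feature bits packed into driver_data. A hypothetical probe-time check could look like the sketch below; RADEON_FAMILY_MASK is assumed to come from the radeon driver headers, as in the upstream driver, and is not part of this diff.

/* Hypothetical: true for a mobile Oland part matched from the table above. */
static bool sketch_is_mobile_oland(unsigned long driver_data)
{
	return (driver_data & RADEON_FAMILY_MASK) == CHIP_OLAND &&
	       (driver_data & RADEON_IS_MOBILITY);
}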
@@ -23,15 +23,10 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
 
-#ifndef _UAPI_I915_DRM_H_
-#define _UAPI_I915_DRM_H_
-
-#include <drm/drm.h>
-
-/* Please note that modifications to all structs defined here are
- * subject to backwards-compatibility constraints.
- */
+#include <uapi/drm/i915_drm.h>
 
 /* For use by IPS driver */
 extern unsigned long i915_read_mch_val(void);
@@ -39,917 +34,4 @@ extern bool i915_gpu_raise(void);
 extern bool i915_gpu_lower(void);
 extern bool i915_gpu_busy(void);
 extern bool i915_gpu_turbo_disable(void);
+#endif /* _I915_DRM_H_ */
/* Each region is a minimum of 16k, and there are at most 255 of them.
|
|
||||||
*/
|
|
||||||
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
|
|
||||||
* of chars for next/prev indices */
|
|
||||||
#define I915_LOG_MIN_TEX_REGION_SIZE 14
|
|
||||||
|
|
||||||
typedef struct _drm_i915_init {
|
|
||||||
enum {
|
|
||||||
I915_INIT_DMA = 0x01,
|
|
||||||
I915_CLEANUP_DMA = 0x02,
|
|
||||||
I915_RESUME_DMA = 0x03
|
|
||||||
} func;
|
|
||||||
unsigned int mmio_offset;
|
|
||||||
int sarea_priv_offset;
|
|
||||||
unsigned int ring_start;
|
|
||||||
unsigned int ring_end;
|
|
||||||
unsigned int ring_size;
|
|
||||||
unsigned int front_offset;
|
|
||||||
unsigned int back_offset;
|
|
||||||
unsigned int depth_offset;
|
|
||||||
unsigned int w;
|
|
||||||
unsigned int h;
|
|
||||||
unsigned int pitch;
|
|
||||||
unsigned int pitch_bits;
|
|
||||||
unsigned int back_pitch;
|
|
||||||
unsigned int depth_pitch;
|
|
||||||
unsigned int cpp;
|
|
||||||
unsigned int chipset;
|
|
||||||
} drm_i915_init_t;
|
|
||||||
|
|
||||||
typedef struct _drm_i915_sarea {
|
|
||||||
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
|
|
||||||
int last_upload; /* last time texture was uploaded */
|
|
||||||
int last_enqueue; /* last time a buffer was enqueued */
|
|
||||||
int last_dispatch; /* age of the most recently dispatched buffer */
|
|
||||||
int ctxOwner; /* last context to upload state */
|
|
||||||
int texAge;
|
|
||||||
int pf_enabled; /* is pageflipping allowed? */
|
|
||||||
int pf_active;
|
|
||||||
int pf_current_page; /* which buffer is being displayed? */
|
|
||||||
int perf_boxes; /* performance boxes to be displayed */
|
|
||||||
int width, height; /* screen size in pixels */
|
|
||||||
|
|
||||||
drm_handle_t front_handle;
|
|
||||||
int front_offset;
|
|
||||||
int front_size;
|
|
||||||
|
|
||||||
drm_handle_t back_handle;
|
|
||||||
int back_offset;
|
|
||||||
int back_size;
|
|
||||||
|
|
||||||
drm_handle_t depth_handle;
|
|
||||||
int depth_offset;
|
|
||||||
int depth_size;
|
|
||||||
|
|
||||||
drm_handle_t tex_handle;
|
|
||||||
int tex_offset;
|
|
||||||
int tex_size;
|
|
||||||
int log_tex_granularity;
|
|
||||||
int pitch;
|
|
||||||
int rotation; /* 0, 90, 180 or 270 */
|
|
||||||
int rotated_offset;
|
|
||||||
int rotated_size;
|
|
||||||
int rotated_pitch;
|
|
||||||
int virtualX, virtualY;
|
|
||||||
|
|
||||||
unsigned int front_tiled;
|
|
||||||
unsigned int back_tiled;
|
|
||||||
unsigned int depth_tiled;
|
|
||||||
unsigned int rotated_tiled;
|
|
||||||
unsigned int rotated2_tiled;
|
|
||||||
|
|
||||||
int pipeA_x;
|
|
||||||
int pipeA_y;
|
|
||||||
int pipeA_w;
|
|
||||||
int pipeA_h;
|
|
||||||
int pipeB_x;
|
|
||||||
int pipeB_y;
|
|
||||||
int pipeB_w;
|
|
||||||
int pipeB_h;
|
|
||||||
|
|
||||||
/* fill out some space for old userspace triple buffer */
|
|
||||||
drm_handle_t unused_handle;
|
|
||||||
__u32 unused1, unused2, unused3;
|
|
||||||
|
|
||||||
/* buffer object handles for static buffers. May change
|
|
||||||
* over the lifetime of the client.
|
|
||||||
*/
|
|
||||||
__u32 front_bo_handle;
|
|
||||||
__u32 back_bo_handle;
|
|
||||||
__u32 unused_bo_handle;
|
|
||||||
__u32 depth_bo_handle;
|
|
||||||
|
|
||||||
} drm_i915_sarea_t;
|
|
||||||
|
|
||||||
/* due to userspace building against these headers we need some compat here */
|
|
||||||
#define planeA_x pipeA_x
|
|
||||||
#define planeA_y pipeA_y
|
|
||||||
#define planeA_w pipeA_w
|
|
||||||
#define planeA_h pipeA_h
|
|
||||||
#define planeB_x pipeB_x
|
|
||||||
#define planeB_y pipeB_y
|
|
||||||
#define planeB_w pipeB_w
|
|
||||||
#define planeB_h pipeB_h
|
|
||||||
|
|
||||||
/* Flags for perf_boxes
|
|
||||||
*/
|
|
||||||
#define I915_BOX_RING_EMPTY 0x1
|
|
||||||
#define I915_BOX_FLIP 0x2
|
|
||||||
#define I915_BOX_WAIT 0x4
|
|
||||||
#define I915_BOX_TEXTURE_LOAD 0x8
|
|
||||||
#define I915_BOX_LOST_CONTEXT 0x10
|
|
||||||
|
|
||||||
/* I915 specific ioctls
|
|
||||||
* The device specific ioctl range is 0x40 to 0x79.
|
|
||||||
*/
|
|
||||||
#define DRM_I915_INIT 0x00
|
|
||||||
#define DRM_I915_FLUSH 0x01
|
|
||||||
#define DRM_I915_FLIP 0x02
|
|
||||||
#define DRM_I915_BATCHBUFFER 0x03
|
|
||||||
#define DRM_I915_IRQ_EMIT 0x04
|
|
||||||
#define DRM_I915_IRQ_WAIT 0x05
|
|
||||||
#define DRM_I915_GETPARAM 0x06
|
|
||||||
#define DRM_I915_SETPARAM 0x07
|
|
||||||
#define DRM_I915_ALLOC 0x08
|
|
||||||
#define DRM_I915_FREE 0x09
|
|
||||||
#define DRM_I915_INIT_HEAP 0x0a
|
|
||||||
#define DRM_I915_CMDBUFFER 0x0b
|
|
||||||
#define DRM_I915_DESTROY_HEAP 0x0c
|
|
||||||
#define DRM_I915_SET_VBLANK_PIPE 0x0d
|
|
||||||
#define DRM_I915_GET_VBLANK_PIPE 0x0e
|
|
||||||
#define DRM_I915_VBLANK_SWAP 0x0f
|
|
||||||
#define DRM_I915_HWS_ADDR 0x11
|
|
||||||
#define DRM_I915_GEM_INIT 0x13
|
|
||||||
#define DRM_I915_GEM_EXECBUFFER 0x14
|
|
||||||
#define DRM_I915_GEM_PIN 0x15
|
|
||||||
#define DRM_I915_GEM_UNPIN 0x16
|
|
||||||
#define DRM_I915_GEM_BUSY 0x17
|
|
||||||
#define DRM_I915_GEM_THROTTLE 0x18
|
|
||||||
#define DRM_I915_GEM_ENTERVT 0x19
|
|
||||||
#define DRM_I915_GEM_LEAVEVT 0x1a
|
|
||||||
#define DRM_I915_GEM_CREATE 0x1b
|
|
||||||
#define DRM_I915_GEM_PREAD 0x1c
|
|
||||||
#define DRM_I915_GEM_PWRITE 0x1d
|
|
||||||
#define DRM_I915_GEM_MMAP 0x1e
|
|
||||||
#define DRM_I915_GEM_SET_DOMAIN 0x1f
|
|
||||||
#define DRM_I915_GEM_SW_FINISH 0x20
|
|
||||||
#define DRM_I915_GEM_SET_TILING 0x21
|
|
||||||
#define DRM_I915_GEM_GET_TILING 0x22
|
|
||||||
#define DRM_I915_GEM_GET_APERTURE 0x23
|
|
||||||
#define DRM_I915_GEM_MMAP_GTT 0x24
|
|
||||||
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
|
|
||||||
#define DRM_I915_GEM_MADVISE 0x26
|
|
||||||
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
|
|
||||||
#define DRM_I915_OVERLAY_ATTRS 0x28
|
|
||||||
#define DRM_I915_GEM_EXECBUFFER2 0x29
|
|
||||||
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
|
|
||||||
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
|
|
||||||
#define DRM_I915_GEM_WAIT 0x2c
|
|
||||||
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
|
|
||||||
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
|
|
||||||
#define DRM_I915_GEM_SET_CACHING 0x2f
|
|
||||||
#define DRM_I915_GEM_GET_CACHING 0x30
|
|
||||||
#define DRM_I915_REG_READ 0x31
|
|
||||||
|
|
||||||
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
|
|
||||||
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
|
|
||||||
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
|
|
||||||
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
|
|
||||||
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
|
|
||||||
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
|
|
||||||
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
|
|
||||||
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
|
|
||||||
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
|
|
||||||
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
|
|
||||||
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
|
|
||||||
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
|
|
||||||
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
|
|
||||||
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
|
|
||||||
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
|
|
||||||
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
|
|
||||||
#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
|
|
||||||
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
|
|
||||||
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
|
|
||||||
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
|
|
||||||
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
|
|
||||||
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
|
|
||||||
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
|
|
||||||
#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
|
|
||||||
#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
|
|
||||||
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
|
|
||||||
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
|
|
||||||
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
|
|
||||||
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
|
|
||||||
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
|
|
||||||
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
|
|
||||||
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
|
|
||||||
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
|
|
||||||
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
|
|
||||||
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
|
|
||||||
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
|
|
||||||
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
|
|
||||||
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
|
|
||||||
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
|
|
||||||
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
|
|
||||||
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
|
|
||||||
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
|
|
||||||
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
|
|
||||||
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
|
|
||||||
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
|
|
||||||
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
|
|
||||||
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
|
|
||||||
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
|
|
||||||
|
|
||||||
/* Allow drivers to submit batchbuffers directly to hardware, relying
|
|
||||||
* on the security mechanisms provided by hardware.
|
|
||||||
*/
|
|
||||||
typedef struct drm_i915_batchbuffer {
|
|
||||||
int start; /* agp offset */
|
|
||||||
int used; /* nr bytes in use */
|
|
||||||
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
|
|
||||||
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
|
|
||||||
int num_cliprects; /* mulitpass with multiple cliprects? */
|
|
||||||
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
|
|
||||||
} drm_i915_batchbuffer_t;
|
|
||||||
|
|
||||||
/* As above, but pass a pointer to userspace buffer which can be
|
|
||||||
* validated by the kernel prior to sending to hardware.
|
|
||||||
*/
|
|
||||||
typedef struct _drm_i915_cmdbuffer {
|
|
||||||
char __user *buf; /* pointer to userspace command buffer */
|
|
||||||
int sz; /* nr bytes in buf */
|
|
||||||
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
|
|
||||||
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
|
|
||||||
int num_cliprects; /* mulitpass with multiple cliprects? */
|
|
||||||
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
|
|
||||||
} drm_i915_cmdbuffer_t;
|
|
||||||
|
|
||||||
/* Userspace can request & wait on irq's:
|
|
||||||
*/
|
|
||||||
typedef struct drm_i915_irq_emit {
|
|
||||||
int __user *irq_seq;
|
|
||||||
} drm_i915_irq_emit_t;
|
|
||||||
|
|
||||||
typedef struct drm_i915_irq_wait {
|
|
||||||
int irq_seq;
|
|
||||||
} drm_i915_irq_wait_t;
|
|
||||||
|
|
||||||
/* Ioctl to query kernel params:
|
|
||||||
*/
|
|
||||||
#define I915_PARAM_IRQ_ACTIVE 1
|
|
||||||
#define I915_PARAM_ALLOW_BATCHBUFFER 2
|
|
||||||
#define I915_PARAM_LAST_DISPATCH 3
|
|
||||||
#define I915_PARAM_CHIPSET_ID 4
|
|
||||||
#define I915_PARAM_HAS_GEM 5
|
|
||||||
#define I915_PARAM_NUM_FENCES_AVAIL 6
|
|
||||||
#define I915_PARAM_HAS_OVERLAY 7
|
|
||||||
#define I915_PARAM_HAS_PAGEFLIPPING 8
|
|
||||||
#define I915_PARAM_HAS_EXECBUF2 9
|
|
||||||
#define I915_PARAM_HAS_BSD 10
|
|
||||||
#define I915_PARAM_HAS_BLT 11
|
|
||||||
#define I915_PARAM_HAS_RELAXED_FENCING 12
|
|
||||||
#define I915_PARAM_HAS_COHERENT_RINGS 13
|
|
||||||
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
|
|
||||||
#define I915_PARAM_HAS_RELAXED_DELTA 15
|
|
||||||
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
|
|
||||||
#define I915_PARAM_HAS_LLC 17
|
|
||||||
#define I915_PARAM_HAS_ALIASING_PPGTT 18
|
|
||||||
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
|
|
||||||
#define I915_PARAM_HAS_SEMAPHORES 20
|
|
||||||
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
|
|
||||||
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
|
|
||||||
#define I915_PARAM_HAS_SECURE_BATCHES 23
|
|
||||||
#define I915_PARAM_HAS_PINNED_BATCHES 24
|
|
||||||
|
|
||||||
typedef struct drm_i915_getparam {
|
|
||||||
int param;
|
|
||||||
int __user *value;
|
|
||||||
} drm_i915_getparam_t;
|
|
||||||
|
|
||||||
/* Ioctl to set kernel params:
|
|
||||||
*/
|
|
||||||
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
|
|
||||||
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
|
|
||||||
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
|
|
||||||
#define I915_SETPARAM_NUM_USED_FENCES 4
|
|
||||||
|
|
||||||
typedef struct drm_i915_setparam {
|
|
||||||
int param;
|
|
||||||
int value;
|
|
||||||
} drm_i915_setparam_t;
|
|
||||||
|
|
||||||
/* A memory manager for regions of shared memory:
|
|
||||||
*/
|
|
||||||
#define I915_MEM_REGION_AGP 1
|
|
||||||
|
|
||||||
typedef struct drm_i915_mem_alloc {
|
|
||||||
int region;
|
|
||||||
int alignment;
|
|
||||||
int size;
|
|
||||||
int __user *region_offset; /* offset from start of fb or agp */
|
|
||||||
} drm_i915_mem_alloc_t;
|
|
||||||
|
|
||||||
typedef struct drm_i915_mem_free {
|
|
||||||
int region;
|
|
||||||
int region_offset;
|
|
||||||
} drm_i915_mem_free_t;
|
|
||||||
|
|
||||||
typedef struct drm_i915_mem_init_heap {
|
|
||||||
int region;
|
|
||||||
int size;
|
|
||||||
int start;
|
|
||||||
} drm_i915_mem_init_heap_t;
|
|
||||||
|
|
||||||
/* Allow memory manager to be torn down and re-initialized (eg on
|
|
||||||
* rotate):
|
|
||||||
*/
|
|
||||||
typedef struct drm_i915_mem_destroy_heap {
|
|
||||||
int region;
|
|
||||||
} drm_i915_mem_destroy_heap_t;
|
|
||||||
|
|
||||||
/* Allow X server to configure which pipes to monitor for vblank signals
|
|
||||||
*/
|
|
||||||
#define DRM_I915_VBLANK_PIPE_A 1
|
|
||||||
#define DRM_I915_VBLANK_PIPE_B 2
|
|
||||||
|
|
||||||
typedef struct drm_i915_vblank_pipe {
|
|
||||||
int pipe;
|
|
||||||
} drm_i915_vblank_pipe_t;
|
|
||||||
|
|
||||||
/* Schedule buffer swap at given vertical blank:
|
|
||||||
*/
|
|
||||||
typedef struct drm_i915_vblank_swap {
|
|
||||||
drm_drawable_t drawable;
|
|
||||||
enum drm_vblank_seq_type seqtype;
|
|
||||||
unsigned int sequence;
|
|
||||||
} drm_i915_vblank_swap_t;
|
|
||||||
|
|
||||||
typedef struct drm_i915_hws_addr {
|
|
||||||
__u64 addr;
|
|
||||||
} drm_i915_hws_addr_t;
|
|
||||||
|
|
||||||
struct drm_i915_gem_init {
|
|
||||||
/**
|
|
||||||
* Beginning offset in the GTT to be managed by the DRM memory
|
|
||||||
* manager.
|
|
||||||
*/
|
|
||||||
__u64 gtt_start;
|
|
||||||
/**
|
|
||||||
* Ending offset in the GTT to be managed by the DRM memory
|
|
||||||
* manager.
|
|
||||||
*/
|
|
||||||
__u64 gtt_end;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_create {
|
|
||||||
/**
|
|
||||||
* Requested size for the object.
|
|
||||||
*
|
|
||||||
* The (page-aligned) allocated size for the object will be returned.
|
|
||||||
*/
|
|
||||||
__u64 size;
|
|
||||||
/**
|
|
||||||
* Returned handle for the object.
|
|
||||||
*
|
|
||||||
* Object handles are nonzero.
|
|
||||||
*/
|
|
||||||
__u32 handle;
|
|
||||||
__u32 pad;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_pread {
|
|
||||||
/** Handle for the object being read. */
|
|
||||||
__u32 handle;
|
|
||||||
__u32 pad;
|
|
||||||
/** Offset into the object to read from */
|
|
||||||
__u64 offset;
|
|
||||||
/** Length of data to read */
|
|
||||||
__u64 size;
|
|
||||||
/**
|
|
||||||
* Pointer to write the data into.
|
|
||||||
*
|
|
||||||
* This is a fixed-size type for 32/64 compatibility.
|
|
||||||
*/
|
|
||||||
__u64 data_ptr;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_pwrite {
|
|
||||||
/** Handle for the object being written to. */
|
|
||||||
__u32 handle;
|
|
||||||
__u32 pad;
|
|
||||||
/** Offset into the object to write to */
|
|
||||||
__u64 offset;
|
|
||||||
/** Length of data to write */
|
|
||||||
__u64 size;
|
|
||||||
/**
|
|
||||||
* Pointer to read the data from.
|
|
||||||
*
|
|
||||||
* This is a fixed-size type for 32/64 compatibility.
|
|
||||||
*/
|
|
||||||
__u64 data_ptr;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_mmap {
|
|
||||||
/** Handle for the object being mapped. */
|
|
||||||
__u32 handle;
|
|
||||||
__u32 pad;
|
|
||||||
/** Offset in the object to map. */
|
|
||||||
__u64 offset;
|
|
||||||
/**
|
|
||||||
* Length of data to map.
|
|
||||||
*
|
|
||||||
* The value will be page-aligned.
|
|
||||||
*/
|
|
||||||
__u64 size;
|
|
||||||
/**
|
|
||||||
* Returned pointer the data was mapped at.
|
|
||||||
*
|
|
||||||
* This is a fixed-size type for 32/64 compatibility.
|
|
||||||
*/
|
|
||||||
__u64 addr_ptr;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_mmap_gtt {
|
|
||||||
/** Handle for the object being mapped. */
|
|
||||||
__u32 handle;
|
|
||||||
__u32 pad;
|
|
||||||
/**
|
|
||||||
* Fake offset to use for subsequent mmap call
|
|
||||||
*
|
|
||||||
* This is a fixed-size type for 32/64 compatibility.
|
|
||||||
*/
|
|
||||||
__u64 offset;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_set_domain {
|
|
||||||
/** Handle for the object */
|
|
||||||
__u32 handle;
|
|
||||||
|
|
||||||
/** New read domains */
|
|
||||||
__u32 read_domains;
|
|
||||||
|
|
||||||
/** New write domain */
|
|
||||||
__u32 write_domain;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_sw_finish {
|
|
||||||
/** Handle for the object */
|
|
||||||
__u32 handle;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_relocation_entry {
|
|
||||||
/**
|
|
||||||
* Handle of the buffer being pointed to by this relocation entry.
|
|
||||||
*
|
|
||||||
* It's appealing to make this be an index into the mm_validate_entry
|
|
||||||
* list to refer to the buffer, but this allows the driver to create
|
|
||||||
* a relocation list for state buffers and not re-write it per
|
|
||||||
* exec using the buffer.
|
|
||||||
*/
|
|
||||||
__u32 target_handle;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Value to be added to the offset of the target buffer to make up
|
|
||||||
* the relocation entry.
|
|
||||||
*/
|
|
||||||
__u32 delta;
|
|
||||||
|
|
||||||
/** Offset in the buffer the relocation entry will be written into */
|
|
||||||
__u64 offset;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Offset value of the target buffer that the relocation entry was last
|
|
||||||
* written as.
|
|
||||||
*
|
|
||||||
* If the buffer has the same offset as last time, we can skip syncing
|
|
||||||
* and writing the relocation. This value is written back out by
|
|
||||||
* the execbuffer ioctl when the relocation is written.
|
|
||||||
*/
|
|
||||||
__u64 presumed_offset;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Target memory domains read by this operation.
|
|
||||||
*/
|
|
||||||
__u32 read_domains;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Target memory domains written by this operation.
|
|
||||||
*
|
|
||||||
* Note that only one domain may be written by the whole
|
|
||||||
* execbuffer operation, so that where there are conflicts,
|
|
||||||
* the application will get -EINVAL back.
|
|
||||||
*/
|
|
||||||
__u32 write_domain;
|
|
||||||
};
|
|
||||||
|
|
||||||
/** @{
|
|
||||||
* Intel memory domains
|
|
||||||
*
|
|
||||||
* Most of these just align with the various caches in
|
|
||||||
* the system and are used to flush and invalidate as
|
|
||||||
* objects end up cached in different domains.
|
|
||||||
*/
|
|
||||||
/** CPU cache */
|
|
||||||
#define I915_GEM_DOMAIN_CPU 0x00000001
|
|
||||||
/** Render cache, used by 2D and 3D drawing */
|
|
||||||
#define I915_GEM_DOMAIN_RENDER 0x00000002
|
|
||||||
/** Sampler cache, used by texture engine */
|
|
||||||
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
|
|
||||||
/** Command queue, used to load batch buffers */
|
|
||||||
#define I915_GEM_DOMAIN_COMMAND 0x00000008
|
|
||||||
/** Instruction cache, used by shader programs */
|
|
||||||
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
|
|
||||||
/** Vertex address cache */
|
|
||||||
#define I915_GEM_DOMAIN_VERTEX 0x00000020
|
|
||||||
/** GTT domain - aperture and scanout */
|
|
||||||
#define I915_GEM_DOMAIN_GTT 0x00000040
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
struct drm_i915_gem_exec_object {
|
|
||||||
/**
|
|
||||||
* User's handle for a buffer to be bound into the GTT for this
|
|
||||||
* operation.
|
|
||||||
*/
|
|
||||||
__u32 handle;
|
|
||||||
|
|
||||||
/** Number of relocations to be performed on this buffer */
|
|
||||||
__u32 relocation_count;
|
|
||||||
/**
|
|
||||||
* Pointer to array of struct drm_i915_gem_relocation_entry containing
|
|
||||||
* the relocations to be performed in this buffer.
|
|
||||||
*/
|
|
||||||
__u64 relocs_ptr;
|
|
||||||
|
|
||||||
/** Required alignment in graphics aperture */
|
|
||||||
__u64 alignment;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returned value of the updated offset of the object, for future
|
|
||||||
* presumed_offset writes.
|
|
||||||
*/
|
|
||||||
__u64 offset;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_execbuffer {
|
|
||||||
/**
|
|
||||||
* List of buffers to be validated with their relocations to be
|
|
||||||
* performend on them.
|
|
||||||
*
|
|
||||||
* This is a pointer to an array of struct drm_i915_gem_validate_entry.
|
|
||||||
*
|
|
||||||
* These buffers must be listed in an order such that all relocations
|
|
||||||
* a buffer is performing refer to buffers that have already appeared
|
|
||||||
* in the validate list.
|
|
||||||
*/
|
|
||||||
__u64 buffers_ptr;
|
|
||||||
__u32 buffer_count;
|
|
||||||
|
|
||||||
/** Offset in the batchbuffer to start execution from. */
|
|
||||||
__u32 batch_start_offset;
|
|
||||||
/** Bytes used in batchbuffer from batch_start_offset */
|
|
||||||
__u32 batch_len;
|
|
||||||
__u32 DR1;
|
|
||||||
__u32 DR4;
|
|
||||||
__u32 num_cliprects;
|
|
||||||
/** This is a struct drm_clip_rect *cliprects */
|
|
||||||
__u64 cliprects_ptr;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_exec_object2 {
|
|
||||||
/**
|
|
||||||
* User's handle for a buffer to be bound into the GTT for this
|
|
||||||
* operation.
|
|
||||||
*/
|
|
||||||
__u32 handle;
|
|
||||||
|
|
||||||
/** Number of relocations to be performed on this buffer */
|
|
||||||
__u32 relocation_count;
|
|
||||||
/**
|
|
||||||
* Pointer to array of struct drm_i915_gem_relocation_entry containing
|
|
||||||
* the relocations to be performed in this buffer.
|
|
||||||
*/
|
|
||||||
__u64 relocs_ptr;
|
|
||||||
|
|
||||||
/** Required alignment in graphics aperture */
|
|
||||||
__u64 alignment;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returned value of the updated offset of the object, for future
|
|
||||||
* presumed_offset writes.
|
|
||||||
*/
|
|
||||||
__u64 offset;
|
|
||||||
|
|
||||||
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
|
|
||||||
__u64 flags;
|
|
||||||
__u64 rsvd1;
|
|
||||||
__u64 rsvd2;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_execbuffer2 {
|
|
||||||
/**
|
|
||||||
* List of gem_exec_object2 structs
|
|
||||||
*/
|
|
||||||
__u64 buffers_ptr;
|
|
||||||
__u32 buffer_count;
|
|
||||||
|
|
||||||
/** Offset in the batchbuffer to start execution from. */
|
|
||||||
__u32 batch_start_offset;
|
|
||||||
/** Bytes used in batchbuffer from batch_start_offset */
|
|
||||||
__u32 batch_len;
|
|
||||||
__u32 DR1;
|
|
||||||
__u32 DR4;
|
|
||||||
__u32 num_cliprects;
|
|
||||||
/** This is a struct drm_clip_rect *cliprects */
|
|
||||||
__u64 cliprects_ptr;
|
|
||||||
#define I915_EXEC_RING_MASK (7<<0)
|
|
||||||
#define I915_EXEC_DEFAULT (0<<0)
|
|
||||||
#define I915_EXEC_RENDER (1<<0)
|
|
||||||
#define I915_EXEC_BSD (2<<0)
|
|
||||||
#define I915_EXEC_BLT (3<<0)
|
|
||||||
|
|
||||||
/* Used for switching the constants addressing mode on gen4+ RENDER ring.
|
|
||||||
* Gen6+ only supports relative addressing to dynamic state (default) and
|
|
||||||
* absolute addressing.
|
|
||||||
*
|
|
||||||
* These flags are ignored for the BSD and BLT rings.
|
|
||||||
*/
|
|
||||||
#define I915_EXEC_CONSTANTS_MASK (3<<6)
|
|
||||||
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
|
|
||||||
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
|
|
||||||
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
|
|
||||||
__u64 flags;
|
|
||||||
__u64 rsvd1; /* now used for context info */
|
|
||||||
__u64 rsvd2;
|
|
||||||
};
|
|
||||||
|
|
||||||
/** Resets the SO write offset registers for transform feedback on gen7. */
|
|
||||||
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
|
|
||||||
|
|
||||||
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
|
|
||||||
#define i915_execbuffer2_set_context_id(eb2, context) \
|
|
||||||
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
|
|
||||||
#define i915_execbuffer2_get_context_id(eb2) \
|
|
||||||
((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
|
|
||||||
|
|
||||||
struct drm_i915_gem_pin {
|
|
||||||
/** Handle of the buffer to be pinned. */
|
|
||||||
__u32 handle;
|
|
||||||
__u32 pad;
|
|
||||||
|
|
||||||
/** alignment required within the aperture */
|
|
||||||
__u64 alignment;
|
|
||||||
|
|
||||||
/** Returned GTT offset of the buffer. */
|
|
||||||
__u64 offset;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_unpin {
|
|
||||||
/** Handle of the buffer to be unpinned. */
|
|
||||||
__u32 handle;
|
|
||||||
__u32 pad;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_busy {
|
|
||||||
/** Handle of the buffer to check for busy */
|
|
||||||
__u32 handle;
|
|
||||||
|
|
||||||
/** Return busy status (1 if busy, 0 if idle).
|
|
||||||
* The high word is used to indicate on which rings the object
|
|
||||||
* currently resides:
|
|
||||||
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
|
|
||||||
*/
|
|
||||||
__u32 busy;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define I915_CACHING_NONE 0
|
|
||||||
#define I915_CACHING_CACHED 1
|
|
||||||
|
|
||||||
struct drm_i915_gem_caching {
|
|
||||||
/**
|
|
||||||
* Handle of the buffer to set/get the caching level of. */
|
|
||||||
__u32 handle;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Cacheing level to apply or return value
|
|
||||||
*
|
|
||||||
* bits0-15 are for generic caching control (i.e. the above defined
|
|
||||||
* values). bits16-31 are reserved for platform-specific variations
|
|
||||||
* (e.g. l3$ caching on gen7). */
|
|
||||||
__u32 caching;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define I915_TILING_NONE 0
|
|
||||||
#define I915_TILING_X 1
|
|
||||||
#define I915_TILING_Y 2
|
|
||||||
|
|
||||||
#define I915_BIT_6_SWIZZLE_NONE 0
|
|
||||||
#define I915_BIT_6_SWIZZLE_9 1
|
|
||||||
#define I915_BIT_6_SWIZZLE_9_10 2
|
|
||||||
#define I915_BIT_6_SWIZZLE_9_11 3
|
|
||||||
#define I915_BIT_6_SWIZZLE_9_10_11 4
|
|
||||||
/* Not seen by userland */
|
|
||||||
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
|
|
||||||
/* Seen by userland. */
|
|
||||||
#define I915_BIT_6_SWIZZLE_9_17 6
|
|
||||||
#define I915_BIT_6_SWIZZLE_9_10_17 7
|
|
||||||
|
|
||||||
struct drm_i915_gem_set_tiling {
|
|
||||||
/** Handle of the buffer to have its tiling state updated */
|
|
||||||
__u32 handle;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
|
|
||||||
* I915_TILING_Y).
|
|
||||||
*
|
|
||||||
* This value is to be set on request, and will be updated by the
|
|
||||||
* kernel on successful return with the actual chosen tiling layout.
|
|
||||||
*
|
|
||||||
* The tiling mode may be demoted to I915_TILING_NONE when the system
|
|
||||||
* has bit 6 swizzling that can't be managed correctly by GEM.
|
|
||||||
*
|
|
||||||
* Buffer contents become undefined when changing tiling_mode.
|
|
||||||
*/
|
|
||||||
__u32 tiling_mode;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Stride in bytes for the object when in I915_TILING_X or
|
|
||||||
* I915_TILING_Y.
|
|
||||||
*/
|
|
||||||
__u32 stride;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returned address bit 6 swizzling required for CPU access through
|
|
||||||
* mmap mapping.
|
|
||||||
*/
|
|
||||||
__u32 swizzle_mode;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_get_tiling {
|
|
||||||
/** Handle of the buffer to get tiling state for. */
|
|
||||||
__u32 handle;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
|
|
||||||
* I915_TILING_Y).
|
|
||||||
*/
|
|
||||||
__u32 tiling_mode;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returned address bit 6 swizzling required for CPU access through
|
|
||||||
* mmap mapping.
|
|
||||||
*/
|
|
||||||
__u32 swizzle_mode;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_get_aperture {
|
|
||||||
/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
|
|
||||||
__u64 aper_size;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Available space in the aperture used by i915_gem_execbuffer, in
|
|
||||||
* bytes
|
|
||||||
*/
|
|
||||||
__u64 aper_available_size;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_get_pipe_from_crtc_id {
|
|
||||||
/** ID of CRTC being requested **/
|
|
||||||
__u32 crtc_id;
|
|
||||||
|
|
||||||
/** pipe of requested CRTC **/
|
|
||||||
__u32 pipe;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define I915_MADV_WILLNEED 0
|
|
||||||
#define I915_MADV_DONTNEED 1
|
|
||||||
#define __I915_MADV_PURGED 2 /* internal state */
|
|
||||||
|
|
||||||
struct drm_i915_gem_madvise {
|
|
||||||
/** Handle of the buffer to change the backing store advice */
|
|
||||||
__u32 handle;
|
|
||||||
|
|
||||||
/* Advice: either the buffer will be needed again in the near future,
|
|
||||||
* or wont be and could be discarded under memory pressure.
|
|
||||||
*/
|
|
||||||
__u32 madv;
|
|
||||||
|
|
||||||
/** Whether the backing store still exists. */
|
|
||||||
__u32 retained;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* flags */
|
|
||||||
#define I915_OVERLAY_TYPE_MASK 0xff
|
|
||||||
#define I915_OVERLAY_YUV_PLANAR 0x01
|
|
||||||
#define I915_OVERLAY_YUV_PACKED 0x02
|
|
||||||
#define I915_OVERLAY_RGB 0x03
|
|
||||||
|
|
||||||
#define I915_OVERLAY_DEPTH_MASK 0xff00
|
|
||||||
#define I915_OVERLAY_RGB24 0x1000
|
|
||||||
#define I915_OVERLAY_RGB16 0x2000
|
|
||||||
#define I915_OVERLAY_RGB15 0x3000
|
|
||||||
#define I915_OVERLAY_YUV422 0x0100
|
|
||||||
#define I915_OVERLAY_YUV411 0x0200
|
|
||||||
#define I915_OVERLAY_YUV420 0x0300
|
|
||||||
#define I915_OVERLAY_YUV410 0x0400
|
|
||||||
|
|
||||||
#define I915_OVERLAY_SWAP_MASK 0xff0000
|
|
||||||
#define I915_OVERLAY_NO_SWAP 0x000000
|
|
||||||
#define I915_OVERLAY_UV_SWAP 0x010000
|
|
||||||
#define I915_OVERLAY_Y_SWAP 0x020000
|
|
||||||
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
|
|
||||||
|
|
||||||
#define I915_OVERLAY_FLAGS_MASK 0xff000000
|
|
||||||
#define I915_OVERLAY_ENABLE 0x01000000
|
|
||||||
|
|
||||||
struct drm_intel_overlay_put_image {
|
|
||||||
/* various flags and src format description */
|
|
||||||
__u32 flags;
|
|
||||||
/* source picture description */
|
|
||||||
__u32 bo_handle;
|
|
||||||
/* stride values and offsets are in bytes, buffer relative */
|
|
||||||
__u16 stride_Y; /* stride for packed formats */
|
|
||||||
__u16 stride_UV;
|
|
||||||
__u32 offset_Y; /* offset for packet formats */
|
|
||||||
__u32 offset_U;
|
|
||||||
__u32 offset_V;
|
|
||||||
/* in pixels */
|
|
||||||
__u16 src_width;
|
|
||||||
__u16 src_height;
|
|
||||||
/* to compensate the scaling factors for partially covered surfaces */
|
|
||||||
__u16 src_scan_width;
|
|
||||||
__u16 src_scan_height;
|
|
||||||
/* output crtc description */
|
|
||||||
__u32 crtc_id;
|
|
||||||
__u16 dst_x;
|
|
||||||
__u16 dst_y;
|
|
||||||
__u16 dst_width;
|
|
||||||
__u16 dst_height;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* flags */
|
|
||||||
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
|
|
||||||
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
|
|
||||||
struct drm_intel_overlay_attrs {
|
|
||||||
__u32 flags;
|
|
||||||
__u32 color_key;
|
|
||||||
__s32 brightness;
|
|
||||||
__u32 contrast;
|
|
||||||
__u32 saturation;
|
|
||||||
__u32 gamma0;
|
|
||||||
__u32 gamma1;
|
|
||||||
__u32 gamma2;
|
|
||||||
__u32 gamma3;
|
|
||||||
__u32 gamma4;
|
|
||||||
__u32 gamma5;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Intel sprite handling
|
|
||||||
*
|
|
||||||
* Color keying works with a min/mask/max tuple. Both source and destination
|
|
||||||
* color keying is allowed.
|
|
||||||
*
|
|
||||||
* Source keying:
|
|
||||||
* Sprite pixels within the min & max values, masked against the color channels
|
|
||||||
* specified in the mask field, will be transparent. All other pixels will
|
|
||||||
* be displayed on top of the primary plane. For RGB surfaces, only the min
|
|
||||||
* and mask fields will be used; ranged compares are not allowed.
|
|
||||||
*
|
|
||||||
* Destination keying:
|
|
||||||
* Primary plane pixels that match the min value, masked against the color
|
|
||||||
* channels specified in the mask field, will be replaced by corresponding
|
|
||||||
* pixels from the sprite plane.
|
|
||||||
*
|
|
||||||
* Note that source & destination keying are exclusive; only one can be
|
|
||||||
* active on a given plane.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
|
|
||||||
#define I915_SET_COLORKEY_DESTINATION (1<<1)
|
|
||||||
#define I915_SET_COLORKEY_SOURCE (1<<2)
|
|
||||||
struct drm_intel_sprite_colorkey {
|
|
||||||
__u32 plane_id;
|
|
||||||
__u32 min_value;
|
|
||||||
__u32 channel_mask;
|
|
||||||
__u32 max_value;
|
|
||||||
__u32 flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_wait {
|
|
||||||
/** Handle of BO we shall wait on */
|
|
||||||
__u32 bo_handle;
|
|
||||||
__u32 flags;
|
|
||||||
/** Number of nanoseconds to wait, Returns time remaining. */
|
|
||||||
__s64 timeout_ns;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_context_create {
|
|
||||||
/* output: id of new context*/
|
|
||||||
__u32 ctx_id;
|
|
||||||
__u32 pad;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_gem_context_destroy {
|
|
||||||
__u32 ctx_id;
|
|
||||||
__u32 pad;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct drm_i915_reg_read {
|
|
||||||
__u64 offset;
|
|
||||||
__u64 val; /* Return value */
|
|
||||||
};
|
|
||||||
#endif /* _UAPI_I915_DRM_H_ */
|
|
||||||
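The removed block above is the old in-tree copy of the i915 user ABI, which now lives in uapi/drm/i915_drm.h and is pulled in by the slimmed-down header. As a hypothetical illustration of that ABI (not KolibriOS-specific code), a client could allocate a GEM object and tag an execbuffer2 submission with a hardware context id as sketched below; drm_fd handling and the actual batch/buffer setup are omitted, so this only shows the plumbing of the structures and macros defined above.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* the uapi header the definitions above moved into */

static int sketch_create_and_target_context(int drm_fd, uint64_t size,
					    uint32_t ctx_id, uint32_t *handle)
{
	struct drm_i915_gem_create create;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&create, 0, sizeof(create));
	create.size = size;		/* rounded up to a page by the kernel */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return -1;
	*handle = create.handle;

	memset(&execbuf, 0, sizeof(execbuf));
	/* buffers_ptr, buffer_count and batch_len would be filled with a real
	 * batch here; only the context-id plumbing is shown. */
	i915_execbuffer2_set_context_id(execbuf, ctx_id);

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}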
@@ -5,26 +5,8 @@
 
 struct agp_bridge_data;
 
-struct intel_gtt {
-	/* Size of memory reserved for graphics by the BIOS */
-	unsigned int stolen_size;
-	/* Total number of gtt entries. */
-	unsigned int gtt_total_entries;
-	/* Part of the gtt that is mappable by the cpu, for those chips where
-	 * this is not the full gtt. */
-	unsigned int gtt_mappable_entries;
-	/* Whether i915 needs to use the dmar apis or not. */
-	unsigned int needs_dmar : 1;
-	/* Whether we idle the gpu before mapping/unmapping */
-	unsigned int do_idle_maps : 1;
-	/* Share the scratch page dma with ppgtts. */
-	dma_addr_t scratch_page_dma;
-	struct page *scratch_page;
-	/* for ppgtt PDE access */
-	u32 __iomem *gtt;
-	/* needed for ioremap in drm/i915 */
-	phys_addr_t gma_bus_addr;
-} *intel_gtt_get(void);
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
+		   phys_addr_t *mappable_base, unsigned long *mappable_end);
 
 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 		     struct agp_bridge_data *bridge);
@@ -42,10 +24,6 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 #define AGP_DCACHE_MEMORY	1
 #define AGP_PHYS_MEMORY		2
 
-/* New caching attributes for gen6/sandybridge */
-#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
-
 /* flag for GFDT type */
 #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
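Callers of the old struct-returning intel_gtt_get() have to migrate to the out-parameter form shown in the hunk above. A minimal sketch, assuming the drm/intel-gtt.h include path used in this tree:

#include <drm/intel-gtt.h>

static void sketch_query_gtt(void)
{
	size_t gtt_total, stolen_size;
	phys_addr_t mappable_base;
	unsigned long mappable_end;

	intel_gtt_get(&gtt_total, &stolen_size, &mappable_base, &mappable_end);

	/* gtt_total:    size of the GTT address space
	 * stolen_size:  memory reserved for graphics by the BIOS
	 * mappable_base / mappable_end: describe the CPU-mappable part of
	 * the aperture, replacing the fields of the removed struct intel_gtt. */
}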
109  drivers/include/drm/ttm/ttm_execbuf_util.h  Normal file
@@ -0,0 +1,109 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_EXECBUF_UTIL_H_
+#define _TTM_EXECBUF_UTIL_H_
+
+#include <ttm/ttm_bo_api.h>
+#include <linux/list.h>
+
+/**
+ * struct ttm_validate_buffer
+ *
+ * @head:           list head for thread-private list.
+ * @bo:             refcounted buffer object pointer.
+ * @reserved:       Indicates whether @bo has been reserved for validation.
+ * @removed:        Indicates whether @bo has been removed from lru lists.
+ * @put_count:      Number of outstanding references on bo::list_kref.
+ * @old_sync_obj:   Pointer to a sync object about to be unreferenced
+ */
+
+struct ttm_validate_buffer {
+	struct list_head head;
+	struct ttm_buffer_object *bo;
+	bool reserved;
+	bool removed;
+	int put_count;
+	void *old_sync_obj;
+};
+
+/**
+ * function ttm_eu_backoff_reservation
+ *
+ * @list:     thread private list of ttm_validate_buffer structs.
+ *
+ * Undoes all buffer validation reservations for bos pointed to by
+ * the list entries.
+ */
+
+extern void ttm_eu_backoff_reservation(struct list_head *list);
+
+/**
+ * function ttm_eu_reserve_buffers
+ *
+ * @list:    thread private list of ttm_validate_buffer structs.
+ *
+ * Tries to reserve bos pointed to by the list entries for validation.
+ * If the function returns 0, all buffers are marked as "unfenced",
+ * taken off the lru lists and are not synced for write CPU usage.
+ *
+ * If the function detects a deadlock due to multiple threads trying to
+ * reserve the same buffers in reverse order, all threads except one will
+ * back off and retry. This function may sleep while waiting for
+ * CPU write reservations to be cleared, and for other threads to
+ * unreserve their buffers.
+ *
+ * This function may return -ERESTART or -EAGAIN if the calling process
+ * receives a signal while waiting. In that case, no buffers on the list
+ * will be reserved upon return.
+ *
+ * Buffers reserved by this function should be unreserved by
+ * a call to either ttm_eu_backoff_reservation() or
+ * ttm_eu_fence_buffer_objects() when command submission is complete or
+ * has failed.
+ */
+
+extern int ttm_eu_reserve_buffers(struct list_head *list);
+
+/**
+ * function ttm_eu_fence_buffer_objects.
+ *
+ * @list:        thread private list of ttm_validate_buffer structs.
+ * @sync_obj:    The new sync object for the buffers.
+ *
+ * This function should be called when command submission is complete, and
+ * it will add a new sync object to bos pointed to by entries on @list.
+ * It also unreserves all buffers, putting them on lru lists.
+ *
+ */
+
+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
+
+#endif
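The three helpers declared in this new header are meant to be used together around command submission. A minimal sketch of that flow follows; my_submit_commands() is a hypothetical, driver-specific stand-in and the sync object is left opaque, exactly as in the declarations above.

#include <ttm/ttm_execbuf_util.h>
#include <linux/list.h>

static int my_submit_commands(struct list_head *list);	/* hypothetical */

static int sketch_validate_and_submit(struct list_head *val_list,
				      void *sync_obj)
{
	int ret;

	/* Reserve every ttm_validate_buffer on the list; on success they are
	 * taken off the LRU lists and marked for validation. */
	ret = ttm_eu_reserve_buffers(val_list);
	if (ret)
		return ret;	/* e.g. interrupted by a signal */

	ret = my_submit_commands(val_list);
	if (ret) {
		/* Submission failed: drop all reservations again. */
		ttm_eu_backoff_reservation(val_list);
		return ret;
	}

	/* Attach the new sync object and unreserve, putting the buffers
	 * back on the LRU lists. */
	ttm_eu_fence_buffer_objects(val_list, sync_obj);
	return 0;
}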
247  drivers/include/drm/ttm/ttm_lock.h  Normal file
@@ -0,0 +1,247 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while having the vt-lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include <ttm/ttm_object.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ */
+
+struct ttm_lock {
+	struct ttm_base_object base;
+	wait_queue_head_t queue;
+	spinlock_t lock;
+	int32_t rw;
+	uint32_t flags;
+	bool kill_takers;
+	int signal;
+	struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_lock_downgrade
|
||||||
|
*
|
||||||
|
* @lock: Pointer to a struct ttm_lock
|
||||||
|
*
|
||||||
|
* Downgrades a write lock to a read lock.
|
||||||
|
*/
|
||||||
|
extern void ttm_lock_downgrade(struct ttm_lock *lock);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_suspend_lock
|
||||||
|
*
|
||||||
|
* @lock: Pointer to a struct ttm_lock
|
||||||
|
*
|
||||||
|
* Takes the lock in suspend mode. Excludes read and write mode.
|
||||||
|
*/
|
||||||
|
extern void ttm_suspend_lock(struct ttm_lock *lock);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_suspend_unlock
|
||||||
|
*
|
||||||
|
* @lock: Pointer to a struct ttm_lock
|
||||||
|
*
|
||||||
|
* Releases a suspend lock
|
||||||
|
*/
|
||||||
|
extern void ttm_suspend_unlock(struct ttm_lock *lock);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_vt_lock
|
||||||
|
*
|
||||||
|
* @lock: Pointer to a struct ttm_lock
|
||||||
|
* @interruptible: Interruptible sleeping while waiting for a lock.
|
||||||
|
* @tfile: Pointer to a struct ttm_object_file to register the lock with.
|
||||||
|
*
|
||||||
|
* Takes the lock in vt mode.
|
||||||
|
* Returns:
|
||||||
|
* -ERESTARTSYS If interrupted by a signal and interruptible is true.
|
||||||
|
* -ENOMEM: Out of memory when locking.
|
||||||
|
*/
|
||||||
|
extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
|
||||||
|
struct ttm_object_file *tfile);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_vt_unlock
|
||||||
|
*
|
||||||
|
* @lock: Pointer to a struct ttm_lock
|
||||||
|
*
|
||||||
|
* Releases a vt lock.
|
||||||
|
* Returns:
|
||||||
|
* -EINVAL If the lock was not held.
|
||||||
|
*/
|
||||||
|
extern int ttm_vt_unlock(struct ttm_lock *lock);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_write_unlock
|
||||||
|
*
|
||||||
|
* @lock: Pointer to a struct ttm_lock
|
||||||
|
*
|
||||||
|
* Releases a write lock.
|
||||||
|
*/
|
||||||
|
extern void ttm_write_unlock(struct ttm_lock *lock);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_write_lock
|
||||||
|
*
|
||||||
|
* @lock: Pointer to a struct ttm_lock
|
||||||
|
* @interruptible: Interruptible sleeping while waiting for a lock.
|
||||||
|
*
|
||||||
|
* Takes the lock in write mode.
|
||||||
|
* Returns:
|
||||||
|
* -ERESTARTSYS If interrupted by a signal and interruptible is true.
|
||||||
|
*/
|
||||||
|
extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_lock_set_kill
|
||||||
|
*
|
||||||
|
* @lock: Pointer to a struct ttm_lock
|
||||||
|
* @val: Boolean whether to kill processes taking the lock.
|
||||||
|
* @signal: Signal to send to the process taking the lock.
|
||||||
|
*
|
||||||
|
* The kill-when-taking-lock functionality is used to kill processes that keep
|
||||||
|
* on using the TTM functionality when its resources have been taken down, for
|
||||||
|
* example when the X server exits. A typical sequence would look like this:
|
||||||
|
* - X server takes lock in write mode.
|
||||||
|
* - ttm_lock_set_kill() is called with @val set to true.
|
||||||
|
* - As part of X server exit, TTM resources are taken down.
|
||||||
|
* - X server releases the lock on file release.
|
||||||
|
* - Another dri client wants to render, takes the lock and is killed.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
|
||||||
|
int signal)
|
||||||
|
{
|
||||||
|
lock->kill_takers = val;
|
||||||
|
if (val)
|
||||||
|
lock->signal = signal;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
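A minimal usage sketch for the read side of the lock follows. The embedding `struct my_device` is illustrative, and the lock is assumed to have been set up once at device initialization with ttm_lock_init().

#include <drm/ttm/ttm_lock.h>

static int my_validate_buffers(struct my_device *dev)   /* hypothetical device struct */
{
        int ret;

        /* Sleeps while the lock is held in vt, suspend or write mode. */
        ret = ttm_read_lock(&dev->ttm_lock, true /* interruptible */);
        if (unlikely(ret != 0))
                return ret;                /* typically -ERESTARTSYS */

        /* ... validate buffer objects here ... */

        ttm_read_unlock(&dev->ttm_lock);
        return 0;
}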
275	drivers/include/drm/ttm/ttm_object.h	Normal file
@ -0,0 +1,275 @@
|
|||||||
|
/**************************************************************************
|
||||||
|
*
|
||||||
|
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||||
|
* All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
* copy of this software and associated documentation files (the
|
||||||
|
* "Software"), to deal in the Software without restriction, including
|
||||||
|
* without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
* distribute, sub license, and/or sell copies of the Software, and to
|
||||||
|
* permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
* the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice (including the
|
||||||
|
* next paragraph) shall be included in all copies or substantial portions
|
||||||
|
* of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||||
|
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||||
|
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||||
|
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
*
|
||||||
|
**************************************************************************/
|
||||||
|
/*
|
||||||
|
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||||
|
*/
|
||||||
|
/** @file ttm_object.h
|
||||||
|
*
|
||||||
|
* Base- and reference object implementation for the various
|
||||||
|
* ttm objects. Implements reference counting, minimal security checks
|
||||||
|
* and release on file close.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _TTM_OBJECT_H_
|
||||||
|
#define _TTM_OBJECT_H_
|
||||||
|
|
||||||
|
#include <linux/list.h>
|
||||||
|
#include <drm/drm_hashtab.h>
|
||||||
|
#include <linux/kref.h>
|
||||||
|
#include <linux/rcupdate.h>
|
||||||
|
#include <ttm/ttm_memory.h>
|
||||||
|
|
||||||
|
/**
|
||||||
|
* enum ttm_ref_type
|
||||||
|
*
|
||||||
|
* Describes what type of reference a ref object holds.
|
||||||
|
*
|
||||||
|
* TTM_REF_USAGE is a simple refcount on a base object.
|
||||||
|
*
|
||||||
|
* TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
|
||||||
|
* buffer object.
|
||||||
|
*
|
||||||
|
* TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
|
||||||
|
* buffer object.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
enum ttm_ref_type {
|
||||||
|
TTM_REF_USAGE,
|
||||||
|
TTM_REF_SYNCCPU_READ,
|
||||||
|
TTM_REF_SYNCCPU_WRITE,
|
||||||
|
TTM_REF_NUM
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* enum ttm_object_type
|
||||||
|
*
|
||||||
|
* One entry per ttm object type.
|
||||||
|
* Device-specific types should use the
|
||||||
|
* ttm_driver_typex types.
|
||||||
|
*/
|
||||||
|
|
||||||
|
enum ttm_object_type {
|
||||||
|
ttm_fence_type,
|
||||||
|
ttm_buffer_type,
|
||||||
|
ttm_lock_type,
|
||||||
|
ttm_driver_type0 = 256,
|
||||||
|
ttm_driver_type1,
|
||||||
|
ttm_driver_type2,
|
||||||
|
ttm_driver_type3,
|
||||||
|
ttm_driver_type4,
|
||||||
|
ttm_driver_type5
|
||||||
|
};
|
||||||
|
|
||||||
|
struct ttm_object_file;
|
||||||
|
struct ttm_object_device;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct ttm_base_object
|
||||||
|
*
|
||||||
|
* @hash: hash entry for the per-device object hash.
|
||||||
|
* @type: derived type this object is base class for.
|
||||||
|
* @shareable: Other ttm_object_files can access this object.
|
||||||
|
*
|
||||||
|
* @tfile: Pointer to ttm_object_file of the creator.
|
||||||
|
* NULL if the object was not created by a user request.
|
||||||
|
* (kernel object).
|
||||||
|
*
|
||||||
|
* @refcount: Number of references to this object, not
|
||||||
|
* including the hash entry. A reference to a base object can
|
||||||
|
* only be held by a ref object.
|
||||||
|
*
|
||||||
|
* @refcount_release: A function to be called when there are
|
||||||
|
* no more references to this object. This function should
|
||||||
|
* destroy the object (or make sure destruction eventually happens),
|
||||||
|
* and when it is called, the object has
|
||||||
|
* already been taken out of the per-device hash. The parameter
|
||||||
|
* "base" should be set to NULL by the function.
|
||||||
|
*
|
||||||
|
* @ref_obj_release: A function to be called when a reference object
|
||||||
|
* with another ttm_ref_type than TTM_REF_USAGE is deleted.
|
||||||
|
* This function may, for example, release a lock held by a user-space
|
||||||
|
* process.
|
||||||
|
*
|
||||||
|
* This struct is intended to be used as a base struct for objects that
|
||||||
|
* are visible to user-space. It provides a global name, race-safe
|
||||||
|
* access and refcounting, minimal access control and hooks for unref actions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
struct ttm_base_object {
|
||||||
|
struct rcu_head rhead;
|
||||||
|
struct drm_hash_item hash;
|
||||||
|
enum ttm_object_type object_type;
|
||||||
|
bool shareable;
|
||||||
|
struct ttm_object_file *tfile;
|
||||||
|
struct kref refcount;
|
||||||
|
void (*refcount_release) (struct ttm_base_object **base);
|
||||||
|
void (*ref_obj_release) (struct ttm_base_object *base,
|
||||||
|
enum ttm_ref_type ref_type);
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_base_object_init
|
||||||
|
*
|
||||||
|
* @tfile: Pointer to a struct ttm_object_file.
|
||||||
|
* @base: The struct ttm_base_object to initialize.
|
||||||
|
* @shareable: This object is shareable with other applications.
|
||||||
|
* (different @tfile pointers.)
|
||||||
|
* @type: The object type.
|
||||||
|
* @refcount_release: See the struct ttm_base_object description.
|
||||||
|
* @ref_obj_release: See the struct ttm_base_object description.
|
||||||
|
*
|
||||||
|
* Initializes a struct ttm_base_object.
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern int ttm_base_object_init(struct ttm_object_file *tfile,
|
||||||
|
struct ttm_base_object *base,
|
||||||
|
bool shareable,
|
||||||
|
enum ttm_object_type type,
|
||||||
|
void (*refcount_release) (struct ttm_base_object
|
||||||
|
**),
|
||||||
|
void (*ref_obj_release) (struct ttm_base_object
|
||||||
|
*,
|
||||||
|
enum ttm_ref_type
|
||||||
|
ref_type));
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_base_object_lookup
|
||||||
|
*
|
||||||
|
* @tfile: Pointer to a struct ttm_object_file.
|
||||||
|
* @key: Hash key
|
||||||
|
*
|
||||||
|
* Looks up a struct ttm_base_object with the key @key.
|
||||||
|
* Also verifies that the object is visible to the application, by
|
||||||
|
* comparing the @tfile argument and checking the object shareable flag.
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
|
||||||
|
*tfile, uint32_t key);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_base_object_unref
|
||||||
|
*
|
||||||
|
* @p_base: Pointer to a pointer referencing a struct ttm_base_object.
|
||||||
|
*
|
||||||
|
* Decrements the base object refcount and clears the pointer pointed to by
|
||||||
|
* p_base.
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern void ttm_base_object_unref(struct ttm_base_object **p_base);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_ref_object_add.
|
||||||
|
*
|
||||||
|
* @tfile: A struct ttm_object_file representing the application owning the
|
||||||
|
* ref_object.
|
||||||
|
* @base: The base object to reference.
|
||||||
|
* @ref_type: The type of reference.
|
||||||
|
* @existed: Upon completion, indicates that an identical reference object
|
||||||
|
* already existed, and the refcount was upped on that object instead.
|
||||||
|
*
|
||||||
|
* Adding a ref object to a base object is basically like referencing the
|
||||||
|
* base object, but a user-space application holds the reference. When the
|
||||||
|
* file corresponding to @tfile is closed, all its reference objects are
|
||||||
|
* deleted. A reference object can have different types depending on what
|
||||||
|
* it's intended for. It can be used for refcounting to prevent object destruction.
|
||||||
|
* When user-space takes a lock, it can add a ref object to that lock to
|
||||||
|
* make sure the lock is released if the application dies. A ref object
|
||||||
|
* will hold a single reference on a base object.
|
||||||
|
*/
|
||||||
|
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
|
||||||
|
struct ttm_base_object *base,
|
||||||
|
enum ttm_ref_type ref_type, bool *existed);
|
||||||
|
/**
|
||||||
|
* ttm_ref_object_base_unref
|
||||||
|
*
|
||||||
|
* @key: Key representing the base object.
|
||||||
|
* @ref_type: Ref type of the ref object to be dereferenced.
|
||||||
|
*
|
||||||
|
* Unreference a ref object with type @ref_type
|
||||||
|
* on the base object identified by @key. If there are no duplicate
|
||||||
|
* references, the ref object will be destroyed and the base object
|
||||||
|
* will be unreferenced.
|
||||||
|
*/
|
||||||
|
extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
|
||||||
|
unsigned long key,
|
||||||
|
enum ttm_ref_type ref_type);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_object_file_init - initialize a struct ttm_object file
|
||||||
|
*
|
||||||
|
* @tdev: A struct ttm_object device this file is initialized on.
|
||||||
|
* @hash_order: Order of the hash table used to hold the reference objects.
|
||||||
|
*
|
||||||
|
* This is typically called by the file_ops::open function.
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
|
||||||
|
*tdev,
|
||||||
|
unsigned int hash_order);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_object_file_release - release data held by a ttm_object_file
|
||||||
|
*
|
||||||
|
* @p_tfile: Pointer to pointer to the ttm_object_file object to release.
|
||||||
|
* *p_tfile will be set to NULL by this function.
|
||||||
|
*
|
||||||
|
* Releases all data associated by a ttm_object_file.
|
||||||
|
* Typically called from file_ops::release. The caller must
|
||||||
|
* ensure that there are no concurrent users of tfile.
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_object device init - initialize a struct ttm_object_device
|
||||||
|
*
|
||||||
|
* @hash_order: Order of hash table used to hash the base objects.
|
||||||
|
*
|
||||||
|
* This function is typically called on device initialization to prepare
|
||||||
|
* data structures needed for ttm base and ref objects.
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern struct ttm_object_device *ttm_object_device_init
|
||||||
|
(struct ttm_mem_global *mem_glob, unsigned int hash_order);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_object_device_release - release data held by a ttm_object_device
|
||||||
|
*
|
||||||
|
* @p_tdev: Pointer to pointer to the ttm_object_device object to release.
|
||||||
|
* *p_tdev will be set to NULL by this function.
|
||||||
|
*
|
||||||
|
* Releases all data associated by a ttm_object_device.
|
||||||
|
* Typically called from driver::unload before the destruction of the
|
||||||
|
* device private data structure.
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
|
||||||
|
|
||||||
|
#define ttm_base_object_kfree(__object, __base)\
|
||||||
|
kfree_rcu(__object, __base.rhead)
|
||||||
|
#endif
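A hedged sketch of how a driver-defined, user-visible object might wrap ttm_base_object. The surrounding `struct my_object`, my_object_release() and the caller-supplied tfile are illustrative only; container_of() and kfree() are assumed to be provided by the DDK's kernel headers.

#include <drm/ttm/ttm_object.h>

struct my_object {
        struct ttm_base_object base;
        /* driver payload ... */
};

static void my_object_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct my_object *obj = container_of(base, struct my_object, base);

        *p_base = NULL;                    /* part of the refcount_release contract */
        kfree(obj);
}

static int my_object_create(struct ttm_object_file *tfile, struct my_object *obj)
{
        /* A shareable object of a driver-specific type; no ref_obj_release hook. */
        return ttm_base_object_init(tfile, &obj->base, true,
                                    ttm_driver_type0,
                                    &my_object_release, NULL);
}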
99	drivers/include/drm/ttm/ttm_page_alloc.h	Normal file
@ -0,0 +1,99 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) Red Hat Inc.
|
||||||
|
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
|
* to deal in the Software without restriction, including without limitation
|
||||||
|
* the rights to use, copy, modify, merge, publish, distribute, sub license,
|
||||||
|
* and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
* Software is furnished to do so, subject to the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice (including the
|
||||||
|
* next paragraph) shall be included in all copies or substantial portions
|
||||||
|
* of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||||
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
|
* DEALINGS IN THE SOFTWARE.
|
||||||
|
*
|
||||||
|
* Authors: Dave Airlie <airlied@redhat.com>
|
||||||
|
* Jerome Glisse <jglisse@redhat.com>
|
||||||
|
*/
|
||||||
|
#ifndef TTM_PAGE_ALLOC
|
||||||
|
#define TTM_PAGE_ALLOC
|
||||||
|
|
||||||
|
#include <drm/ttm/ttm_bo_driver.h>
|
||||||
|
#include <drm/ttm/ttm_memory.h>
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize pool allocator.
|
||||||
|
*/
|
||||||
|
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
|
||||||
|
/**
|
||||||
|
* Free pool allocator.
|
||||||
|
*/
|
||||||
|
void ttm_page_alloc_fini(void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_pool_populate:
|
||||||
|
*
|
||||||
|
* @ttm: The struct ttm_tt to contain the backing pages.
|
||||||
|
*
|
||||||
|
* Add backing pages to all of @ttm
|
||||||
|
*/
|
||||||
|
extern int ttm_pool_populate(struct ttm_tt *ttm);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ttm_pool_unpopulate:
|
||||||
|
*
|
||||||
|
* @ttm: The struct ttm_tt which to free backing pages.
|
||||||
|
*
|
||||||
|
* Free all pages of @ttm
|
||||||
|
*/
|
||||||
|
extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Output the state of pools to debugfs file
|
||||||
|
*/
|
||||||
|
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
|
||||||
|
|
||||||
|
|
||||||
|
#ifdef CONFIG_SWIOTLB
|
||||||
|
/**
|
||||||
|
* Initialize pool allocator.
|
||||||
|
*/
|
||||||
|
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Free pool allocator.
|
||||||
|
*/
|
||||||
|
void ttm_dma_page_alloc_fini(void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Output the state of pools to debugfs file
|
||||||
|
*/
|
||||||
|
extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
|
||||||
|
|
||||||
|
extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
|
||||||
|
extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
|
||||||
|
|
||||||
|
#else
|
||||||
|
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
|
||||||
|
unsigned max_pages)
|
||||||
|
{
|
||||||
|
return -ENODEV;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void ttm_dma_page_alloc_fini(void) { return; }
|
||||||
|
|
||||||
|
static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
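A minimal sketch of populating and freeing a ttm_tt through the pool allocator declared above. The ttm_tt object itself is assumed to have been created by the driver, and the bind/unbind callers are illustrative.

#include <drm/ttm/ttm_page_alloc.h>

static int my_bind_tt(struct ttm_tt *ttm)
{
        int ret = ttm_pool_populate(ttm);  /* allocate backing pages for all of @ttm */

        if (ret)
                return ret;

        /* ... map or bind the pages here ... */
        return 0;
}

static void my_unbind_tt(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);          /* free all backing pages of @ttm */
}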
@ -33,4 +33,9 @@ struct scatterlist {
 
 #define ARCH_HAS_SG_CHAIN
 
+int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+	       int nelems, int dir);
+
+#define dma_unmap_sg(d, s, n, r)
+
 #endif /* __ASM_GENERIC_SCATTERLIST_H */
@ -1,12 +1,63 @@
 #ifndef _ASM_GENERIC_BUG_H
 #define _ASM_GENERIC_BUG_H
 
+//extern __printf(3, 4)
+//void warn_slowpath_fmt(const char *file, const int line,
+//		const char *fmt, ...);
+//extern __printf(4, 5)
+//void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
+//		const char *fmt, ...);
+
+//extern void warn_slowpath_null(const char *file, const int line);
+
+#define __WARN()		printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__)
+#define __WARN_printf(arg...)	printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__)
+
 #define WARN(condition, format...) ({		\
 	int __ret_warn_on = !!(condition);	\
-	unlikely(__ret_warn_on);		\
+	if (unlikely(__ret_warn_on))		\
+		__WARN_printf(format);		\
+	unlikely(__ret_warn_on);		\
 })
 
+#define WARN_ON(condition) ({			\
+	int __ret_warn_on = !!(condition);	\
+	if (unlikely(__ret_warn_on))		\
+		__WARN();			\
+	unlikely(__ret_warn_on);		\
+})
+
+#define WARN_ONCE(condition, format...) ({	\
+	static bool __warned;			\
+	int __ret_warn_once = !!(condition);	\
+						\
+	if (unlikely(__ret_warn_once))		\
+		if (WARN(!__warned, format))	\
+			__warned = true;	\
+	unlikely(__ret_warn_once);		\
+})
+
+#define WARN_ON_ONCE(condition) ({		\
+	static bool __warned;			\
+	int __ret_warn_once = !!(condition);	\
+						\
+	if (unlikely(__ret_warn_once))		\
+		if (WARN_ON(!__warned))		\
+			__warned = true;	\
+	unlikely(__ret_warn_once);		\
+})
+
+#define BUG() do { \
+	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
+	while(1){ delay(10); }; \
+} while (0)
+
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
+
 #endif
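These macros evaluate to the (unlikely-annotated) condition itself, so they can be used directly inside an `if`. A small sketch of the pattern; `struct ring` and setup_ring() are illustrative only.

#include <linux/bug.h>
#include <errno-base.h>

static int setup_ring(struct ring *ring)           /* hypothetical structure */
{
        /* Warn once but keep going if the size is not a power of two. */
        WARN_ON_ONCE(ring->size & (ring->size - 1));

        /* WARN() returns the condition, so it can guard an early exit. */
        if (WARN(ring->base == NULL, "ring %p has no backing store\n", ring))
                return -EINVAL;

        /* With this BUG() implementation the thread spins in delay() forever. */
        BUG_ON(ring->size == 0);
        return 0;
}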
231	drivers/include/linux/hdmi.h	Normal file
@ -0,0 +1,231 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (C) 2012 Avionic Design GmbH
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License version 2 as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef __LINUX_HDMI_H_
|
||||||
|
#define __LINUX_HDMI_H_
|
||||||
|
|
||||||
|
#include <linux/types.h>
|
||||||
|
|
||||||
|
enum hdmi_infoframe_type {
|
||||||
|
HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
|
||||||
|
HDMI_INFOFRAME_TYPE_AVI = 0x82,
|
||||||
|
HDMI_INFOFRAME_TYPE_SPD = 0x83,
|
||||||
|
HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
|
||||||
|
};
|
||||||
|
|
||||||
|
#define HDMI_INFOFRAME_HEADER_SIZE 4
|
||||||
|
#define HDMI_AVI_INFOFRAME_SIZE 13
|
||||||
|
#define HDMI_SPD_INFOFRAME_SIZE 25
|
||||||
|
#define HDMI_AUDIO_INFOFRAME_SIZE 10
|
||||||
|
|
||||||
|
enum hdmi_colorspace {
|
||||||
|
HDMI_COLORSPACE_RGB,
|
||||||
|
HDMI_COLORSPACE_YUV422,
|
||||||
|
HDMI_COLORSPACE_YUV444,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_scan_mode {
|
||||||
|
HDMI_SCAN_MODE_NONE,
|
||||||
|
HDMI_SCAN_MODE_OVERSCAN,
|
||||||
|
HDMI_SCAN_MODE_UNDERSCAN,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_colorimetry {
|
||||||
|
HDMI_COLORIMETRY_NONE,
|
||||||
|
HDMI_COLORIMETRY_ITU_601,
|
||||||
|
HDMI_COLORIMETRY_ITU_709,
|
||||||
|
HDMI_COLORIMETRY_EXTENDED,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_picture_aspect {
|
||||||
|
HDMI_PICTURE_ASPECT_NONE,
|
||||||
|
HDMI_PICTURE_ASPECT_4_3,
|
||||||
|
HDMI_PICTURE_ASPECT_16_9,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_active_aspect {
|
||||||
|
HDMI_ACTIVE_ASPECT_16_9_TOP = 2,
|
||||||
|
HDMI_ACTIVE_ASPECT_14_9_TOP = 3,
|
||||||
|
HDMI_ACTIVE_ASPECT_16_9_CENTER = 4,
|
||||||
|
HDMI_ACTIVE_ASPECT_PICTURE = 8,
|
||||||
|
HDMI_ACTIVE_ASPECT_4_3 = 9,
|
||||||
|
HDMI_ACTIVE_ASPECT_16_9 = 10,
|
||||||
|
HDMI_ACTIVE_ASPECT_14_9 = 11,
|
||||||
|
HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13,
|
||||||
|
HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14,
|
||||||
|
HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_extended_colorimetry {
|
||||||
|
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
|
||||||
|
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
|
||||||
|
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
|
||||||
|
HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
|
||||||
|
HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_quantization_range {
|
||||||
|
HDMI_QUANTIZATION_RANGE_DEFAULT,
|
||||||
|
HDMI_QUANTIZATION_RANGE_LIMITED,
|
||||||
|
HDMI_QUANTIZATION_RANGE_FULL,
|
||||||
|
};
|
||||||
|
|
||||||
|
/* non-uniform picture scaling */
|
||||||
|
enum hdmi_nups {
|
||||||
|
HDMI_NUPS_UNKNOWN,
|
||||||
|
HDMI_NUPS_HORIZONTAL,
|
||||||
|
HDMI_NUPS_VERTICAL,
|
||||||
|
HDMI_NUPS_BOTH,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_ycc_quantization_range {
|
||||||
|
HDMI_YCC_QUANTIZATION_RANGE_LIMITED,
|
||||||
|
HDMI_YCC_QUANTIZATION_RANGE_FULL,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_content_type {
|
||||||
|
HDMI_CONTENT_TYPE_NONE,
|
||||||
|
HDMI_CONTENT_TYPE_PHOTO,
|
||||||
|
HDMI_CONTENT_TYPE_CINEMA,
|
||||||
|
HDMI_CONTENT_TYPE_GAME,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct hdmi_avi_infoframe {
|
||||||
|
enum hdmi_infoframe_type type;
|
||||||
|
unsigned char version;
|
||||||
|
unsigned char length;
|
||||||
|
enum hdmi_colorspace colorspace;
|
||||||
|
bool active_info_valid;
|
||||||
|
bool horizontal_bar_valid;
|
||||||
|
bool vertical_bar_valid;
|
||||||
|
enum hdmi_scan_mode scan_mode;
|
||||||
|
enum hdmi_colorimetry colorimetry;
|
||||||
|
enum hdmi_picture_aspect picture_aspect;
|
||||||
|
enum hdmi_active_aspect active_aspect;
|
||||||
|
bool itc;
|
||||||
|
enum hdmi_extended_colorimetry extended_colorimetry;
|
||||||
|
enum hdmi_quantization_range quantization_range;
|
||||||
|
enum hdmi_nups nups;
|
||||||
|
unsigned char video_code;
|
||||||
|
enum hdmi_ycc_quantization_range ycc_quantization_range;
|
||||||
|
enum hdmi_content_type content_type;
|
||||||
|
unsigned char pixel_repeat;
|
||||||
|
unsigned short top_bar;
|
||||||
|
unsigned short bottom_bar;
|
||||||
|
unsigned short left_bar;
|
||||||
|
unsigned short right_bar;
|
||||||
|
};
|
||||||
|
|
||||||
|
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
|
||||||
|
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
|
||||||
|
size_t size);
|
||||||
|
|
||||||
|
enum hdmi_spd_sdi {
|
||||||
|
HDMI_SPD_SDI_UNKNOWN,
|
||||||
|
HDMI_SPD_SDI_DSTB,
|
||||||
|
HDMI_SPD_SDI_DVDP,
|
||||||
|
HDMI_SPD_SDI_DVHS,
|
||||||
|
HDMI_SPD_SDI_HDDVR,
|
||||||
|
HDMI_SPD_SDI_DVC,
|
||||||
|
HDMI_SPD_SDI_DSC,
|
||||||
|
HDMI_SPD_SDI_VCD,
|
||||||
|
HDMI_SPD_SDI_GAME,
|
||||||
|
HDMI_SPD_SDI_PC,
|
||||||
|
HDMI_SPD_SDI_BD,
|
||||||
|
HDMI_SPD_SDI_SACD,
|
||||||
|
HDMI_SPD_SDI_HDDVD,
|
||||||
|
HDMI_SPD_SDI_PMP,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct hdmi_spd_infoframe {
|
||||||
|
enum hdmi_infoframe_type type;
|
||||||
|
unsigned char version;
|
||||||
|
unsigned char length;
|
||||||
|
char vendor[8];
|
||||||
|
char product[16];
|
||||||
|
enum hdmi_spd_sdi sdi;
|
||||||
|
};
|
||||||
|
|
||||||
|
int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
|
||||||
|
const char *vendor, const char *product);
|
||||||
|
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
|
||||||
|
size_t size);
|
||||||
|
|
||||||
|
enum hdmi_audio_coding_type {
|
||||||
|
HDMI_AUDIO_CODING_TYPE_STREAM,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_PCM,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_AC3,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_MPEG1,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_MP3,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_MPEG2,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_AAC_LC,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_DTS,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_ATRAC,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_DSD,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_EAC3,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_DTS_HD,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_MLP,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_DST,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_WMA_PRO,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_audio_sample_size {
|
||||||
|
HDMI_AUDIO_SAMPLE_SIZE_STREAM,
|
||||||
|
HDMI_AUDIO_SAMPLE_SIZE_16,
|
||||||
|
HDMI_AUDIO_SAMPLE_SIZE_20,
|
||||||
|
HDMI_AUDIO_SAMPLE_SIZE_24,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_audio_sample_frequency {
|
||||||
|
HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM,
|
||||||
|
HDMI_AUDIO_SAMPLE_FREQUENCY_32000,
|
||||||
|
HDMI_AUDIO_SAMPLE_FREQUENCY_44100,
|
||||||
|
HDMI_AUDIO_SAMPLE_FREQUENCY_48000,
|
||||||
|
HDMI_AUDIO_SAMPLE_FREQUENCY_88200,
|
||||||
|
HDMI_AUDIO_SAMPLE_FREQUENCY_96000,
|
||||||
|
HDMI_AUDIO_SAMPLE_FREQUENCY_176400,
|
||||||
|
HDMI_AUDIO_SAMPLE_FREQUENCY_192000,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum hdmi_audio_coding_type_ext {
|
||||||
|
HDMI_AUDIO_CODING_TYPE_EXT_STREAM,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
|
||||||
|
HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct hdmi_audio_infoframe {
|
||||||
|
enum hdmi_infoframe_type type;
|
||||||
|
unsigned char version;
|
||||||
|
unsigned char length;
|
||||||
|
unsigned char channels;
|
||||||
|
enum hdmi_audio_coding_type coding_type;
|
||||||
|
enum hdmi_audio_sample_size sample_size;
|
||||||
|
enum hdmi_audio_sample_frequency sample_frequency;
|
||||||
|
enum hdmi_audio_coding_type_ext coding_type_ext;
|
||||||
|
unsigned char channel_allocation;
|
||||||
|
unsigned char level_shift_value;
|
||||||
|
bool downmix_inhibit;
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
|
||||||
|
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
|
||||||
|
void *buffer, size_t size);
|
||||||
|
|
||||||
|
struct hdmi_vendor_infoframe {
|
||||||
|
enum hdmi_infoframe_type type;
|
||||||
|
unsigned char version;
|
||||||
|
unsigned char length;
|
||||||
|
u8 data[27];
|
||||||
|
};
|
||||||
|
|
||||||
|
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
|
||||||
|
void *buffer, size_t size);
|
||||||
|
|
||||||
|
#endif /* _DRM_HDMI_H */
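A short sketch of building and packing an AVI InfoFrame with this API. The destination buffer, its size, the chosen video code (assumed here to be the 1080p60 VIC) and the error handling are illustrative.

#include <linux/hdmi.h>

static ssize_t build_avi_infoframe(void *buf, size_t len)
{
        struct hdmi_avi_infoframe frame;
        int ret;

        ret = hdmi_avi_infoframe_init(&frame);     /* fills type/version/length */
        if (ret < 0)
                return ret;

        frame.colorspace     = HDMI_COLORSPACE_RGB;
        frame.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
        frame.video_code     = 16;                 /* assumed VIC for 1080p60 */

        /* Returns the number of bytes packed into buf, or a negative error. */
        return hdmi_avi_infoframe_pack(&frame, buf, len);
}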
@ -12,51 +12,30 @@
 #ifndef __IDR_H__
 #define __IDR_H__
 
+#include <syscall.h>
 #include <linux/types.h>
 #include <errno-base.h>
 #include <linux/bitops.h>
 //#include <linux/init.h>
 //#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
 
-struct rcu_head {
-	struct rcu_head *next;
-	void (*func)(struct rcu_head *head);
-};
-
-#if BITS_PER_LONG == 32
-# define IDR_BITS 5
-# define IDR_FULL 0xfffffffful
-/* We can only use two of the bits in the top level because there is
-   only one possible bit in the top level (5 bits * 7 levels = 35
-   bits, but you only use 31 bits in the id). */
-# define TOP_LEVEL_FULL (IDR_FULL >> 30)
-#elif BITS_PER_LONG == 64
-# define IDR_BITS 6
-# define IDR_FULL 0xfffffffffffffffful
-/* We can only use two of the bits in the top level because there is
-   only one possible bit in the top level (6 bits * 6 levels = 36
-   bits, but you only use 31 bits in the id). */
-# define TOP_LEVEL_FULL (IDR_FULL >> 62)
-#else
-# error "BITS_PER_LONG is not 32 or 64"
-#endif
+/*
+ * We want shallower trees and thus more bits covered at each layer. 8
+ * bits gives us large enough first layer for most use cases and maximum
+ * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and
+ * 1k on 32bit.
+ */
+#define IDR_BITS 8
 
 #define IDR_SIZE (1 << IDR_BITS)
 #define IDR_MASK ((1 << IDR_BITS)-1)
 
-#define MAX_ID_SHIFT (sizeof(int)*8 - 1)
-#define MAX_ID_BIT (1U << MAX_ID_SHIFT)
-#define MAX_ID_MASK (MAX_ID_BIT - 1)
-
-/* Leave the possibility of an incomplete final layer */
-#define MAX_LEVEL (MAX_ID_SHIFT + IDR_BITS - 1) / IDR_BITS
-
-/* Number of id_layer structs to leave in free list */
-#define IDR_FREE_MAX MAX_LEVEL + MAX_LEVEL
-
 struct idr_layer {
-	unsigned long		bitmap;	/* A zero bit means "space here" */
+	int			prefix;	/* the ID prefix of this idr_layer */
+	DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
 	struct idr_layer __rcu	*ary[1<<IDR_BITS];
 	int			count;	/* When zero, we can release it */
 	int			layer;	/* distance from leaf */
@ -64,29 +43,20 @@ struct idr_layer {
 };
 
 struct idr {
+	struct idr_layer __rcu	*hint;	/* the last layer allocated from */
 	struct idr_layer __rcu	*top;
 	struct idr_layer	*id_free;
-	int			layers;	/* only valid without concurrent changes */
+	int			layers;	/* only valid w/o concurrent changes */
 	int			id_free_cnt;
-//	spinlock_t		lock;
+	spinlock_t		lock;
 };
 
 #define IDR_INIT(name)						\
 {								\
-	.top		= NULL,					\
-	.id_free	= NULL,					\
-	.layers		= 0,					\
-	.id_free_cnt	= 0,					\
-//	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
+	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
 }
 #define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
 
-/* Actions to be taken after a call to _idr_sub_alloc */
-#define IDR_NEED_TO_GROW -2
-#define IDR_NOMORE_SPACE -3
-
-#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
-
 /**
  * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
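The allocation API declared in the next hunk, idr_preload()/idr_alloc(), replaces the old two-step idr_pre_get()/idr_get_new() pattern. A minimal sketch of the new flow; the idr instance is illustrative and GFP_KERNEL is assumed to be defined by this DDK (GFP_NOWAIT is added elsewhere in this commit).

#include <linux/idr.h>

static DEFINE_IDR(my_idr);                 /* illustrative idr instance */

static int register_object(void *obj)
{
        int id;

        idr_preload(GFP_KERNEL);           /* pre-allocate idr layers */
        /* ids start at 1; end == 0 means "no upper bound" in this API. */
        id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT);
        idr_preload_end();

        return id;                         /* new id, or a negative errno */
}

static void unregister_object(int id)
{
        idr_remove(&my_idr, id);
}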
@ -108,19 +78,90 @@ struct idr {
|
|||||||
* This is what we export.
|
* This is what we export.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
void *idr_find(struct idr *idp, int id);
|
void *idr_find_slowpath(struct idr *idp, int id);
|
||||||
int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
|
int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
|
||||||
int idr_get_new(struct idr *idp, void *ptr, int *id);
|
|
||||||
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
|
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
|
||||||
|
void idr_preload(gfp_t gfp_mask);
|
||||||
|
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
|
||||||
int idr_for_each(struct idr *idp,
|
int idr_for_each(struct idr *idp,
|
||||||
int (*fn)(int id, void *p, void *data), void *data);
|
int (*fn)(int id, void *p, void *data), void *data);
|
||||||
void *idr_get_next(struct idr *idp, int *nextid);
|
void *idr_get_next(struct idr *idp, int *nextid);
|
||||||
void *idr_replace(struct idr *idp, void *ptr, int id);
|
void *idr_replace(struct idr *idp, void *ptr, int id);
|
||||||
void idr_remove(struct idr *idp, int id);
|
void idr_remove(struct idr *idp, int id);
|
||||||
void idr_remove_all(struct idr *idp);
|
void idr_free(struct idr *idp, int id);
|
||||||
void idr_destroy(struct idr *idp);
|
void idr_destroy(struct idr *idp);
|
||||||
void idr_init(struct idr *idp);
|
void idr_init(struct idr *idp);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* idr_preload_end - end preload section started with idr_preload()
|
||||||
|
*
|
||||||
|
* Each idr_preload() should be matched with an invocation of this
|
||||||
|
* function. See idr_preload() for details.
|
||||||
|
*/
|
||||||
|
static inline void idr_preload_end(void)
|
||||||
|
{
|
||||||
|
// preempt_enable();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* idr_find - return pointer for given id
|
||||||
|
* @idp: idr handle
|
||||||
|
* @id: lookup key
|
||||||
|
*
|
||||||
|
* Return the pointer given the id it has been registered with. A %NULL
|
||||||
|
* return indicates that @id is not valid or you passed %NULL in
|
||||||
|
* idr_get_new().
|
||||||
|
*
|
||||||
|
* This function can be called under rcu_read_lock(), given that the leaf
|
||||||
|
* pointers lifetimes are correctly managed.
|
||||||
|
*/
|
||||||
|
static inline void *idr_find(struct idr *idr, int id)
|
||||||
|
{
|
||||||
|
struct idr_layer *hint = rcu_dereference_raw(idr->hint);
|
||||||
|
|
||||||
|
if (hint && (id & ~IDR_MASK) == hint->prefix)
|
||||||
|
return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
|
||||||
|
|
||||||
|
return idr_find_slowpath(idr, id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* idr_get_new - allocate new idr entry
|
||||||
|
* @idp: idr handle
|
||||||
|
* @ptr: pointer you want associated with the id
|
||||||
|
* @id: pointer to the allocated handle
|
||||||
|
*
|
||||||
|
* Simple wrapper around idr_get_new_above() w/ @starting_id of zero.
|
||||||
|
*/
|
||||||
|
static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
|
||||||
|
{
|
||||||
|
return idr_get_new_above(idp, ptr, 0, id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* idr_for_each_entry - iterate over an idr's elements of a given type
|
||||||
|
* @idp: idr handle
|
||||||
|
* @entry: the type * to use as cursor
|
||||||
|
* @id: id entry's key
|
||||||
|
*/
|
||||||
|
#define idr_for_each_entry(idp, entry, id) \
|
||||||
|
for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
|
||||||
|
entry != NULL; \
|
||||||
|
++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
|
||||||
|
|
||||||
|
void __idr_remove_all(struct idr *idp); /* don't use */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* idr_remove_all - remove all ids from the given idr tree
|
||||||
|
* @idp: idr handle
|
||||||
|
*
|
||||||
|
* If you're trying to destroy @idp, calling idr_destroy() is enough.
|
||||||
|
* This is going away. Don't use.
|
||||||
|
*/
|
||||||
|
static inline void __deprecated idr_remove_all(struct idr *idp)
|
||||||
|
{
|
||||||
|
__idr_remove_all(idp);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* IDA - IDR based id allocator, use when translation from id to
|
* IDA - IDR based id allocator, use when translation from id to
|
||||||
@ -143,16 +184,17 @@ struct ida {
|
|||||||
struct ida_bitmap *free_bitmap;
|
struct ida_bitmap *free_bitmap;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, }
|
#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
|
||||||
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
|
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
|
||||||
|
|
||||||
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
|
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
|
||||||
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
|
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
|
||||||
int ida_get_new(struct ida *ida, int *p_id);
|
|
||||||
void ida_remove(struct ida *ida, int id);
|
void ida_remove(struct ida *ida, int id);
|
||||||
void ida_destroy(struct ida *ida);
|
void ida_destroy(struct ida *ida);
|
||||||
void ida_init(struct ida *ida);
|
void ida_init(struct ida *ida);
|
||||||
|
|
||||||
void idr_init_cache(void);
|
void __init idr_init_cache(void);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#endif /* __IDR_H__ */
|
#endif /* __IDR_H__ */
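The IDA side keeps the two-step allocation, and this commit drops ida_get_new(), so new code goes through ida_get_new_above(). A sketch with an illustrative ida instance; GFP_KERNEL and the errno values are assumed to come from the DDK headers.

#include <linux/idr.h>
#include <errno-base.h>

static DEFINE_IDA(my_ida);                 /* illustrative ida instance */

static int alloc_minor(void)
{
        int id, ret;

        if (!ida_pre_get(&my_ida, GFP_KERNEL))
                return -ENOMEM;
        ret = ida_get_new_above(&my_ida, 0, &id);
        if (ret)
                return ret;                /* -EAGAIN means retry ida_pre_get() */
        return id;
}

static void free_minor(int id)
{
        ida_remove(&my_ida, id);
}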
|
||||||
|
@ -338,9 +338,6 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
|
|||||||
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
|
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
struct page
|
struct page
|
||||||
{
|
{
|
||||||
unsigned int addr;
|
unsigned int addr;
|
||||||
@ -365,9 +362,48 @@ struct pagelist {
|
|||||||
unsigned int nents;
|
unsigned int nents;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define page_cache_release(page) FreePage((addr_t)(page))
|
#define page_cache_release(page) FreePage(page_to_phys(page))
|
||||||
|
|
||||||
#define alloc_page(gfp_mask) (struct page*)AllocPage()
|
#define alloc_page(gfp_mask) (struct page*)AllocPage()
|
||||||
|
|
||||||
|
#define __free_page(page) FreePage(page_to_phys(page))
|
||||||
|
|
||||||
|
#define get_page(a)
|
||||||
|
#define put_page(a)
|
||||||
|
#define set_pages_uc(a,b)
|
||||||
|
#define set_pages_wb(a,b)
|
||||||
|
|
||||||
|
#define pci_map_page(dev, page, offset, size, direction) \
|
||||||
|
(dma_addr_t)( (offset)+page_to_phys(page))
|
||||||
|
|
||||||
|
#define pci_unmap_page(dev, dma_address, size, direction)
|
||||||
|
|
||||||
|
#define GFP_TEMPORARY 0
|
||||||
|
#define __GFP_NOWARN 0
|
||||||
|
#define __GFP_NORETRY 0
|
||||||
|
#define GFP_NOWAIT 0
|
||||||
|
|
||||||
|
#define IS_ENABLED(a) 0
|
||||||
|
|
||||||
|
|
||||||
|
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
|
||||||
|
|
||||||
|
#define RCU_INIT_POINTER(p, v) \
|
||||||
|
do { \
|
||||||
|
p = (typeof(*v) __force __rcu *)(v); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
|
||||||
|
#define rcu_dereference_raw(p) ({ \
|
||||||
|
typeof(p) _________p1 = ACCESS_ONCE(p); \
|
||||||
|
(_________p1); \
|
||||||
|
})
|
||||||
|
#define rcu_assign_pointer(p, v) \
|
||||||
|
({ \
|
||||||
|
if (!__builtin_constant_p(v) || \
|
||||||
|
((v) != NULL)) \
|
||||||
|
(p) = (v); \
|
||||||
|
})
|
||||||
|
|
||||||
#endif
|
#endif
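The RCU helpers added above are deliberately stripped down: rcu_assign_pointer() reduces to a plain store and rcu_dereference_raw() to an ACCESS_ONCE-style volatile read, but keeping the macro names preserves the Linux code they were ported from. A sketch; `struct my_state` is illustrative.

struct my_state {
        int value;
};

static struct my_state *current_state;

static void publish_state(struct my_state *st)
{
        rcu_assign_pointer(current_state, st);    /* plain assignment in this DDK */
}

static int read_state_value(void)
{
        struct my_state *st = rcu_dereference_raw(current_state);

        return st ? st->value : 0;                /* volatile read via ACCESS_ONCE */
}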
|
||||||
|
|
||||||
|
@ -10,7 +10,7 @@
|
|||||||
#include <linux/compiler.h>
|
#include <linux/compiler.h>
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/export.h>
|
#include <linux/export.h>
|
||||||
|
#include <linux/moduleparam.h>
|
||||||
|
|
||||||
|
|
||||||
#define MODULE_FIRMWARE(x)
|
#define MODULE_FIRMWARE(x)
|
||||||
|
@ -72,6 +72,15 @@ void __attribute__ ((fastcall)) __attribute__ ((dllimport))
|
|||||||
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
|
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
|
||||||
mutex_unlock(struct mutex*)__asm__("MutexUnlock");
|
mutex_unlock(struct mutex*)__asm__("MutexUnlock");
|
||||||
|
|
||||||
|
static inline int mutex_lock_interruptible(struct mutex *lock)
|
||||||
|
{
|
||||||
|
mutex_lock(lock);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
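In this port mutex_lock_interruptible() cannot actually be interrupted; it is a thin wrapper that always returns 0, so Linux-derived code that checks its return value keeps compiling unchanged. A sketch; `struct my_dev` and its struct_mutex field are illustrative.

static int my_ioctl_locked(struct my_dev *dev)
{
        int ret = mutex_lock_interruptible(&dev->struct_mutex);

        if (ret)
                return ret;     /* never taken here, kept for source compatibility */

        /* ... critical section ... */

        mutex_unlock(&dev->struct_mutex);
        return 0;
}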
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mutex_is_locked - is the mutex locked
|
* mutex_is_locked - is the mutex locked
|
||||||
* @lock: the mutex to be queried
|
* @lock: the mutex to be queried
|
||||||
|
@ -663,7 +663,8 @@ find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist);
|
|||||||
|
|
||||||
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
|
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
|
||||||
|
|
||||||
int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
|
#define pci_set_dma_mask(a, b) 0
|
||||||
|
#define pci_set_consistent_dma_mask(a, b)
|
||||||
|
|
||||||
struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
|
struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
|
||||||
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
|
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
|
||||||
|
@ -1,30 +1,9 @@
|
|||||||
/* stub */
|
#ifndef _LINUX_SCHED_H
|
||||||
|
#define _LINUX_SCHED_H
|
||||||
|
|
||||||
/*
|
|
||||||
static inline void mdelay(u32_t time)
|
|
||||||
{
|
|
||||||
time /= 10;
|
|
||||||
if(!time) time = 1;
|
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
#define TASK_UNINTERRUPTIBLE 2
|
||||||
"call *__imp__Delay"
|
|
||||||
::"b" (time));
|
|
||||||
__asm__ __volatile__ (
|
|
||||||
"":::"ebx");
|
|
||||||
|
|
||||||
};
|
#define schedule_timeout(x) delay(x)
|
||||||
|
|
||||||
static inline void udelay(u32_t delay)
|
#endif
|
||||||
{
|
|
||||||
if(!delay) delay++;
|
|
||||||
delay*= 500;
|
|
||||||
|
|
||||||
while(delay--)
|
|
||||||
{
|
|
||||||
__asm__ __volatile__(
|
|
||||||
"xorl %%eax, %%eax \n\t"
|
|
||||||
"cpuid"
|
|
||||||
:::"eax","ebx","ecx","edx" );
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
11
drivers/include/linux/shmem_fs.h
Normal file
11
drivers/include/linux/shmem_fs.h
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
#ifndef __SHMEM_FS_H
|
||||||
|
#define __SHMEM_FS_H
|
||||||
|
|
||||||
|
#include <kernel.h>
|
||||||
|
|
||||||
|
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
|
||||||
|
struct page *shmem_read_mapping_page_gfp(struct file *filep,
|
||||||
|
pgoff_t index, gfp_t gfp);
|
||||||
|
|
||||||
|
|
||||||
|
#endif
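A sketch of using the two helpers declared above as a GEM-style backing store. The object name, size handling and GFP_KERNEL are illustrative assumptions.

#include <linux/shmem_fs.h>

static struct file *create_backing_store(size_t size)
{
        /* Anonymous backing object; the name is used for debugging only. */
        return shmem_file_setup("gem object", size, 0);
}

static struct page *get_backing_page(struct file *filp, pgoff_t index)
{
        /* Returns the page backing byte offset index * PAGE_SIZE. */
        return shmem_read_mapping_page_gfp(filp, index, GFP_KERNEL);
}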
|
@ -272,21 +272,6 @@ typedef unsigned int count_t;
|
|||||||
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
|
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#ifndef HAVE_ARCH_BUG
|
|
||||||
#define BUG() do { \
|
|
||||||
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
|
|
||||||
/* panic("BUG!"); */ \
|
|
||||||
} while (0)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifndef HAVE_ARCH_BUG_ON
|
|
||||||
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#define MTRR_TYPE_UNCACHABLE 0
|
#define MTRR_TYPE_UNCACHABLE 0
|
||||||
#define MTRR_TYPE_WRCOMB 1
|
#define MTRR_TYPE_WRCOMB 1
|
||||||
#define MTRR_TYPE_WRTHROUGH 4
|
#define MTRR_TYPE_WRTHROUGH 4
|
||||||
@ -313,8 +298,15 @@ char *strcpy(char *s1, const char *s2);
|
|||||||
char *strncpy (char *dst, const char *src, size_t len);
|
char *strncpy (char *dst, const char *src, size_t len);
|
||||||
|
|
||||||
void *malloc(size_t size);
|
void *malloc(size_t size);
|
||||||
|
void* realloc(void* oldmem, size_t bytes);
|
||||||
|
|
||||||
#define kfree free
|
#define kfree free
|
||||||
|
|
||||||
|
static inline void *krealloc(void *p, size_t new_size, gfp_t flags)
|
||||||
|
{
|
||||||
|
return realloc(p, new_size);
|
||||||
|
}
|
||||||
|
|
||||||
static inline void *kzalloc(size_t size, uint32_t flags)
|
static inline void *kzalloc(size_t size, uint32_t flags)
|
||||||
{
|
{
|
||||||
void *ret = malloc(size);
|
void *ret = malloc(size);
|
||||||
@ -324,6 +316,9 @@ static inline void *kzalloc(size_t size, uint32_t flags)
|
|||||||
|
|
||||||
#define kmalloc(s,f) kzalloc((s), (f))
|
#define kmalloc(s,f) kzalloc((s), (f))
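Note that kmalloc() is mapped onto kzalloc() here, so every fresh allocation is zeroed, while krealloc() simply forwards to realloc() and ignores its gfp flags. A small sketch of growing an array under those rules; memset() and GFP_KERNEL are assumed to be available from the DDK.

static int *grow_ids(int *ids, size_t old_n, size_t new_n)
{
        int *tmp = krealloc(ids, new_n * sizeof(*tmp), GFP_KERNEL);

        if (!tmp)
                return NULL;
        /* Unlike the initial kzalloc(), the grown tail is not zeroed. */
        memset(tmp + old_n, 0, (new_n - old_n) * sizeof(*tmp));
        return tmp;
}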
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
struct drm_file;
|
struct drm_file;
|
||||||
|
|
||||||
|
|
||||||
@ -353,5 +348,15 @@ struct timeval
|
|||||||
#define __read_mostly
|
#define __read_mostly
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct callback_head - callback structure for use with RCU and task_work
|
||||||
|
* @next: next update requests in a list
|
||||||
|
* @func: actual update function to call after the grace period.
|
||||||
|
*/
|
||||||
|
struct callback_head {
|
||||||
|
struct callback_head *next;
|
||||||
|
void (*func)(struct callback_head *head);
|
||||||
|
};
|
||||||
|
#define rcu_head callback_head
|
||||||
|
|
||||||
#endif /* _LINUX_TYPES_H */
|
#endif /* _LINUX_TYPES_H */
|
||||||
|
983
drivers/include/linux/uapi/drm/i915_drm.h
Normal file
983
drivers/include/linux/uapi/drm/i915_drm.h
Normal file
@ -0,0 +1,983 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||||
|
* All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
* copy of this software and associated documentation files (the
|
||||||
|
* "Software"), to deal in the Software without restriction, including
|
||||||
|
* without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
* distribute, sub license, and/or sell copies of the Software, and to
|
||||||
|
* permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
* the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice (including the
|
||||||
|
* next paragraph) shall be included in all copies or substantial portions
|
||||||
|
* of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||||
|
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
|
||||||
|
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
|
||||||
|
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||||
|
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||||
|
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _UAPI_I915_DRM_H_
|
||||||
|
#define _UAPI_I915_DRM_H_
|
||||||
|
|
||||||
|
#include <drm/drm.h>

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */


/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
                                 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
    enum {
        I915_INIT_DMA = 0x01,
        I915_CLEANUP_DMA = 0x02,
        I915_RESUME_DMA = 0x03
    } func;
    unsigned int mmio_offset;
    int sarea_priv_offset;
    unsigned int ring_start;
    unsigned int ring_end;
    unsigned int ring_size;
    unsigned int front_offset;
    unsigned int back_offset;
    unsigned int depth_offset;
    unsigned int w;
    unsigned int h;
    unsigned int pitch;
    unsigned int pitch_bits;
    unsigned int back_pitch;
    unsigned int depth_pitch;
    unsigned int cpp;
    unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
    struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
    int last_upload;     /* last time texture was uploaded */
    int last_enqueue;    /* last time a buffer was enqueued */
    int last_dispatch;   /* age of the most recently dispatched buffer */
    int ctxOwner;        /* last context to upload state */
    int texAge;
    int pf_enabled;      /* is pageflipping allowed? */
    int pf_active;
    int pf_current_page; /* which buffer is being displayed? */
    int perf_boxes;      /* performance boxes to be displayed */
    int width, height;   /* screen size in pixels */

    drm_handle_t front_handle;
    int front_offset;
    int front_size;

    drm_handle_t back_handle;
    int back_offset;
    int back_size;

    drm_handle_t depth_handle;
    int depth_offset;
    int depth_size;

    drm_handle_t tex_handle;
    int tex_offset;
    int tex_size;
    int log_tex_granularity;
    int pitch;
    int rotation; /* 0, 90, 180 or 270 */
    int rotated_offset;
    int rotated_size;
    int rotated_pitch;
    int virtualX, virtualY;

    unsigned int front_tiled;
    unsigned int back_tiled;
    unsigned int depth_tiled;
    unsigned int rotated_tiled;
    unsigned int rotated2_tiled;

    int pipeA_x;
    int pipeA_y;
    int pipeA_w;
    int pipeA_h;
    int pipeB_x;
    int pipeB_y;
    int pipeB_w;
    int pipeB_h;

    /* fill out some space for old userspace triple buffer */
    drm_handle_t unused_handle;
    __u32 unused1, unused2, unused3;

    /* buffer object handles for static buffers. May change
     * over the lifetime of the client.
     */
    __u32 front_bo_handle;
    __u32 back_bo_handle;
    __u32 unused_bo_handle;
    __u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10

/* I915 specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 */
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
#define DRM_I915_FLIP 0x02
#define DRM_I915_BATCHBUFFER 0x03
#define DRM_I915_IRQ_EMIT 0x04
#define DRM_I915_IRQ_WAIT 0x05
#define DRM_I915_GETPARAM 0x06
#define DRM_I915_SETPARAM 0x07
#define DRM_I915_ALLOC 0x08
#define DRM_I915_FREE 0x09
#define DRM_I915_INIT_HEAP 0x0a
#define DRM_I915_CMDBUFFER 0x0b
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_I915_GEM_SET_CACHING 0x2f
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31

#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)

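The DRM_IOCTL_* table above is the complete per-device ioctl interface. As an illustration only (not part of this header), querying one parameter on a Linux-style DRM node could look like the sketch below; the /dev/dri path, the assumption that __user expands to nothing outside the kernel, and the omitted error handling are all assumptions, not something this commit defines.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "i915_drm.h"                      /* this header */

int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);   /* assumed device node */
    int chipset = 0;
    drm_i915_getparam_t gp = {
        .param = I915_PARAM_CHIPSET_ID,
        .value = &chipset,                 /* the kernel writes the result here */
    };
    if (fd >= 0 && ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
        printf("PCI chipset id: 0x%04x\n", chipset);
    return 0;
}
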
/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
    int start;         /* agp offset */
    int used;          /* nr bytes in use */
    int DR1;           /* hw flags for GFX_OP_DRAWRECT_INFO */
    int DR4;           /* window origin for GFX_OP_DRAWRECT_INFO */
    int num_cliprects; /* multipass with multiple cliprects? */
    struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
    char __user *buf;  /* pointer to userspace command buffer */
    int sz;            /* nr bytes in buf */
    int DR1;           /* hw flags for GFX_OP_DRAWRECT_INFO */
    int DR4;           /* window origin for GFX_OP_DRAWRECT_INFO */
    int num_cliprects; /* multipass with multiple cliprects? */
    struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
    int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
    int irq_seq;
} drm_i915_irq_wait_t;

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
#define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
#define I915_PARAM_HAS_SECURE_BATCHES 23
#define I915_PARAM_HAS_PINNED_BATCHES 24
#define I915_PARAM_HAS_EXEC_NO_RELOC 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26

typedef struct drm_i915_getparam {
    int param;
    int __user *value;
} drm_i915_getparam_t;

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4

typedef struct drm_i915_setparam {
    int param;
    int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
    int region;
    int alignment;
    int size;
    int __user *region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
    int region;
    int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
    int region;
    int size;
    int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
    int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define DRM_I915_VBLANK_PIPE_A 1
#define DRM_I915_VBLANK_PIPE_B 2

typedef struct drm_i915_vblank_pipe {
    int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
    drm_drawable_t drawable;
    enum drm_vblank_seq_type seqtype;
    unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
    __u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
    /**
     * Beginning offset in the GTT to be managed by the DRM memory
     * manager.
     */
    __u64 gtt_start;
    /**
     * Ending offset in the GTT to be managed by the DRM memory
     * manager.
     */
    __u64 gtt_end;
};

struct drm_i915_gem_create {
    /**
     * Requested size for the object.
     *
     * The (page-aligned) allocated size for the object will be returned.
     */
    __u64 size;
    /**
     * Returned handle for the object.
     *
     * Object handles are nonzero.
     */
    __u32 handle;
    __u32 pad;
};

struct drm_i915_gem_pread {
    /** Handle for the object being read. */
    __u32 handle;
    __u32 pad;
    /** Offset into the object to read from */
    __u64 offset;
    /** Length of data to read */
    __u64 size;
    /**
     * Pointer to write the data into.
     *
     * This is a fixed-size type for 32/64 compatibility.
     */
    __u64 data_ptr;
};

struct drm_i915_gem_pwrite {
    /** Handle for the object being written to. */
    __u32 handle;
    __u32 pad;
    /** Offset into the object to write to */
    __u64 offset;
    /** Length of data to write */
    __u64 size;
    /**
     * Pointer to read the data from.
     *
     * This is a fixed-size type for 32/64 compatibility.
     */
    __u64 data_ptr;
};

struct drm_i915_gem_mmap {
    /** Handle for the object being mapped. */
    __u32 handle;
    __u32 pad;
    /** Offset in the object to map. */
    __u64 offset;
    /**
     * Length of data to map.
     *
     * The value will be page-aligned.
     */
    __u64 size;
    /**
     * Returned pointer the data was mapped at.
     *
     * This is a fixed-size type for 32/64 compatibility.
     */
    __u64 addr_ptr;
};

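Taken together, drm_i915_gem_create, drm_i915_gem_pwrite and drm_i915_gem_mmap describe the basic buffer-object lifecycle. A minimal sketch, illustrative only: the open fd and the error handling are assumptions, and the kernel is relied on to round the size up to pages.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Create a 4 KiB GEM object, upload a few bytes, then map it. */
static void *create_and_map(int fd, uint32_t *handle_out)
{
    struct drm_i915_gem_create create = { .size = 4096 };
    if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
        return 0;

    char data[16] = "hello";
    struct drm_i915_gem_pwrite pwrite = {
        .handle   = create.handle,
        .offset   = 0,
        .size     = sizeof(data),
        .data_ptr = (uintptr_t)data,    /* fixed-size pointer field */
    };
    ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);

    struct drm_i915_gem_mmap map = {
        .handle = create.handle,
        .offset = 0,
        .size   = create.size,          /* page-aligned size returned by CREATE */
    };
    if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &map))
        return 0;

    *handle_out = create.handle;
    return (void *)(uintptr_t)map.addr_ptr;
}
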
struct drm_i915_gem_mmap_gtt {
    /** Handle for the object being mapped. */
    __u32 handle;
    __u32 pad;
    /**
     * Fake offset to use for subsequent mmap call
     *
     * This is a fixed-size type for 32/64 compatibility.
     */
    __u64 offset;
};

struct drm_i915_gem_set_domain {
    /** Handle for the object */
    __u32 handle;

    /** New read domains */
    __u32 read_domains;

    /** New write domain */
    __u32 write_domain;
};

struct drm_i915_gem_sw_finish {
    /** Handle for the object */
    __u32 handle;
};

struct drm_i915_gem_relocation_entry {
    /**
     * Handle of the buffer being pointed to by this relocation entry.
     *
     * It's appealing to make this be an index into the mm_validate_entry
     * list to refer to the buffer, but this allows the driver to create
     * a relocation list for state buffers and not re-write it per
     * exec using the buffer.
     */
    __u32 target_handle;

    /**
     * Value to be added to the offset of the target buffer to make up
     * the relocation entry.
     */
    __u32 delta;

    /** Offset in the buffer the relocation entry will be written into */
    __u64 offset;

    /**
     * Offset value of the target buffer that the relocation entry was last
     * written as.
     *
     * If the buffer has the same offset as last time, we can skip syncing
     * and writing the relocation. This value is written back out by
     * the execbuffer ioctl when the relocation is written.
     */
    __u64 presumed_offset;

    /**
     * Target memory domains read by this operation.
     */
    __u32 read_domains;

    /**
     * Target memory domains written by this operation.
     *
     * Note that only one domain may be written by the whole
     * execbuffer operation, so that where there are conflicts,
     * the application will get -EINVAL back.
     */
    __u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU 0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER 0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND 0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
/** @} */

struct drm_i915_gem_exec_object {
    /**
     * User's handle for a buffer to be bound into the GTT for this
     * operation.
     */
    __u32 handle;

    /** Number of relocations to be performed on this buffer */
    __u32 relocation_count;
    /**
     * Pointer to array of struct drm_i915_gem_relocation_entry containing
     * the relocations to be performed in this buffer.
     */
    __u64 relocs_ptr;

    /** Required alignment in graphics aperture */
    __u64 alignment;

    /**
     * Returned value of the updated offset of the object, for future
     * presumed_offset writes.
     */
    __u64 offset;
};

struct drm_i915_gem_execbuffer {
    /**
     * List of buffers to be validated with their relocations to be
     * performed on them.
     *
     * This is a pointer to an array of struct drm_i915_gem_validate_entry.
     *
     * These buffers must be listed in an order such that all relocations
     * a buffer is performing refer to buffers that have already appeared
     * in the validate list.
     */
    __u64 buffers_ptr;
    __u32 buffer_count;

    /** Offset in the batchbuffer to start execution from. */
    __u32 batch_start_offset;
    /** Bytes used in batchbuffer from batch_start_offset */
    __u32 batch_len;
    __u32 DR1;
    __u32 DR4;
    __u32 num_cliprects;
    /** This is a struct drm_clip_rect *cliprects */
    __u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
    /**
     * User's handle for a buffer to be bound into the GTT for this
     * operation.
     */
    __u32 handle;

    /** Number of relocations to be performed on this buffer */
    __u32 relocation_count;
    /**
     * Pointer to array of struct drm_i915_gem_relocation_entry containing
     * the relocations to be performed in this buffer.
     */
    __u64 relocs_ptr;

    /** Required alignment in graphics aperture */
    __u64 alignment;

    /**
     * Returned value of the updated offset of the object, for future
     * presumed_offset writes.
     */
    __u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
    __u64 flags;

    __u64 rsvd1;
    __u64 rsvd2;
};

struct drm_i915_gem_execbuffer2 {
    /**
     * List of gem_exec_object2 structs
     */
    __u64 buffers_ptr;
    __u32 buffer_count;

    /** Offset in the batchbuffer to start execution from. */
    __u32 batch_start_offset;
    /** Bytes used in batchbuffer from batch_start_offset */
    __u32 batch_len;
    __u32 DR1;
    __u32 DR4;
    __u32 num_cliprects;
    /** This is a struct drm_clip_rect *cliprects */
    __u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
    __u64 flags;
    __u64 rsvd1; /* now used for context info */
    __u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE (1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED (1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses, and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC (1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT (1<<12)

#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)

#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
    (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
    ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)

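The context-id helpers above simply pack the context handle into rsvd1. A hedged sketch of how an exec call could be assembled from these structs; the fd, the batch handle and the batch length are assumptions supplied by the caller, and the single-object case is the simplest legal layout (the batch buffer is the last entry in the object list).

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len, uint32_t ctx_id)
{
    struct drm_i915_gem_exec_object2 obj;
    memset(&obj, 0, sizeof(obj));
    obj.handle = batch_handle;          /* last object in the list is the batch */

    struct drm_i915_gem_execbuffer2 eb;
    memset(&eb, 0, sizeof(eb));
    eb.buffers_ptr        = (uintptr_t)&obj;
    eb.buffer_count       = 1;
    eb.batch_start_offset = 0;
    eb.batch_len          = batch_len;
    eb.flags              = I915_EXEC_RENDER;   /* render ring, default constants mode */
    i915_execbuffer2_set_context_id(eb, ctx_id);

    return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
}
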
struct drm_i915_gem_pin {
    /** Handle of the buffer to be pinned. */
    __u32 handle;
    __u32 pad;

    /** alignment required within the aperture */
    __u64 alignment;

    /** Returned GTT offset of the buffer. */
    __u64 offset;
};

struct drm_i915_gem_unpin {
    /** Handle of the buffer to be unpinned. */
    __u32 handle;
    __u32 pad;
};

struct drm_i915_gem_busy {
    /** Handle of the buffer to check for busy */
    __u32 handle;

    /** Return busy status (1 if busy, 0 if idle).
     * The high word is used to indicate on which rings the object
     * currently resides:
     *   16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
     */
    __u32 busy;
};

#define I915_CACHING_NONE 0
#define I915_CACHING_CACHED 1

struct drm_i915_gem_caching {
    /**
     * Handle of the buffer to set/get the caching level of. */
    __u32 handle;

    /**
     * Caching level to apply or return value
     *
     * bits0-15 are for generic caching control (i.e. the above defined
     * values). bits16-31 are reserved for platform-specific variations
     * (e.g. l3$ caching on gen7). */
    __u32 caching;
};

#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2

#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
#define I915_BIT_6_SWIZZLE_9_10 2
#define I915_BIT_6_SWIZZLE_9_11 3
#define I915_BIT_6_SWIZZLE_9_10_11 4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17 6
#define I915_BIT_6_SWIZZLE_9_10_17 7

struct drm_i915_gem_set_tiling {
    /** Handle of the buffer to have its tiling state updated */
    __u32 handle;

    /**
     * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
     * I915_TILING_Y).
     *
     * This value is to be set on request, and will be updated by the
     * kernel on successful return with the actual chosen tiling layout.
     *
     * The tiling mode may be demoted to I915_TILING_NONE when the system
     * has bit 6 swizzling that can't be managed correctly by GEM.
     *
     * Buffer contents become undefined when changing tiling_mode.
     */
    __u32 tiling_mode;

    /**
     * Stride in bytes for the object when in I915_TILING_X or
     * I915_TILING_Y.
     */
    __u32 stride;

    /**
     * Returned address bit 6 swizzling required for CPU access through
     * mmap mapping.
     */
    __u32 swizzle_mode;
};

struct drm_i915_gem_get_tiling {
    /** Handle of the buffer to get tiling state for. */
    __u32 handle;

    /**
     * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
     * I915_TILING_Y).
     */
    __u32 tiling_mode;

    /**
     * Returned address bit 6 swizzling required for CPU access through
     * mmap mapping.
     */
    __u32 swizzle_mode;
};

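A short illustration of the set-tiling handshake described above: the requested mode may come back demoted, so the caller re-reads tiling_mode after the ioctl. The fd, handle and stride are assumed inputs; this is a sketch, not part of the header.

#include <sys/ioctl.h>
#include "i915_drm.h"

static int request_x_tiling(int fd, __u32 handle, __u32 stride)
{
    struct drm_i915_gem_set_tiling st = {
        .handle      = handle,
        .tiling_mode = I915_TILING_X,
        .stride      = stride,          /* bytes per row while X-tiled */
    };
    if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st))
        return -1;
    /* The kernel may have demoted the request; honour what it actually chose. */
    return st.tiling_mode == I915_TILING_X ? 0 : 1;
}
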
struct drm_i915_gem_get_aperture {
    /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
    __u64 aper_size;

    /**
     * Available space in the aperture used by i915_gem_execbuffer, in
     * bytes
     */
    __u64 aper_available_size;
};

struct drm_i915_get_pipe_from_crtc_id {
    /** ID of CRTC being requested **/
    __u32 crtc_id;

    /** pipe of requested CRTC **/
    __u32 pipe;
};

#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

struct drm_i915_gem_madvise {
    /** Handle of the buffer to change the backing store advice */
    __u32 handle;

    /* Advice: either the buffer will be needed again in the near future,
     * or won't be and could be discarded under memory pressure.
     */
    __u32 madv;

    /** Whether the backing store still exists. */
    __u32 retained;
};

/* flags */
#define I915_OVERLAY_TYPE_MASK 0xff
#define I915_OVERLAY_YUV_PLANAR 0x01
#define I915_OVERLAY_YUV_PACKED 0x02
#define I915_OVERLAY_RGB 0x03

#define I915_OVERLAY_DEPTH_MASK 0xff00
#define I915_OVERLAY_RGB24 0x1000
#define I915_OVERLAY_RGB16 0x2000
#define I915_OVERLAY_RGB15 0x3000
#define I915_OVERLAY_YUV422 0x0100
#define I915_OVERLAY_YUV411 0x0200
#define I915_OVERLAY_YUV420 0x0300
#define I915_OVERLAY_YUV410 0x0400

#define I915_OVERLAY_SWAP_MASK 0xff0000
#define I915_OVERLAY_NO_SWAP 0x000000
#define I915_OVERLAY_UV_SWAP 0x010000
#define I915_OVERLAY_Y_SWAP 0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000

#define I915_OVERLAY_FLAGS_MASK 0xff000000
#define I915_OVERLAY_ENABLE 0x01000000

struct drm_intel_overlay_put_image {
    /* various flags and src format description */
    __u32 flags;
    /* source picture description */
    __u32 bo_handle;
    /* stride values and offsets are in bytes, buffer relative */
    __u16 stride_Y; /* stride for packed formats */
    __u16 stride_UV;
    __u32 offset_Y; /* offset for packed formats */
    __u32 offset_U;
    __u32 offset_V;
    /* in pixels */
    __u16 src_width;
    __u16 src_height;
    /* to compensate the scaling factors for partially covered surfaces */
    __u16 src_scan_width;
    __u16 src_scan_height;
    /* output crtc description */
    __u32 crtc_id;
    __u16 dst_x;
    __u16 dst_y;
    __u16 dst_width;
    __u16 dst_height;
};

/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
struct drm_intel_overlay_attrs {
    __u32 flags;
    __u32 color_key;
    __s32 brightness;
    __u32 contrast;
    __u32 saturation;
    __u32 gamma0;
    __u32 gamma1;
    __u32 gamma2;
    __u32 gamma3;
    __u32 gamma4;
    __u32 gamma5;
};

/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple. Both source and destination
 * color keying is allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent. All other pixels will
 * be displayed on top of the primary plane. For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
    __u32 plane_id;
    __u32 min_value;
    __u32 channel_mask;
    __u32 max_value;
    __u32 flags;
};

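Following the source/destination keying rules in the comment above, a source-key request that makes pure-black sprite pixels transparent could look like this sketch; the plane id and fd are assumptions, and for RGB surfaces only min and mask are consulted.

#include <sys/ioctl.h>
#include "i915_drm.h"

static int set_black_source_key(int fd, __u32 plane_id)
{
    struct drm_intel_sprite_colorkey key = {
        .plane_id     = plane_id,
        .min_value    = 0x000000,   /* RGB: only min and mask are used */
        .channel_mask = 0xffffff,
        .max_value    = 0x000000,   /* ignored for RGB; ranged compares not allowed */
        .flags        = I915_SET_COLORKEY_SOURCE,
    };
    return ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &key);
}
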
struct drm_i915_gem_wait {
    /** Handle of BO we shall wait on */
    __u32 bo_handle;
    __u32 flags;
    /** Number of nanoseconds to wait. Returns the time remaining. */
    __s64 timeout_ns;
};

struct drm_i915_gem_context_create {
    /* output: id of new context */
    __u32 ctx_id;
    __u32 pad;
};

struct drm_i915_gem_context_destroy {
    __u32 ctx_id;
    __u32 pad;
};

struct drm_i915_reg_read {
    __u64 offset;
    __u64 val; /* Return value */
};

#endif /* _UAPI_I915_DRM_H_ */
@@ -5,10 +5,13 @@
 #include <syscall.h>

 typedef struct __wait_queue wait_queue_t;
+typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
+
 typedef struct __wait_queue_head wait_queue_head_t;

 struct __wait_queue
 {
+    wait_queue_func_t func;
     struct list_head task_list;
     evhandle_t evnt;
 };
@@ -58,7 +61,7 @@ do{ \
     for(;;){ \
         if (condition) \
             break; \
-        WaitEvent(__wait.evnt); \
+        WaitEventTimeout(__wait.evnt, timeout); \
     }; \
     if (!list_empty(&__wait.task_list)) { \
         spin_lock_irqsave(&wq.lock, flags); \
@@ -191,6 +194,18 @@ struct completion {
     wait_queue_head_t wait;
 };

+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+
+
+#define DEFINE_WAIT_FUNC(name, function) \
+    wait_queue_t name = { \
+        .func = function, \
+        .task_list = LIST_HEAD_INIT((name).task_list), \
+        .evnt = CreateEvent(NULL, MANUAL_DESTROY), \
+    }
+
+#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
+
 #endif
@@ -173,6 +173,17 @@ static inline void WaitEvent(evhandle_t evh)
     __asm__ __volatile__ ("":::"ebx","ecx","edx","esi","edi");
 };

+static inline int WaitEventTimeout(evhandle_t evh, int timeout)
+{
+    int retval;
+    __asm__ __volatile__ (
+    "call *__imp__WaitEventTimeout"
+    :"=a"(retval)
+    :"a"(evh.handle),"b"(evh.euid), "c"(timeout));
+    __asm__ __volatile__ ("":::"ebx","ecx","edx","esi","edi");
+    return retval;
+};
+
 static inline void DestroyEvent(evhandle_t evh)
 {
     __asm__ __volatile__ (
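WaitEventTimeout is the new kernel import that the reworked wait_event path above calls instead of the unbounded WaitEvent. A hedged sketch of using it directly from a DDK driver with an event created the same way the DEFINE_WAIT macro does; the header name and the unit of the timeout argument are assumptions about the KolibriOS kernel export, not something this diff states.

#include <syscall.h>   /* assumption: this is the header patched above */

static int wait_for_device_event(evhandle_t ev, int timeout)
{
    /* Bounded wait: unlike WaitEvent, control returns after 'timeout'
     * even if the event was never raised. The return value is whatever
     * the kernel's WaitEventTimeout export leaves in eax. */
    return WaitEventTimeout(ev, timeout);
}
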
@@ -476,6 +487,12 @@ static inline void __iomem *ioremap(uint32_t offset, size_t size)
     return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE);
 }

+static inline void __iomem *ioremap_wc(uint32_t offset, size_t size)
+{
+    return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE);
+}
+
+
 static inline void iounmap(void *addr)
 {
     FreeKernelSpace(addr);