ddk: 3.17-rc1

git-svn-id: svn://kolibrios.org@5056 a494cfbc-eb01-0410-851d-a64ba20cac60
commit 0ede580d2f
parent 144b0c94b0
Author: Sergey Semyonov (Serge)
Date:   2014-08-23 10:29:27 +00:00

79 changed files with 6316 additions and 2130 deletions


@@ -25,12 +25,17 @@ NAME_SRCS:= \
 	io/write.c \
 	linux/bitmap.c \
 	linux/idr.c \
+	linux/interval_tree.c \
 	linux/firmware.c \
+	linux/hdmi.c \
+	linux/kasprintf.c \
 	linux/kref.c \
 	linux/list_sort.c \
+	linux/mutex.c \
 	linux/rbtree.c \
 	linux/dmapool.c \
 	linux/ctype.c \
+	linux/scatterlist.c \
 	linux/string.c \
 	linux/time.c \
 	linux/workqueue.c \


@@ -2,7 +2,7 @@
  * lib/bitmap.c
  * Helper functions for bitmap.h.
  *
- * Tlhis source code is licensed under the GNU General Public License,
+ * This source code is licensed under the GNU General Public License,
  * Version 2. See the file COPYING for more details.
  */
 #include <syscall.h>
@@ -41,9 +41,9 @@
  * for the best explanations of this ordering.
  */
-int __bitmap_empty(const unsigned long *bitmap, int bits)
+int __bitmap_empty(const unsigned long *bitmap, unsigned int bits)
 {
-        int k, lim = bits/BITS_PER_LONG;
+        unsigned int k, lim = bits/BITS_PER_LONG;
         for (k = 0; k < lim; ++k)
                 if (bitmap[k])
                         return 0;
@@ -56,9 +56,9 @@ int __bitmap_empty(const unsigned long *bitmap, int bits)
 }
 EXPORT_SYMBOL(__bitmap_empty);
-int __bitmap_full(const unsigned long *bitmap, int bits)
+int __bitmap_full(const unsigned long *bitmap, unsigned int bits)
 {
-        int k, lim = bits/BITS_PER_LONG;
+        unsigned int k, lim = bits/BITS_PER_LONG;
         for (k = 0; k < lim; ++k)
                 if (~bitmap[k])
                         return 0;
@@ -72,9 +72,9 @@ int __bitmap_full(const unsigned long *bitmap, int bits)
 EXPORT_SYMBOL(__bitmap_full);
 int __bitmap_equal(const unsigned long *bitmap1,
-                const unsigned long *bitmap2, int bits)
+                const unsigned long *bitmap2, unsigned int bits)
 {
-        int k, lim = bits/BITS_PER_LONG;
+        unsigned int k, lim = bits/BITS_PER_LONG;
         for (k = 0; k < lim; ++k)
                 if (bitmap1[k] != bitmap2[k])
                         return 0;
@@ -87,14 +87,14 @@ int __bitmap_equal(const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_equal);
-void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
+void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
 {
-        int k, lim = bits/BITS_PER_LONG;
+        unsigned int k, lim = bits/BITS_PER_LONG;
         for (k = 0; k < lim; ++k)
                 dst[k] = ~src[k];
         if (bits % BITS_PER_LONG)
-                dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
+                dst[k] = ~src[k];
 }
 EXPORT_SYMBOL(__bitmap_complement);
@@ -183,23 +183,26 @@ void __bitmap_shift_left(unsigned long *dst,
 EXPORT_SYMBOL(__bitmap_shift_left);
 int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
-                const unsigned long *bitmap2, int bits)
+                const unsigned long *bitmap2, unsigned int bits)
 {
-        int k;
-        int nr = BITS_TO_LONGS(bits);
+        unsigned int k;
+        unsigned int lim = bits/BITS_PER_LONG;
         unsigned long result = 0;
-        for (k = 0; k < nr; k++)
+        for (k = 0; k < lim; k++)
                 result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+        if (bits % BITS_PER_LONG)
+                result |= (dst[k] = bitmap1[k] & bitmap2[k] &
+                           BITMAP_LAST_WORD_MASK(bits));
         return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_and);
 void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
-                const unsigned long *bitmap2, int bits)
+                const unsigned long *bitmap2, unsigned int bits)
 {
-        int k;
-        int nr = BITS_TO_LONGS(bits);
+        unsigned int k;
+        unsigned int nr = BITS_TO_LONGS(bits);
         for (k = 0; k < nr; k++)
                 dst[k] = bitmap1[k] | bitmap2[k];
@@ -207,10 +210,10 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
 EXPORT_SYMBOL(__bitmap_or);
 void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
-                const unsigned long *bitmap2, int bits)
+                const unsigned long *bitmap2, unsigned int bits)
 {
-        int k;
-        int nr = BITS_TO_LONGS(bits);
+        unsigned int k;
+        unsigned int nr = BITS_TO_LONGS(bits);
         for (k = 0; k < nr; k++)
                 dst[k] = bitmap1[k] ^ bitmap2[k];
@@ -218,22 +221,25 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
 EXPORT_SYMBOL(__bitmap_xor);
 int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
-                const unsigned long *bitmap2, int bits)
+                const unsigned long *bitmap2, unsigned int bits)
 {
-        int k;
-        int nr = BITS_TO_LONGS(bits);
+        unsigned int k;
+        unsigned int lim = bits/BITS_PER_LONG;
         unsigned long result = 0;
-        for (k = 0; k < nr; k++)
+        for (k = 0; k < lim; k++)
                 result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
+        if (bits % BITS_PER_LONG)
+                result |= (dst[k] = bitmap1[k] & ~bitmap2[k] &
+                           BITMAP_LAST_WORD_MASK(bits));
         return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_andnot);
 int __bitmap_intersects(const unsigned long *bitmap1,
-                const unsigned long *bitmap2, int bits)
+                const unsigned long *bitmap2, unsigned int bits)
 {
-        int k, lim = bits/BITS_PER_LONG;
+        unsigned int k, lim = bits/BITS_PER_LONG;
         for (k = 0; k < lim; ++k)
                 if (bitmap1[k] & bitmap2[k])
                         return 1;
@@ -246,9 +252,9 @@ int __bitmap_intersects(const unsigned long *bitmap1,
 EXPORT_SYMBOL(__bitmap_intersects);
 int __bitmap_subset(const unsigned long *bitmap1,
-                const unsigned long *bitmap2, int bits)
+                const unsigned long *bitmap2, unsigned int bits)
 {
-        int k, lim = bits/BITS_PER_LONG;
+        unsigned int k, lim = bits/BITS_PER_LONG;
         for (k = 0; k < lim; ++k)
                 if (bitmap1[k] & ~bitmap2[k])
                         return 0;
@@ -260,9 +266,10 @@ int __bitmap_subset(const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_subset);
-int __bitmap_weight(const unsigned long *bitmap, int bits)
+int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
 {
-        int k, w = 0, lim = bits/BITS_PER_LONG;
+        unsigned int k, lim = bits/BITS_PER_LONG;
+        int w = 0;
         for (k = 0; k < lim; k++)
                 w += hweight_long(bitmap[k]);
@@ -274,42 +281,42 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
 }
 EXPORT_SYMBOL(__bitmap_weight);
-void bitmap_set(unsigned long *map, int start, int nr)
+void bitmap_set(unsigned long *map, unsigned int start, int len)
 {
         unsigned long *p = map + BIT_WORD(start);
-        const int size = start + nr;
+        const unsigned int size = start + len;
         int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
         unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
-        while (nr - bits_to_set >= 0) {
+        while (len - bits_to_set >= 0) {
                 *p |= mask_to_set;
-                nr -= bits_to_set;
+                len -= bits_to_set;
                 bits_to_set = BITS_PER_LONG;
                 mask_to_set = ~0UL;
                 p++;
         }
-        if (nr) {
+        if (len) {
                 mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                 *p |= mask_to_set;
         }
 }
 EXPORT_SYMBOL(bitmap_set);
-void bitmap_clear(unsigned long *map, int start, int nr)
+void bitmap_clear(unsigned long *map, unsigned int start, int len)
 {
         unsigned long *p = map + BIT_WORD(start);
-        const int size = start + nr;
+        const unsigned int size = start + len;
         int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
         unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
-        while (nr - bits_to_clear >= 0) {
+        while (len - bits_to_clear >= 0) {
                 *p &= ~mask_to_clear;
-                nr -= bits_to_clear;
+                len -= bits_to_clear;
                 bits_to_clear = BITS_PER_LONG;
                 mask_to_clear = ~0UL;
                 p++;
         }
-        if (nr) {
+        if (len) {
                 mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                 *p &= ~mask_to_clear;
         }
@@ -378,7 +385,7 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
  *
  * If for example, just bits 4 through 7 are set in @buf, then @pos
  * values 4 through 7 will get mapped to 0 through 3, respectively,
- * and other @pos values will get mapped to 0.  When @pos value 7
+ * and other @pos values will get mapped to -1.  When @pos value 7
  * gets mapped to (returns) @ord value 3 in this example, that means
  * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
  *
@@ -708,7 +715,7 @@ enum {
         REG_OP_RELEASE,         /* clear all bits in region */
 };
-static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
 {
         int nbits_reg;          /* number of bits in region */
         int index;              /* index first long of region in bitmap */
@@ -774,11 +781,11 @@ done:
  * Return the bit offset in bitmap of the allocated region,
  * or -errno on failure.
  */
-int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
 {
-        int pos, end;           /* scans bitmap by regions of size order */
-        for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
+        unsigned int pos, end;  /* scans bitmap by regions of size order */
+        for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
                 if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
                         continue;
                 __reg_op(bitmap, pos, order, REG_OP_ALLOC);
@@ -799,7 +806,7 @@ EXPORT_SYMBOL(bitmap_find_free_region);
  *
  * No return value.
  */
-void bitmap_release_region(unsigned long *bitmap, int pos, int order)
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
 {
         __reg_op(bitmap, pos, order, REG_OP_RELEASE);
 }
@@ -816,12 +823,11 @@ EXPORT_SYMBOL(bitmap_release_region);
  * Return 0 on success, or %-EBUSY if specified region wasn't
  * free (not all bits were zero).
  */
-int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
 {
         if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
                 return -EBUSY;
-        __reg_op(bitmap, pos, order, REG_OP_ALLOC);
-        return 0;
+        return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
 }
 EXPORT_SYMBOL(bitmap_allocate_region);
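
A minimal usage sketch of the region API above (not part of the commit; assumes the usual linux/bitmap.h declarations):

#include <linux/bitmap.h>

static DECLARE_BITMAP(pool, 64);        /* 64-bit map, initially clear */

static int grab_four_bits(void)
{
        /* find and claim a naturally aligned region of 1 << 2 = 4 bits */
        int pos = bitmap_find_free_region(pool, 64, 2);

        if (pos < 0)
                return pos;             /* no free region left */
        /* ... bits pos..pos+3 are now owned by the caller ... */
        bitmap_release_region(pool, pos, 2);
        return 0;
}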

drivers/ddk/linux/hdmi.c (new file, 436 lines added)

@@ -0,0 +1,436 @@
/*
* Copyright (C) 2012 Avionic Design GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/string.h>
static void hdmi_infoframe_checksum(void *buffer, size_t size)
{
u8 *ptr = buffer;
u8 csum = 0;
size_t i;
/* compute checksum */
for (i = 0; i < size; i++)
csum += ptr[i];
ptr[3] = 256 - csum;
}
/**
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
* @frame: HDMI AVI infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = HDMI_AVI_INFOFRAME_SIZE;
return 0;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_init);
/**
* hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
* @frame: HDMI AVI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size)
{
u8 *ptr = buffer;
size_t length;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
/* start infoframe payload */
ptr += HDMI_INFOFRAME_HEADER_SIZE;
ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
/*
* Data byte 1, bit 4 has to be set if we provide the active format
* aspect ratio
*/
if (frame->active_aspect & 0xf)
ptr[0] |= BIT(4);
/* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
if (frame->top_bar || frame->bottom_bar)
ptr[0] |= BIT(3);
if (frame->left_bar || frame->right_bar)
ptr[0] |= BIT(2);
ptr[1] = ((frame->colorimetry & 0x3) << 6) |
((frame->picture_aspect & 0x3) << 4) |
(frame->active_aspect & 0xf);
ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
((frame->quantization_range & 0x3) << 2) |
(frame->nups & 0x3);
if (frame->itc)
ptr[2] |= BIT(7);
ptr[3] = frame->video_code & 0x7f;
ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
((frame->content_type & 0x3) << 4) |
(frame->pixel_repeat & 0xf);
ptr[5] = frame->top_bar & 0xff;
ptr[6] = (frame->top_bar >> 8) & 0xff;
ptr[7] = frame->bottom_bar & 0xff;
ptr[8] = (frame->bottom_bar >> 8) & 0xff;
ptr[9] = frame->left_bar & 0xff;
ptr[10] = (frame->left_bar >> 8) & 0xff;
ptr[11] = frame->right_bar & 0xff;
ptr[12] = (frame->right_bar >> 8) & 0xff;
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
/**
* hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
* @frame: HDMI SPD infoframe
* @vendor: vendor string
* @product: product string
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_SPD;
frame->version = 1;
frame->length = HDMI_SPD_INFOFRAME_SIZE;
strncpy(frame->vendor, vendor, sizeof(frame->vendor));
strncpy(frame->product, product, sizeof(frame->product));
return 0;
}
EXPORT_SYMBOL(hdmi_spd_infoframe_init);
/**
* hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
* @frame: HDMI SPD infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size)
{
u8 *ptr = buffer;
size_t length;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
/* start infoframe payload */
ptr += HDMI_INFOFRAME_HEADER_SIZE;
memcpy(ptr, frame->vendor, sizeof(frame->vendor));
memcpy(ptr + 8, frame->product, sizeof(frame->product));
ptr[24] = frame->sdi;
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
/**
* hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
* @frame: HDMI audio infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
frame->version = 1;
frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
return 0;
}
EXPORT_SYMBOL(hdmi_audio_infoframe_init);
/**
* hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
* @frame: HDMI audio infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size)
{
unsigned char channels;
u8 *ptr = buffer;
size_t length;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
memset(buffer, 0, size);
if (frame->channels >= 2)
channels = frame->channels - 1;
else
channels = 0;
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
/* start infoframe payload */
ptr += HDMI_INFOFRAME_HEADER_SIZE;
ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
(frame->sample_size & 0x3);
ptr[2] = frame->coding_type_ext & 0x1f;
ptr[3] = frame->channel_allocation;
ptr[4] = (frame->level_shift_value & 0xf) << 3;
if (frame->downmix_inhibit)
ptr[4] |= BIT(7);
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
/**
* hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
* @frame: HDMI vendor infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
frame->version = 1;
frame->oui = HDMI_IEEE_OUI;
/*
* 0 is a valid value for s3d_struct, so we use a special "not set"
* value
*/
frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
return 0;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
/**
* hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
/* empty info frame */
if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
/* only one of those can be supplied */
if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
/* for side by side (half) we also need to provide 3D_Ext_Data */
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
frame->length = 6;
else
frame->length = 5;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
/* HDMI OUI */
ptr[4] = 0x03;
ptr[5] = 0x0c;
ptr[6] = 0x00;
if (frame->vic) {
ptr[7] = 0x1 << 5; /* video format */
ptr[8] = frame->vic;
} else {
ptr[7] = 0x2 << 5; /* video format */
ptr[8] = (frame->s3d_struct & 0xf) << 4;
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
}
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
/*
* hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
*/
static ssize_t
hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
void *buffer, size_t size)
{
/* we only know about HDMI vendor infoframes */
if (frame->any.oui != HDMI_IEEE_OUI)
return -EINVAL;
return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
}
/**
* hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
{
ssize_t length;
switch (frame->any.type) {
case HDMI_INFOFRAME_TYPE_AVI:
length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_SPD:
length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
buffer, size);
break;
default:
WARN(1, "Bad infoframe type %d\n", frame->any.type);
length = -EINVAL;
}
return length;
}
EXPORT_SYMBOL(hdmi_infoframe_pack);
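
A minimal packing sketch for the AVI helpers above (not part of the commit; the VIC value is an arbitrary example):

#include <linux/hdmi.h>

static ssize_t pack_avi_example(u8 *buf, size_t len)
{
        struct hdmi_avi_infoframe frame;

        hdmi_avi_infoframe_init(&frame);
        frame.video_code = 16;          /* CEA-861 VIC 16: 1080p60 (example) */
        /* buf must hold HDMI_INFOFRAME_HEADER_SIZE + frame.length bytes */
        return hdmi_avi_infoframe_pack(&frame, buf, len);
}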


@@ -18,12 +18,6 @@
  * pointer or what ever, we treat it as a (void *).  You can pass this
  * id to a user for him to pass back at a later time.  You then pass
  * that id to this code and it returns your pointer.
- * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
- * don't need to go to the memory "store" during an id allocate, just
- * so you don't need to be too concerned about locking and conflicts
- * with the slab allocator.
  */
 #include <linux/kernel.h>
@@ -136,7 +130,7 @@ static void idr_layer_rcu_free(struct rcu_head *head)
 static inline void free_layer(struct idr *idr, struct idr_layer *p)
 {
-        if (idr->hint && idr->hint == p)
+        if (idr->hint == p)
                 RCU_INIT_POINTER(idr->hint, NULL);
         idr_layer_rcu_free(&p->rcu_head);
 }
@@ -181,7 +175,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
         }
 }
-int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
         while (idp->id_free_cnt < MAX_IDR_FREE) {
                 struct idr_layer *new;
@@ -192,7 +186,6 @@ int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
         }
         return 1;
 }
-EXPORT_SYMBOL(__idr_pre_get);
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
@@ -235,7 +228,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
                         id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
                         /* if already at the top layer, we need to grow */
-                        if (id >= 1 << (idp->layers * IDR_BITS)) {
+                        if (id > idr_max(idp->layers)) {
                                 *starting_id = id;
                                 return -EAGAIN;
                         }
@@ -359,20 +352,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
         idr_mark_full(pa, id);
 }
-int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
-{
-        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
-        int rv;
-        rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
-        if (rv < 0)
-                return rv == -ENOMEM ? -EAGAIN : rv;
-        idr_fill_slot(idp, ptr, rv, pa);
-        *id = rv;
-        return 0;
-}
-EXPORT_SYMBOL(__idr_get_new_above);
 /**
  * idr_preload - preload for idr_alloc()
@@ -550,6 +529,11 @@ void idr_remove(struct idr *idp, int id)
         if (id < 0)
                 return;
+        if (id > idr_max(idp->layers)) {
+                idr_remove_warning(id);
+                return;
+        }
         sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
         if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
             idp->top->ary[0]) {
@@ -567,20 +551,10 @@ void idr_remove(struct idr *idp, int id)
                 bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
                 free_layer(idp, to_free);
         }
-        while (idp->id_free_cnt >= MAX_IDR_FREE) {
-                p = get_from_free_list(idp);
-                /*
-                 * Note: we don't call the rcu callback here, since the only
-                 * layers that fall into the freelist are those that have been
-                 * preallocated.
-                 */
-                kfree(p);
-        }
-        return;
 }
 EXPORT_SYMBOL(idr_remove);
-void __idr_remove_all(struct idr *idp)
+static void __idr_remove_all(struct idr *idp)
 {
         int n, id, max;
         int bt_mask;
@@ -589,31 +563,31 @@ void __idr_remove_all(struct idr *idp)
         struct idr_layer **paa = &pa[0];
         n = idp->layers * IDR_BITS;
-        p = idp->top;
+        *paa = idp->top;
         rcu_assign_pointer(idp->top, NULL);
         max = idr_max(idp->layers);
         id = 0;
         while (id >= 0 && id <= max) {
+                p = *paa;
                 while (n > IDR_BITS && p) {
                         n -= IDR_BITS;
-                        *paa++ = p;
                         p = p->ary[(id >> n) & IDR_MASK];
+                        *++paa = p;
                 }
                 bt_mask = id;
                 id += 1 << n;
                 /* Get the highest bit that the above add changed from 0->1. */
                 while (n < fls(id ^ bt_mask)) {
-                        if (p)
-                                free_layer(idp, p);
+                        if (*paa)
+                                free_layer(idp, *paa);
                         n += IDR_BITS;
-                        p = *--paa;
+                        --paa;
                 }
         }
         idp->layers = 0;
 }
-EXPORT_SYMBOL(__idr_remove_all);
 /**
  * idr_destroy - release all cached layers within an idr tree
@@ -692,15 +666,16 @@ int idr_for_each(struct idr *idp,
         struct idr_layer **paa = &pa[0];
         n = idp->layers * IDR_BITS;
-        p = rcu_dereference_raw(idp->top);
+        *paa = rcu_dereference_raw(idp->top);
         max = idr_max(idp->layers);
         id = 0;
         while (id >= 0 && id <= max) {
+                p = *paa;
                 while (n > 0 && p) {
                         n -= IDR_BITS;
-                        *paa++ = p;
                         p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+                        *++paa = p;
                 }
                 if (p) {
@@ -712,7 +687,7 @@ int idr_for_each(struct idr *idp,
                 id += 1 << n;
                 while (n < fls(id)) {
                         n += IDR_BITS;
-                        p = *--paa;
+                        --paa;
                 }
         }
@@ -740,17 +715,18 @@ void *idr_get_next(struct idr *idp, int *nextidp)
         int n, max;
         /* find first ent */
-        p = rcu_dereference_raw(idp->top);
+        p = *paa = rcu_dereference_raw(idp->top);
         if (!p)
                 return NULL;
         n = (p->layer + 1) * IDR_BITS;
         max = idr_max(p->layer + 1);
         while (id >= 0 && id <= max) {
+                p = *paa;
                 while (n > 0 && p) {
                         n -= IDR_BITS;
-                        *paa++ = p;
                         p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+                        *++paa = p;
                 }
                 if (p) {
@@ -768,7 +744,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
                 id = round_up(id + 1, 1 << n);
                 while (n < fls(id)) {
                         n += IDR_BITS;
-                        p = *--paa;
+                        --paa;
                 }
         }
         return NULL;
@@ -798,14 +774,12 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
         p = idp->top;
         if (!p)
-                return ERR_PTR(-EINVAL);
+                return ERR_PTR(-ENOENT);
-        n = (p->layer+1) * IDR_BITS;
-        if (id >= (1 << n))
-                return ERR_PTR(-EINVAL);
-        n -= IDR_BITS;
+        if (id > idr_max(p->layer + 1))
+                return ERR_PTR(-ENOENT);
+        n = p->layer * IDR_BITS;
         while ((n > 0) && p) {
                 p = p->ary[(id >> n) & IDR_MASK];
                 n -= IDR_BITS;
@@ -842,6 +816,16 @@ void idr_init(struct idr *idp)
 }
 EXPORT_SYMBOL(idr_init);
+static int idr_has_entry(int id, void *p, void *data)
+{
+        return 1;
+}
+bool idr_is_empty(struct idr *idp)
+{
+        return !idr_for_each(idp, idr_has_entry, NULL);
+}
+EXPORT_SYMBOL(idr_is_empty);
 /**
  * DOC: IDA description
@@ -1006,6 +990,9 @@ void ida_remove(struct ida *ida, int id)
         int n;
         struct ida_bitmap *bitmap;
+        if (idr_id > idr_max(ida->idr.layers))
+                goto err;
         /* clear full bits while looking up the leaf idr_layer */
         while ((shift > 0) && p) {
                 n = (idr_id >> shift) & IDR_MASK;
@@ -1021,7 +1008,7 @@ void ida_remove(struct ida *ida, int id)
         __clear_bit(n, p->bitmap);
         bitmap = (void *)p->ary[n];
-        if (!test_bit(offset, bitmap->bitmap))
+        if (!bitmap || !test_bit(offset, bitmap->bitmap))
                 goto err;
         /* update bitmap and remove it if empty */
@@ -1244,3 +1231,17 @@ unsigned int hweight32(unsigned int w)
         return (res + (res >> 16)) & 0x000000FF;
 }
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+        return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+        __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+        res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+        res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+        res = res + (res >> 8);
+        res = res + (res >> 16);
+        return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+}
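
A minimal caller sketch for the idr API touched above (not part of the commit; idr_alloc() and idr_find() come from the same idr interface):

#include <linux/idr.h>

static DEFINE_IDR(obj_idr);

static int track_object(void *obj)
{
        int id = idr_alloc(&obj_idr, obj, 0, 0, GFP_KERNEL);

        if (id < 0)
                return id;              /* -ENOMEM or -ENOSPC */
        /* idr_find(&obj_idr, id) returns obj until the id is removed */
        idr_remove(&obj_idr, id);
        return 0;                       /* idr_is_empty(&obj_idr) is true again */
}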


@@ -0,0 +1,16 @@
//#include <linux/init.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>
#include <linux/module.h>
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
unsigned long, __subtree_last,
START, LAST,, interval_tree)
EXPORT_SYMBOL_GPL(interval_tree_insert);
EXPORT_SYMBOL_GPL(interval_tree_remove);
EXPORT_SYMBOL_GPL(interval_tree_iter_first);
EXPORT_SYMBOL_GPL(interval_tree_iter_next);
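
A minimal sketch of the interface this instantiation exports (not part of the commit):

#include <linux/interval_tree.h>

static struct rb_root itree = RB_ROOT;

static void interval_demo(void)
{
        struct interval_tree_node node = { .start = 10, .last = 19 };
        struct interval_tree_node *it;

        interval_tree_insert(&node, &itree);
        /* visit every node overlapping [15, 30] (inclusive endpoints) */
        for (it = interval_tree_iter_first(&itree, 15, 30); it;
             it = interval_tree_iter_next(it, 15, 30))
                ;       /* node matches: [10,19] intersects [15,30] */
        interval_tree_remove(&node, &itree);
}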


@@ -0,0 +1,45 @@
/*
* linux/lib/kasprintf.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <stdarg.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
/* Simplified asprintf. */
char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
unsigned int len;
char *p;
va_list aq;
va_copy(aq, ap);
len = vsnprintf(NULL, 0, fmt, aq);
va_end(aq);
p = kmalloc(len+1, gfp);
if (!p)
return NULL;
vsnprintf(p, len+1, fmt, ap);
return p;
}
EXPORT_SYMBOL(kvasprintf);
char *kasprintf(gfp_t gfp, const char *fmt, ...)
{
va_list ap;
char *p;
va_start(ap, fmt);
p = kvasprintf(gfp, fmt, ap);
va_end(ap);
return p;
}
EXPORT_SYMBOL(kasprintf);
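
A minimal usage sketch (not part of the commit; mainline exposes the kasprintf() declaration via linux/kernel.h, which is assumed here):

#include <linux/kernel.h>
#include <linux/slab.h>

static char *make_name(int minor)
{
        /* buffer is sized by the dry-run vsnprintf(); free with kfree() */
        return kasprintf(GFP_KERNEL, "card%d", minor);  /* e.g. "card0" */
}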

drivers/ddk/linux/mutex.c (new file, 117 lines added)

@@ -0,0 +1,117 @@
/*
* kernel/locking/mutex.c
*
* Mutexes: blocking mutual exclusion locks
*
* Started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
* David Howells for suggestions and improvements.
*
* - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
* from the -rt tree, where it was originally implemented for rtmutexes
* by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
* and Sven Dietrich.
*
* Also see Documentation/mutex-design.txt.
*/
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <syscall.h>
/*
* A negative mutex count indicates that waiters are sleeping waiting for the
* mutex.
*/
#define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0)
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
atomic_set(&lock->count, 1);
// spin_lock_init(&lock->wait_lock);
INIT_LIST_HEAD(&lock->wait_list);
// mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
lock->osq = NULL;
#endif
}
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
/*
* If this WARN_ON triggers, you used ww_mutex_lock to acquire,
* but released with a normal mutex_unlock in this call.
*
* This should never happen, always use ww_mutex_unlock.
*/
DEBUG_LOCKS_WARN_ON(ww->ctx);
/*
* Not quite done after calling ww_acquire_done() ?
*/
DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
if (ww_ctx->contending_lock) {
/*
* After -EDEADLK you tried to
* acquire a different ww_mutex? Bad!
*/
DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
/*
* You called ww_mutex_lock after receiving -EDEADLK,
* but 'forgot' to unlock everything else first?
*/
DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
ww_ctx->contending_lock = NULL;
}
/*
* Naughty, using a different class will lead to undefined behavior!
*/
DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
ww_ctx->acquired++;
}
void ww_mutex_unlock(struct ww_mutex *lock)
{
/*
* The unlocking fastpath is the 0->1 transition from 'locked'
* into 'unlocked' state:
*/
if (lock->ctx) {
if (lock->ctx->acquired > 0)
lock->ctx->acquired--;
lock->ctx = NULL;
}
MutexUnlock(&lock->base);
}
int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
MutexLock(&lock->base);
ww_mutex_lock_acquired(lock, ctx);
lock->ctx = ctx;
return 0;
}
int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
MutexLock(&lock->base);
ww_mutex_lock_acquired(lock, ctx);
lock->ctx = ctx;
return 0;
}
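
A minimal wait/wound locking sketch (not part of the commit; it assumes the mainline linux/ww_mutex.h inline wrappers ww_mutex_init(), ww_mutex_lock(), ww_acquire_init() and ww_acquire_fini() are available in this port):

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_class);
static struct ww_mutex lock_a, lock_b;

static void lock_both(void)
{
        struct ww_acquire_ctx ctx;

        ww_mutex_init(&lock_a, &demo_class);
        ww_mutex_init(&lock_b, &demo_class);

        ww_acquire_init(&ctx, &demo_class);
        /* this port's __ww_mutex_lock() simply blocks until the mutex is
         * acquired, so no -EDEADLK back-off loop is needed here */
        ww_mutex_lock(&lock_a, &ctx);
        ww_mutex_lock(&lock_b, &ctx);
        /* ... touch both protected objects ... */
        ww_mutex_unlock(&lock_b);
        ww_mutex_unlock(&lock_a);
        ww_acquire_fini(&ctx);
}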


@@ -0,0 +1,363 @@
/*
* Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
*
* Scatterlist handling helpers.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/export.h>
#include <linux/scatterlist.h>
/**
* sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
*
* Description:
* Usually the next entry will be @sg@ + 1, but if this sg element is part
* of a chained scatterlist, it could jump to the start of a new
* scatterlist array.
*
**/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
if (sg_is_last(sg))
return NULL;
sg++;
if (unlikely(sg_is_chain(sg)))
sg = sg_chain_ptr(sg);
return sg;
}
EXPORT_SYMBOL(sg_next);
/**
* sg_nents - return total count of entries in scatterlist
* @sg: The scatterlist
*
* Description:
* Allows to know how many entries are in sg, taking into account
* chaining as well
*
**/
int sg_nents(struct scatterlist *sg)
{
int nents;
for (nents = 0; sg; sg = sg_next(sg))
nents++;
return nents;
}
EXPORT_SYMBOL(sg_nents);
/**
* sg_last - return the last scatterlist entry in a list
* @sgl: First entry in the scatterlist
* @nents: Number of entries in the scatterlist
*
* Description:
* Should only be used casually, it (currently) scans the entire list
* to get the last entry.
*
* Note that the @sgl@ pointer passed in need not be the first one,
* the important bit is that @nents@ denotes the number of entries that
* exist from @sgl@.
*
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
struct scatterlist *ret = &sgl[nents - 1];
#else
struct scatterlist *sg, *ret = NULL;
unsigned int i;
for_each_sg(sgl, sg, nents, i)
ret = sg;
#endif
#ifdef CONFIG_DEBUG_SG
BUG_ON(sgl[0].sg_magic != SG_MAGIC);
BUG_ON(!sg_is_last(ret));
#endif
return ret;
}
EXPORT_SYMBOL(sg_last);
/**
* sg_init_table - Initialize SG table
* @sgl: The SG table
* @nents: Number of entries in table
*
* Notes:
* If this is part of a chained sg table, sg_mark_end() should be
* used only on the last table part.
*
**/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
{
unsigned int i;
for (i = 0; i < nents; i++)
sgl[i].sg_magic = SG_MAGIC;
}
#endif
sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
/**
* sg_init_one - Initialize a single entry sg list
* @sg: SG entry
* @buf: Virtual address for IO
* @buflen: IO length
*
**/
//void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
//{
// sg_init_table(sg, 1);
// sg_set_buf(sg, buf, buflen);
//}
EXPORT_SYMBOL(sg_init_one);
/*
* The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
* helpers.
*/
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}
static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
kfree(sg);
}
/**
* __sg_free_table - Free a previously mapped sg table
* @table: The sg table header to use
* @max_ents: The maximum number of entries per single scatterlist
* @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
* @free_fn: Free function
*
* Description:
* Free an sg table previously allocated and setup with
* __sg_alloc_table(). The @max_ents value must be identical to
* that previously used with __sg_alloc_table().
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
bool skip_first_chunk, sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
if (unlikely(!table->sgl))
return;
sgl = table->sgl;
while (table->orig_nents) {
unsigned int alloc_size = table->orig_nents;
unsigned int sg_size;
/*
* If we have more than max_ents segments left,
* then assign 'next' to the sg table after the current one.
* sg_size is then one less than alloc size, since the last
* element is the chain pointer.
*/
if (alloc_size > max_ents) {
next = sg_chain_ptr(&sgl[max_ents - 1]);
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else {
sg_size = alloc_size;
next = NULL;
}
table->orig_nents -= sg_size;
if (!skip_first_chunk) {
free_fn(sgl, alloc_size);
skip_first_chunk = false;
}
sgl = next;
}
table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
/**
* sg_free_table - Free a previously allocated sg table
* @table: The mapped sg table header
*
**/
void sg_free_table(struct sg_table *table)
{
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
/**
* __sg_alloc_table - Allocate and initialize an sg table with given allocator
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @max_ents: The maximum number of entries the allocator returns per call
* @gfp_mask: GFP allocation mask
* @alloc_fn: Allocator to use
*
* Description:
* This function returns a @table @nents long. The allocator is
* defined to return scatterlist chunks of maximum size @max_ents.
* Thus if @nents is bigger than @max_ents, the scatterlists will be
* chained in units of @max_ents.
*
* Notes:
* If this function returns non-0 (eg failure), the caller must call
* __sg_free_table() to cleanup any leftover allocations.
*
**/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
unsigned int max_ents, struct scatterlist *first_chunk,
gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
struct scatterlist *sg, *prv;
unsigned int left;
memset(table, 0, sizeof(*table));
if (nents == 0)
return -EINVAL;
#ifndef ARCH_HAS_SG_CHAIN
if (WARN_ON_ONCE(nents > max_ents))
return -EINVAL;
#endif
left = nents;
prv = NULL;
do {
unsigned int sg_size, alloc_size = left;
if (alloc_size > max_ents) {
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else
sg_size = alloc_size;
left -= sg_size;
if (first_chunk) {
sg = first_chunk;
first_chunk = NULL;
} else {
sg = alloc_fn(alloc_size, gfp_mask);
}
if (unlikely(!sg)) {
/*
* Adjust entry count to reflect that the last
* entry of the previous table won't be used for
* linkage. Without this, sg_kfree() may get
* confused.
*/
if (prv)
table->nents = ++table->orig_nents;
return -ENOMEM;
}
sg_init_table(sg, alloc_size);
table->nents = table->orig_nents += sg_size;
/*
* If this is the first mapping, assign the sg table header.
* If this is not the first mapping, chain previous part.
*/
if (prv)
sg_chain(prv, max_ents, sg);
else
table->sgl = sg;
/*
* If no more entries after this one, mark the end
*/
if (!left)
sg_mark_end(&sg[sg_size - 1]);
prv = sg;
} while (left);
return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
/**
* sg_alloc_table - Allocate and initialize an sg table
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @gfp_mask: GFP allocation mask
*
* Description:
* Allocate and initialize an sg table. If @nents@ is larger than
* SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
*
**/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
int ret;
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
NULL, gfp_mask, sg_kmalloc);
if (unlikely(ret))
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset)
{
piter->__pg_advance = 0;
piter->__nents = nents;
piter->sg = sglist;
piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);
static int sg_page_count(struct scatterlist *sg)
{
return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
if (!piter->__nents || !piter->sg)
return false;
piter->sg_pgoffset += piter->__pg_advance;
piter->__pg_advance = 1;
while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
piter->sg_pgoffset -= sg_page_count(piter->sg);
piter->sg = sg_next(piter->sg);
if (!--piter->__nents || !piter->sg)
return false;
}
return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
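
A minimal table-building sketch for the allocators above (not part of the commit; pages and PAGE_SIZE are the usual kernel definitions):

#include <linux/scatterlist.h>

static int build_table(struct page **pages, unsigned int n)
{
        struct sg_table st;
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        /* chains automatically when n exceeds SG_MAX_SINGLE_ALLOC */
        ret = sg_alloc_table(&st, n, GFP_KERNEL);
        if (ret)
                return ret;

        for_each_sg(st.sgl, sg, st.nents, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* ... hand st.sgl to the DMA layer ... */
        sg_free_table(&st);
        return 0;
}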

File diff suppressed because it is too large.


@ -64,17 +64,19 @@
#include <linux/file.h> #include <linux/file.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/irqreturn.h> #include <linux/irqreturn.h>
//#include <linux/smp_lock.h> /* For (un)lock_kernel */
//#include <linux/dma-mapping.h>
#include <linux/mutex.h> #include <linux/mutex.h>
//#include <asm/io.h> //#include <asm/io.h>
//#include <asm/mman.h> #include <linux/slab.h>
//#include <asm/uaccess.h> //#include <asm/uaccess.h>
//#include <linux/workqueue.h> //#include <linux/workqueue.h>
//#include <linux/poll.h> //#include <linux/poll.h>
//#include <asm/pgalloc.h> //#include <asm/pgalloc.h>
#include <linux/types.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
@ -92,6 +94,7 @@ struct drm_device;
struct device_node; struct device_node;
struct videomode; struct videomode;
struct reservation_object;
struct inode; struct inode;
struct poll_table_struct; struct poll_table_struct;
@ -110,55 +113,48 @@ struct dma_buf;
#define DRM_CALLED_FROM_VBLIRQ 1 #define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
#define DRM_VBLANKTIME_INVBL (1 << 1) #define DRM_VBLANKTIME_INVBL (1 << 1)
/* get_scanout_position() return flags */ /* get_scanout_position() return flags */
#define DRM_SCANOUTPOS_VALID (1 << 0) #define DRM_SCANOUTPOS_VALID (1 << 0)
#define DRM_SCANOUTPOS_INVBL (1 << 1) #define DRM_SCANOUTPOS_INVBL (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) #define DRM_SCANOUTPOS_ACCURATE (1 << 2)
/*
* 4 debug categories are defined:
*
* CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
* This is the category used by the DRM_DEBUG() macro.
*
* DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
* This is the category used by the DRM_DEBUG_DRIVER() macro.
*
* KMS: used in the modesetting code.
* This is the category used by the DRM_DEBUG_KMS() macro.
*
* PRIME: used in the prime code.
* This is the category used by the DRM_DEBUG_PRIME() macro.
*
* Enabling verbose debug messages is done through the drm.debug parameter,
* each category being enabled by a bit.
*
* drm.debug=0x1 will enable CORE messages
* drm.debug=0x2 will enable DRIVER messages
* drm.debug=0x3 will enable CORE and DRIVER messages
* ...
* drm.debug=0xf will enable all messages
*
* An interesting feature is that it's possible to enable verbose logging at
* run-time by echoing the debug value in its sysfs node:
* # echo 0xf > /sys/module/drm/parameters/debug
*/
#define DRM_UT_CORE 0x01 #define DRM_UT_CORE 0x01
#define DRM_UT_DRIVER 0x02 #define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04 #define DRM_UT_KMS 0x04
#define DRM_UT_PRIME 0x08 #define DRM_UT_PRIME 0x08
/*
* Three debug levels are defined.
* drm_core, drm_driver, drm_kms
* drm_core level can be used in the generic drm code. For example:
* drm_ioctl, drm_mm, drm_memory
* The macro definition of DRM_DEBUG is used.
* DRM_DEBUG(fmt, args...)
* The debug info by using the DRM_DEBUG can be obtained by adding
* the boot option of "drm.debug=1".
*
* drm_driver level can be used in the specific drm driver. It is used
* to add the debug info related with the drm driver. For example:
* i915_drv, i915_dma, i915_gem, radeon_drv,
* The macro definition of DRM_DEBUG_DRIVER can be used.
* DRM_DEBUG_DRIVER(fmt, args...)
* The debug info by using the DRM_DEBUG_DRIVER can be obtained by
* adding the boot option of "drm.debug=0x02"
*
* drm_kms level can be used in the KMS code related with specific drm driver.
* It is used to add the debug info related with KMS mode. For example:
* the connector/crtc ,
* The macro definition of DRM_DEBUG_KMS can be used.
* DRM_DEBUG_KMS(fmt, args...)
* The debug info by using the DRM_DEBUG_KMS can be obtained by
* adding the boot option of "drm.debug=0x04"
*
* If we add the boot option of "drm.debug=0x06", we can get the debug info by
* using the DRM_DEBUG_KMS and DRM_DEBUG_DRIVER.
* If we add the boot option of "drm.debug=0x05", we can get the debug info by
* using the DRM_DEBUG_KMS and DRM_DEBUG.
*/
extern __printf(4, 5) extern __printf(2, 3)
void drm_ut_debug_printk(unsigned int request_level, void drm_ut_debug_printk(const char *function_name,
const char *prefix,
const char *function_name,
const char *format, ...); const char *format, ...);
extern __printf(2, 3) extern __printf(2, 3)
int drm_err(const char *func, const char *format, ...); int drm_err(const char *func, const char *format, ...);
@ -192,8 +188,6 @@ int drm_err(const char *func, const char *format, ...);
also include looping detection. */ also include looping detection. */
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ #define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
#define DRM_MAP_HASH_OFFSET 0x10000000 #define DRM_MAP_HASH_OFFSET 0x10000000
@ -231,6 +225,9 @@ int drm_err(const char *func, const char *format, ...);
#define DRM_INFO(fmt, ...) \ #define DRM_INFO(fmt, ...) \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
#define DRM_INFO_ONCE(fmt, ...) \
printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
/** /**
* Debug output. * Debug output.
* *
@ -238,49 +235,28 @@ int drm_err(const char *func, const char *format, ...);
* \param arg arguments * \param arg arguments
*/ */
#if DRM_DEBUG_CODE #if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, ...) \ #define DRM_DEBUG(fmt, args...) \
do { \ do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0) } while (0)
#define DRM_DEBUG_DRIVER(fmt, ...) \ #define DRM_DEBUG_DRIVER(fmt, args...) \
do { \ do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0) } while (0)
#define DRM_DEBUG_KMS(fmt, ...) \ #define DRM_DEBUG_KMS(fmt, args...) \
do { \ do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0) } while (0)
#define DRM_DEBUG_PRIME(fmt, ...) \ #define DRM_DEBUG_PRIME(fmt, args...) \
do { \ do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0)
#define DRM_LOG(fmt, ...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_KMS(fmt, ...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_MODE(fmt, ...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_DRIVER(fmt, ...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0) } while (0)
#else #else
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0) #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0) #define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0) #define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
#define DRM_DEBUG(fmt, arg...) do { } while (0) #define DRM_DEBUG(fmt, arg...) do { } while (0)
#define DRM_LOG(fmt, arg...) do { } while (0)
#define DRM_LOG_KMS(fmt, args...) do { } while (0)
#define DRM_LOG_MODE(fmt, arg...) do { } while (0)
#define DRM_LOG_DRIVER(fmt, arg...) do { } while (0)
#endif #endif
/*@}*/ /*@}*/
@ -310,7 +286,6 @@ do { \
} \ } \
} while (0) } while (0)
#if 0
/** /**
* Ioctl function type. * Ioctl function type.
* *
@ -351,6 +326,7 @@ struct drm_ioctl_desc {
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl} [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
#if 0
struct drm_magic_entry { struct drm_magic_entry {
struct list_head head; struct list_head head;
struct drm_hash_item hash_item; struct drm_hash_item hash_item;
@ -405,17 +381,6 @@ struct drm_waitlist {
}; };
#endif #endif
struct drm_freelist {
int initialized; /**< Freelist in use */
atomic_t count; /**< Number of free buffers */
struct drm_buf *next; /**< End pointer */
wait_queue_head_t waiting; /**< Processes waiting on free bufs */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
atomic_t wfh; /**< If waiting for high mark */
spinlock_t lock;
};
typedef struct drm_dma_handle { typedef struct drm_dma_handle {
dma_addr_t busaddr; dma_addr_t busaddr;
@ -434,7 +399,8 @@ struct drm_buf_entry {
int page_order; int page_order;
struct drm_dma_handle **seglist; struct drm_dma_handle **seglist;
struct drm_freelist freelist; int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
}; };
/* Event queued up for userspace to read */ /* Event queued up for userspace to read */
@ -455,11 +421,16 @@ struct drm_prime_file_private {
/** File private data */ /** File private data */
struct drm_file { struct drm_file {
unsigned always_authenticated :1;
unsigned authenticated :1; unsigned authenticated :1;
unsigned is_master :1; /* this file private is a master for a minor */ /* Whether we're master for a minor. Protected by master_mutex */
unsigned is_master :1;
/* true when the client has asked us to expose stereo 3D mode flags */ /* true when the client has asked us to expose stereo 3D mode flags */
unsigned stereo_allowed :1; unsigned stereo_allowed :1;
/*
* true if client understands CRTC primary planes and cursor planes
* in the plane list
*/
unsigned universal_planes:1;
struct list_head lhead; struct list_head lhead;
unsigned long lock_count; unsigned long lock_count;
@ -470,7 +441,16 @@ struct drm_file {
void *driver_priv; void *driver_priv;
struct drm_master *master; /* master this node is currently associated with
N.B. not always minor->master */
/**
* fbs - List of framebuffers associated with this file.
*
* Protected by fbs_lock. Note that the fbs list holds a reference on
* the fb object to prevent it from untimely disappearing.
*/
struct list_head fbs; struct list_head fbs;
struct mutex fbs_lock;
wait_queue_head_t event_wait; wait_queue_head_t event_wait;
struct list_head event_list; struct list_head event_list;
@ -478,23 +458,6 @@ struct drm_file {
}; };
#if 0 #if 0
/** Wait queue */
struct drm_queue {
atomic_t use_count; /**< Outstanding uses (+1) */
atomic_t finalization; /**< Finalization in progress */
atomic_t block_count; /**< Count of processes waiting */
atomic_t block_read; /**< Queue blocked for reads */
wait_queue_head_t read_queue; /**< Processes waiting on block_read */
atomic_t block_write; /**< Queue blocked for writes */
wait_queue_head_t write_queue; /**< Processes waiting on block_write */
atomic_t total_queued; /**< Total queued statistic */
atomic_t total_flushed; /**< Total flushes statistic */
atomic_t total_locks; /**< Total locks statistics */
enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
struct drm_waitlist waitlist; /**< Pending buffers */
wait_queue_head_t flush_queue; /**< Processes waiting until flush */
};
/** /**
* Lock data. * Lock data.
*/ */
@ -578,7 +541,6 @@ struct drm_sigdata {
#endif #endif
/** /**
* Kernel side of a mapping * Kernel side of a mapping
*/ */
@ -605,15 +567,6 @@ struct drm_map_list {
struct drm_master *master; struct drm_master *master;
}; };
/**
* Context handle list
*/
struct drm_ctx_list {
struct list_head head; /**< list head */
drm_context_t handle; /**< context handle */
struct drm_file *tag; /**< associated fd private data */
};
/* location of GART table */ /* location of GART table */
#define DRM_ATI_GART_MAIN 1 #define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2 #define DRM_ATI_GART_FB 2
@ -691,38 +644,46 @@ struct drm_gem_object {
uint32_t pending_read_domains; uint32_t pending_read_domains;
uint32_t pending_write_domain; uint32_t pending_write_domain;
/**
* dma_buf - dma buf associated with this GEM object
*
* Pointer to the dma-buf associated with this gem object (either
* through importing or exporting). We break the resulting reference
* loop when the last gem handle for this object is released.
*
* Protected by obj->object_name_lock
*/
struct dma_buf *dma_buf;
}; };
#include <drm/drm_crtc.h> #include <drm/drm_crtc.h>
/* per-master structure */ /**
* struct drm_master - drm master structure
*
* @refcount: Refcount for this master object.
* @minor: Link back to minor char device we are master for. Immutable.
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
* @unique_len: Length of unique field. Protected by drm_global_mutex.
* @unique_size: Amount allocated. Protected by drm_global_mutex.
* @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
* @magicfree: List of used authentication tokens. Protected by struct_mutex.
* @lock: DRI lock information.
* @driver_priv: Pointer to driver-private information.
*/
struct drm_master { struct drm_master {
struct kref refcount;
struct kref refcount; /* refcount for this master */ struct drm_minor *minor;
char *unique;
struct list_head head; /**< each minor contains a list of masters */ int unique_len;
struct drm_minor *minor; /**< link back to minor we are a master for */ int unique_size;
// struct drm_open_hash magiclist;
char *unique; /**< Unique identifier: e.g., busid */ // struct list_head magicfree;
int unique_len; /**< Length of unique field */ // struct drm_lock_data lock;
int unique_size; /**< amount allocated */ void *driver_priv;
int blocked; /**< Blocked due to VC switch? */
/** \name Authentication */
/*@{ */
// struct drm_open_hash magiclist;
// struct list_head magicfree;
/*@} */
// struct drm_lock_data lock; /**< Information on hardware lock */
void *driver_priv; /**< Private structure for driver to use */
}; };
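Since @refcount is an ordinary kref, drm_master lifetime follows the usual get/put pattern. A sketch of the get side plus a hypothetical release callback (the destroy function below is illustrative, not this tree's implementation):

struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}

static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);

	kfree(master->unique);		/* busid string owned by the master */
	kfree(master);
}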
#if 0
/* Size of ringbuffer for vblank timestamps. Just double-buffer /* Size of ringbuffer for vblank timestamps. Just double-buffer
* in initial implementation. * in initial implementation.
*/ */
@ -739,17 +700,8 @@ struct drm_master {
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) #define DRM_SCANOUTPOS_ACCURATE (1 << 2)
struct drm_bus { struct drm_bus {
int bus_type;
int (*get_irq)(struct drm_device *dev);
const char *(*get_name)(struct drm_device *dev);
int (*set_busid)(struct drm_device *dev, struct drm_master *master); int (*set_busid)(struct drm_device *dev, struct drm_master *master);
int (*set_unique)(struct drm_device *dev, struct drm_master *master,
struct drm_unique *unique);
int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
}; };
#endif
#define DRM_IRQ_ARGS int irq, void *arg
/** /**
* DRM driver structure. This structure represent the common code for * DRM driver structure. This structure represent the common code for
@ -876,7 +828,7 @@ struct drm_driver {
/* these have to be filled in */ /* these have to be filled in */
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); irqreturn_t(*irq_handler) (int irq, void *arg);
void (*irq_preinstall) (struct drm_device *dev); void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev); int (*irq_postinstall) (struct drm_device *dev);
void (*irq_uninstall) (struct drm_device *dev); void (*irq_uninstall) (struct drm_device *dev);
@ -891,12 +843,15 @@ struct drm_driver {
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
u32 driver_features; u32 driver_features;
int dev_priv_size;
}; };
#define DRM_MINOR_UNASSIGNED 0 enum drm_minor_type {
#define DRM_MINOR_LEGACY 1 DRM_MINOR_LEGACY,
#define DRM_MINOR_CONTROL 2 DRM_MINOR_CONTROL,
#define DRM_MINOR_RENDER 3 DRM_MINOR_RENDER,
DRM_MINOR_CNT,
};
/** /**
* Info file list entry. This structure represents a debugfs or proc file to * Info file list entry. This structure represents a debugfs or proc file to
@ -925,33 +880,17 @@ struct drm_info_node {
struct drm_minor { struct drm_minor {
int index; /**< Minor device number */ int index; /**< Minor device number */
int type; /**< Control or render */ int type; /**< Control or render */
// dev_t device; /**< Device number for mknod */
// struct device kdev; /**< Linux device */ // struct device kdev; /**< Linux device */
struct drm_device *dev; struct drm_device *dev;
// struct proc_dir_entry *proc_root; /**< proc directory entry */ struct dentry *debugfs_root;
// struct drm_info_node proc_nodes;
// struct dentry *debugfs_root;
// struct drm_info_node debugfs_nodes;
struct drm_master *master; /* currently active master for this node */ struct list_head debugfs_list;
// struct list_head master_list; struct mutex debugfs_lock; /* Protects debugfs_list. */
// struct drm_mode_group mode_group;
};
/* mode specified on the command line */ /* currently active master for this node. Protected by master_mutex */
struct drm_cmdline_mode { struct drm_master *master;
bool specified; struct drm_mode_group mode_group;
bool refresh_specified;
bool bpp_specified;
int xres, yres;
int bpp;
int refresh;
bool rb;
bool interlace;
bool cvt;
bool margins;
enum drm_connector_force force;
}; };
@ -962,18 +901,23 @@ struct drm_cmdline_mode {
*/ */
struct drm_device { struct drm_device {
struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
char *devname; /**< For /proc/interrupts */
int if_version; /**< Highest interface version set */ int if_version; /**< Highest interface version set */
struct device *dev; /**< Device structure of bus-device */
struct drm_driver *driver; /**< DRM driver managing the device */
void *dev_private; /**< DRM driver private data */
struct drm_minor *primary; /**< Primary node */
atomic_t unplugged; /**< Flag whether dev is dead */
/** \name Locks */ /** \name Locks */
/*@{ */ /*@{ */
spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
struct mutex struct_mutex; /**< For others */ struct mutex struct_mutex; /**< For others */
struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
/*@} */ /*@} */
/** \name Usage Counters */ /** \name Usage Counters */
/*@{ */ /*@{ */
int open_count; /**< Outstanding files open */ int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
int buf_use; /**< Buffers in use -- cannot alloc */ int buf_use; /**< Buffers in use -- cannot alloc */
atomic_t buf_alloc; /**< Buffer allocation in progress */ atomic_t buf_alloc; /**< Buffer allocation in progress */
/*@} */ /*@} */
@ -1003,6 +947,8 @@ struct drm_device {
/** \name Context support */ /** \name Context support */
/*@{ */ /*@{ */
bool irq_enabled; /**< True if irq handler is enabled */ bool irq_enabled; /**< True if irq handler is enabled */
int irq;
__volatile__ long context_flag; /**< Context swapping flag */ __volatile__ long context_flag; /**< Context swapping flag */
int last_context; /**< Last current context */ int last_context; /**< Last current context */
/*@} */ /*@} */
@ -1018,6 +964,11 @@ struct drm_device {
*/ */
bool vblank_disable_allowed; bool vblank_disable_allowed;
/* array of size num_crtcs */
struct drm_vblank_crtc *vblank;
spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
spinlock_t vbl_lock;
u32 max_vblank_count; /**< size of vblank counter register */ u32 max_vblank_count; /**< size of vblank counter register */
@ -1031,21 +982,10 @@ struct drm_device {
// struct drm_agp_head *agp; /**< AGP data */ // struct drm_agp_head *agp; /**< AGP data */
struct device *dev; /**< Device structure */
struct pci_dev *pdev; /**< PCI device structure */ struct pci_dev *pdev; /**< PCI device structure */
int pci_vendor; /**< PCI vendor id */
int pci_device; /**< PCI device id */
unsigned int num_crtcs; /**< Number of CRTCs on this device */
void *dev_private; /**< device private data */
struct address_space *dev_mapping;
// struct drm_sigdata sigdata; /**< For block_all_signals */
// sigset_t sigmask;
struct drm_driver *driver; unsigned int num_crtcs; /**< Number of CRTCs on this device */
// struct drm_local_map *agp_buffer_map;
// unsigned int agp_buffer_token;
// struct drm_minor *control; /**< Control node for card */
struct drm_minor *primary; /**< render type primary screen head */
struct drm_mode_config mode_config; /**< Current mode config */ struct drm_mode_config mode_config; /**< Current mode config */
@ -1056,8 +996,6 @@ struct drm_device {
struct drm_vma_offset_manager *vma_offset_manager; struct drm_vma_offset_manager *vma_offset_manager;
/*@} */ /*@} */
int switch_power_state; int switch_power_state;
atomic_t unplugged; /* device has been unplugged or gone away */
}; };
#define DRM_SWITCH_POWER_ON 0 #define DRM_SWITCH_POWER_ON 0
@ -1071,11 +1009,6 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
return ((dev->driver->driver_features & feature) ? 1 : 0); return ((dev->driver->driver_features & feature) ? 1 : 0);
} }
static inline int drm_dev_to_irq(struct drm_device *dev)
{
return dev->pdev->irq;
}
static inline void drm_device_set_unplugged(struct drm_device *dev) static inline void drm_device_set_unplugged(struct drm_device *dev)
{ {
smp_wmb(); smp_wmb();
@ -1089,10 +1022,6 @@ static inline int drm_device_is_unplugged(struct drm_device *dev)
return ret; return ret;
} }
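The two inlines above pair memory barriers: the smp_wmb() orders teardown stores before the flag store, and an smp_rmb() after reading the flag orders it before any later loads. Reconstructed in full (matching mainline kernels of this vintage), they look like:

static inline void drm_device_set_unplugged(struct drm_device *dev)
{
	smp_wmb();
	atomic_set(&dev->unplugged, 1);
}

static inline int drm_device_is_unplugged(struct drm_device *dev)
{
	int ret = atomic_read(&dev->unplugged);
	smp_rmb();
	return ret;
}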
static inline bool drm_modeset_is_locked(struct drm_device *dev)
{
return mutex_is_locked(&dev->mode_config.mutex);
}
/******************************************************************/ /******************************************************************/
/** \name Internal function definitions */ /** \name Internal function definitions */
@ -1104,11 +1033,11 @@ extern long drm_ioctl(struct file *filp,
extern long drm_compat_ioctl(struct file *filp, extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern int drm_lastclose(struct drm_device *dev); extern int drm_lastclose(struct drm_device *dev);
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
/* Device support (drm_fops.h) */ /* Device support (drm_fops.h) */
extern struct mutex drm_global_mutex; extern struct mutex drm_global_mutex;
extern int drm_open(struct inode *inode, struct file *filp); extern int drm_open(struct inode *inode, struct file *filp);
extern int drm_stub_open(struct inode *inode, struct file *filp);
extern ssize_t drm_read(struct file *filp, char __user *buffer, extern ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset); size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp); extern int drm_release(struct inode *inode, struct file *filp);
@ -1146,29 +1075,6 @@ extern int drm_setversion(struct drm_device *dev, void *data,
extern int drm_noop(struct drm_device *dev, void *data, extern int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
/* Context IOCTL support (drm_context.h) */
extern int drm_resctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_addctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_getctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_switchctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_newctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_rmctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_ctxbitmap_init(struct drm_device *dev);
extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
extern int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* Authentication IOCTL support (drm_auth.h) */ /* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct drm_device *dev, void *data, extern int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
@ -1179,7 +1085,7 @@ extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
/* Cache management (drm_cache.c) */ /* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages); void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st); void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(char *addr, unsigned long length); void drm_clflush_virt_range(void *addr, unsigned long length);
/* Locking IOCTL support (drm_lock.h) */ /* Locking IOCTL support (drm_lock.h) */
extern int drm_lock(struct drm_device *dev, void *data, extern int drm_lock(struct drm_device *dev, void *data,
@ -1232,7 +1138,7 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
/* IRQ support (drm_irq.h) */ /* IRQ support (drm_irq.h) */
extern int drm_control(struct drm_device *dev, void *data, extern int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
extern int drm_irq_install(struct drm_device *dev); extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev); extern int drm_irq_uninstall(struct drm_device *dev);
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
@ -1246,8 +1152,14 @@ extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
extern bool drm_handle_vblank(struct drm_device *dev, int crtc); extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc); extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc); extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc); extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev); extern void drm_vblank_cleanup(struct drm_device *dev);
extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags); struct timeval *tvblank, unsigned flags);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
@ -1259,20 +1171,6 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode); const struct drm_display_mode *mode);
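drm_vblank_get()/drm_vblank_put() maintain a reference count that keeps the vblank interrupt enabled while anyone needs it. A hypothetical driver-side use (the wait itself is a placeholder):

if (drm_vblank_get(dev, crtc) == 0) {
	/* vblank irq for this crtc stays enabled until the put */
	wait_for_pending_flip();	/* placeholder for driver logic */
	drm_vblank_put(dev, crtc);
}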
extern bool
drm_mode_parse_command_line_for_connector(const char *mode_option,
struct drm_connector *connector,
struct drm_cmdline_mode *mode);
extern struct drm_display_mode *
drm_mode_create_from_cmdline_mode(struct drm_device *dev,
struct drm_cmdline_mode *cmd);
extern int drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode);
extern int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode,
int index);
/* Modesetting support */ /* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
@ -1294,7 +1192,6 @@ extern void drm_master_put(struct drm_master **master);
extern void drm_put_dev(struct drm_device *dev); extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev); extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug; extern unsigned int drm_debug;
extern unsigned int drm_rnodes;
#if 0 #if 0
extern unsigned int drm_vblank_offdelay; extern unsigned int drm_vblank_offdelay;
@ -1302,12 +1199,9 @@ extern unsigned int drm_timestamp_precision;
extern unsigned int drm_timestamp_monotonic; extern unsigned int drm_timestamp_monotonic;
extern struct class *drm_class; extern struct class *drm_class;
extern struct dentry *drm_debugfs_root;
extern struct idr drm_minors_idr;
extern struct drm_local_map *drm_getsarea(struct drm_device *dev); extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
#endif
/* Debugfs support */ /* Debugfs support */
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
@ -1318,6 +1212,8 @@ extern int drm_debugfs_create_files(const struct drm_info_list *files,
extern int drm_debugfs_remove_files(const struct drm_info_list *files, extern int drm_debugfs_remove_files(const struct drm_info_list *files,
int count, struct drm_minor *minor); int count, struct drm_minor *minor);
extern int drm_debugfs_cleanup(struct drm_minor *minor); extern int drm_debugfs_cleanup(struct drm_minor *minor);
extern int drm_debugfs_connector_add(struct drm_connector *connector);
extern void drm_debugfs_connector_remove(struct drm_connector *connector);
#else #else
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id, static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root) struct dentry *root)
@ -1342,6 +1238,15 @@ static inline int drm_debugfs_cleanup(struct drm_minor *minor)
{ {
return 0; return 0;
} }
static inline int drm_debugfs_connector_add(struct drm_connector *connector)
{
return 0;
}
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
}
#endif #endif
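A hypothetical use of the debugfs file API above; the show callback and file list are illustrative, and struct drm_info_list's {name, show, driver_features, data} layout is assumed from kernels of this era:

static int my_counters_show(struct seq_file *m, void *data)
{
	seq_printf(m, "bad EDIDs seen: %u\n", 0u);	/* placeholder value */
	return 0;
}

static const struct drm_info_list my_debugfs_files[] = {
	{ "my_counters", my_counters_show, 0, NULL },
};

/* at driver load: */
drm_debugfs_create_files(my_debugfs_files, ARRAY_SIZE(my_debugfs_files), minor);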
/* Info file support */ /* Info file support */
@ -1368,7 +1273,6 @@ extern int drm_ati_pcigart_init(struct drm_device *dev,
struct drm_ati_pcigart_info * gart_info); struct drm_ati_pcigart_info * gart_info);
extern int drm_ati_pcigart_cleanup(struct drm_device *dev, extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
struct drm_ati_pcigart_info * gart_info); struct drm_ati_pcigart_info * gart_info);
#endif
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align); size_t align);
@ -1380,9 +1284,8 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
struct drm_sysfs_class; struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name); extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(void); extern void drm_sysfs_destroy(void);
extern int drm_sysfs_device_add(struct drm_minor *minor); extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
extern void drm_sysfs_hotplug_event(struct drm_device *dev); extern void drm_sysfs_hotplug_event(struct drm_device *dev);
extern void drm_sysfs_device_remove(struct drm_minor *minor);
extern int drm_sysfs_connector_add(struct drm_connector *connector); extern int drm_sysfs_connector_add(struct drm_connector *connector);
extern void drm_sysfs_connector_remove(struct drm_connector *connector); extern void drm_sysfs_connector_remove(struct drm_connector *connector);
#endif #endif
@ -1443,7 +1346,7 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed); bool dirty, bool accessed);
@ -1477,7 +1380,7 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{ {
} }
//#include <drm/drm_mem_util.h> #include <drm/drm_mem_util.h>
extern int drm_fill_in_dev(struct drm_device *dev, extern int drm_fill_in_dev(struct drm_device *dev,
const struct pci_device_id *ent, const struct pci_device_id *ent,
@ -1486,10 +1389,13 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
/*@}*/ /*@}*/
#if 0
extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
extern int drm_get_pci_dev(struct pci_dev *pdev, extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent, const struct pci_device_id *ent,
struct drm_driver *driver); struct drm_driver *driver);
#endif
#define DRM_PCIE_SPEED_25 1 #define DRM_PCIE_SPEED_25 1
#define DRM_PCIE_SPEED_50 2 #define DRM_PCIE_SPEED_50 2


@ -32,8 +32,8 @@
#include <linux/fb.h> #include <linux/fb.h>
#include <linux/hdmi.h> #include <linux/hdmi.h>
#include <drm/drm_mode.h> #include <drm/drm_mode.h>
#include <drm/drm_fourcc.h> #include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
struct drm_device; struct drm_device;
struct drm_mode_set; struct drm_mode_set;
@ -41,6 +41,7 @@ struct drm_framebuffer;
struct drm_object_properties; struct drm_object_properties;
struct drm_file; struct drm_file;
struct drm_clip_rect; struct drm_clip_rect;
struct device_node;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc #define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@ -51,6 +52,7 @@ struct drm_clip_rect;
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee #define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd #define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
#define DRM_MODE_OBJECT_ANY 0
struct drm_mode_object { struct drm_mode_object {
uint32_t id; uint32_t id;
@ -65,130 +67,31 @@ struct drm_object_properties {
uint64_t values[DRM_OBJECT_MAX_PROPERTY]; uint64_t values[DRM_OBJECT_MAX_PROPERTY];
}; };
/* static inline int64_t U642I64(uint64_t val)
* Note on terminology: here, for brevity and convenience, we refer to connector
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
* DVI, etc. And 'screen' refers to the whole of the visible display, which
* may span multiple monitors (and therefore multiple CRTC and connector
* structures).
*/
enum drm_mode_status {
MODE_OK = 0, /* Mode OK */
MODE_HSYNC, /* hsync out of range */
MODE_VSYNC, /* vsync out of range */
MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
MODE_V_ILLEGAL, /* mode has illegal vertical timings */
MODE_BAD_WIDTH, /* requires an unsupported linepitch */
MODE_NOMODE, /* no mode with a matching name */
MODE_NO_INTERLACE, /* interlaced mode not supported */
MODE_NO_DBLESCAN, /* doublescan mode not supported */
MODE_NO_VSCAN, /* multiscan mode not supported */
MODE_MEM, /* insufficient video memory */
MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
MODE_MEM_VIRT, /* insufficient video memory given virtual size */
MODE_NOCLOCK, /* no fixed clock available */
MODE_CLOCK_HIGH, /* clock required is too high */
MODE_CLOCK_LOW, /* clock required is too low */
MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
MODE_BAD_HVALUE, /* horizontal timing was out of range */
MODE_BAD_VVALUE, /* vertical timing was out of range */
MODE_BAD_VSCAN, /* VScan value out of range */
MODE_HSYNC_NARROW, /* horizontal sync too narrow */
MODE_HSYNC_WIDE, /* horizontal sync too wide */
MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
MODE_HBLANK_WIDE, /* horizontal blanking too wide */
MODE_VSYNC_NARROW, /* vertical sync too narrow */
MODE_VSYNC_WIDE, /* vertical sync too wide */
MODE_VBLANK_NARROW, /* vertical blanking too narrow */
MODE_VBLANK_WIDE, /* vertical blanking too wide */
MODE_PANEL, /* exceeds panel dimensions */
MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
MODE_ONE_WIDTH, /* only one width is supported */
MODE_ONE_HEIGHT, /* only one height is supported */
MODE_ONE_SIZE, /* only one resolution is supported */
MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
MODE_NO_STEREO, /* stereo modes not supported */
MODE_UNVERIFIED = -3, /* mode needs to be reverified */
MODE_BAD = -2, /* unspecified reason */
MODE_ERROR = -1 /* error condition */
};
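These status codes are what a connector helper's mode_valid hook returns to accept or reject a mode. A hypothetical filter for a single-link TMDS output might read:

static enum drm_mode_status
my_connector_mode_valid(struct drm_connector *connector,
			struct drm_display_mode *mode)
{
	if (mode->clock > 165000)		/* kHz; single-link TMDS limit */
		return MODE_CLOCK_HIGH;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		return MODE_NO_INTERLACE;
	return MODE_OK;
}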
#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
DRM_MODE_TYPE_CRTC_C)
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
.name = nm, .status = 0, .type = (t), .clock = (c), \
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
.vscan = (vs), .flags = (f), \
.base.type = DRM_MODE_OBJECT_MODE
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
struct drm_display_mode {
/* Header */
struct list_head head;
struct drm_mode_object base;
char name[DRM_DISPLAY_MODE_LEN];
enum drm_mode_status status;
unsigned int type;
/* Proposed mode values */
int clock; /* in kHz */
int hdisplay;
int hsync_start;
int hsync_end;
int htotal;
int hskew;
int vdisplay;
int vsync_start;
int vsync_end;
int vtotal;
int vscan;
unsigned int flags;
/* Addressable image size (may be 0 for projectors, etc.) */
int width_mm;
int height_mm;
/* Actual mode we give to hw */
int crtc_clock; /* in KHz */
int crtc_hdisplay;
int crtc_hblank_start;
int crtc_hblank_end;
int crtc_hsync_start;
int crtc_hsync_end;
int crtc_htotal;
int crtc_hskew;
int crtc_vdisplay;
int crtc_vblank_start;
int crtc_vblank_end;
int crtc_vsync_start;
int crtc_vsync_end;
int crtc_vtotal;
/* Driver private mode info */
int private_size;
int *private;
int private_flags;
int vrefresh; /* in Hz */
int hsync; /* in kHz */
enum hdmi_picture_aspect picture_aspect_ratio;
};
static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
{ {
return mode->flags & DRM_MODE_FLAG_3D_MASK; return (int64_t)*((int64_t *)&val);
} }
static inline uint64_t I642U64(int64_t val)
{
return (uint64_t)*((uint64_t *)&val);
}
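U642I64()/I642U64() only reinterpret the 64-bit pattern, so signed values round-trip through the unsigned property/ioctl plumbing unchanged; for example:

uint64_t wire = I642U64(-90);	/* 0xffffffffffffffa6 on the wire */
int64_t  back = U642I64(wire);	/* -90 again, bit pattern preserved */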
/* rotation property bits */
#define DRM_ROTATE_0 0
#define DRM_ROTATE_90 1
#define DRM_ROTATE_180 2
#define DRM_ROTATE_270 3
#define DRM_REFLECT_X 4
#define DRM_REFLECT_Y 5
enum drm_connector_force {
DRM_FORCE_UNSPECIFIED,
DRM_FORCE_OFF,
DRM_FORCE_ON, /* force on analog part normally */
DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
};
#include <drm/drm_modes.h>
enum drm_connector_status { enum drm_connector_status {
connector_status_connected = 1, connector_status_connected = 1,
@ -227,6 +130,9 @@ struct drm_display_info {
enum subpixel_order subpixel_order; enum subpixel_order subpixel_order;
u32 color_formats; u32 color_formats;
/* Mask of supported hdmi deep color modes */
u8 edid_hdmi_dc_modes;
u8 cea_rev; u8 cea_rev;
}; };
@ -307,10 +213,15 @@ struct drm_property {
char name[DRM_PROP_NAME_LEN]; char name[DRM_PROP_NAME_LEN];
uint32_t num_values; uint32_t num_values;
uint64_t *values; uint64_t *values;
struct drm_device *dev;
struct list_head enum_blob_list; struct list_head enum_blob_list;
}; };
void drm_modeset_lock_all(struct drm_device *dev);
void drm_modeset_unlock_all(struct drm_device *dev);
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
struct drm_crtc; struct drm_crtc;
struct drm_connector; struct drm_connector;
struct drm_encoder; struct drm_encoder;
@ -386,7 +297,10 @@ struct drm_crtc_funcs {
* drm_crtc - central CRTC control structure * drm_crtc - central CRTC control structure
* @dev: parent DRM device * @dev: parent DRM device
* @head: list management * @head: list management
* @mutex: per-CRTC locking
* @base: base KMS object for ID tracking etc. * @base: base KMS object for ID tracking etc.
* @primary: primary plane for this CRTC
* @cursor: cursor plane for this CRTC
* @enabled: is this CRTC enabled? * @enabled: is this CRTC enabled?
* @mode: current mode timings * @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs * @hwmode: mode timings as programmed to hw regs
@ -409,6 +323,7 @@ struct drm_crtc_funcs {
*/ */
struct drm_crtc { struct drm_crtc {
struct drm_device *dev; struct drm_device *dev;
struct device_node *port;
struct list_head head; struct list_head head;
/** /**
@ -418,12 +333,17 @@ struct drm_crtc {
* state, ...) and a write lock for everything which can be updated * without a full modeset (fb, cursor data, ...)
* without a full modeset (fb, cursor data, ...) * without a full modeset (fb, cursor data, ...)
*/ */
struct mutex mutex; struct drm_modeset_lock mutex;
struct drm_mode_object base; struct drm_mode_object base;
/* framebuffer the connector is currently bound to */ /* primary and cursor planes for CRTC */
struct drm_framebuffer *fb; struct drm_plane *primary;
struct drm_plane *cursor;
/* position of cursor plane on crtc */
int cursor_x;
int cursor_y;
/* Temporary tracking of the old fb while a modeset is ongoing. Used /* Temporary tracking of the old fb while a modeset is ongoing. Used
* by drm_mode_set_config_internal to implement correct refcounting. */ * by drm_mode_set_config_internal to implement correct refcounting. */
@ -514,6 +434,7 @@ struct drm_encoder_funcs {
* @dev: parent DRM device * @dev: parent DRM device
* @head: list management * @head: list management
* @base: base KMS object * @base: base KMS object
* @name: encoder name
* @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
* @possible_crtcs: bitmask of potential CRTC bindings * @possible_crtcs: bitmask of potential CRTC bindings
* @possible_clones: bitmask of potential sibling encoders for cloning * @possible_clones: bitmask of potential sibling encoders for cloning
@ -530,6 +451,7 @@ struct drm_encoder {
struct list_head head; struct list_head head;
struct drm_mode_object base; struct drm_mode_object base;
char *name;
int encoder_type; int encoder_type;
uint32_t possible_crtcs; uint32_t possible_crtcs;
uint32_t possible_clones; uint32_t possible_clones;
@ -540,13 +462,6 @@ struct drm_encoder {
void *helper_private; void *helper_private;
}; };
enum drm_connector_force {
DRM_FORCE_UNSPECIFIED,
DRM_FORCE_OFF,
DRM_FORCE_ON, /* force on analog part normally */
DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
};
/* should we poll this connector for connects and disconnects */ /* should we poll this connector for connects and disconnects */
/* hot plug detectable */ /* hot plug detectable */
#define DRM_CONNECTOR_POLL_HPD (1 << 0) #define DRM_CONNECTOR_POLL_HPD (1 << 0)
@ -565,6 +480,7 @@ enum drm_connector_force {
* @attr: sysfs attributes * @attr: sysfs attributes
* @head: list management * @head: list management
* @base: base KMS object * @base: base KMS object
* @name: connector name
* @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
* @connector_type_id: index into connector type enum * @connector_type_id: index into connector type enum
* @interlace_allowed: can this connector handle interlaced modes? * @interlace_allowed: can this connector handle interlaced modes?
@ -603,6 +519,7 @@ struct drm_connector {
struct drm_mode_object base; struct drm_mode_object base;
char *name;
int connector_type; int connector_type;
int connector_type_id; int connector_type_id;
bool interlace_allowed; bool interlace_allowed;
@ -621,6 +538,8 @@ struct drm_connector {
struct drm_property_blob *edid_blob_ptr; struct drm_property_blob *edid_blob_ptr;
struct drm_object_properties properties; struct drm_object_properties properties;
struct drm_property_blob *path_blob_ptr;
uint8_t polled; /* DRM_CONNECTOR_POLL_* */ uint8_t polled; /* DRM_CONNECTOR_POLL_* */
/* requested DPMS state */ /* requested DPMS state */
@ -630,6 +549,7 @@ struct drm_connector {
/* forced on connector */ /* forced on connector */
enum drm_connector_force force; enum drm_connector_force force;
bool override_edid;
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
struct drm_encoder *encoder; /* currently active encoder */ struct drm_encoder *encoder; /* currently active encoder */
@ -642,6 +562,8 @@ struct drm_connector {
int audio_latency[2]; int audio_latency[2];
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
unsigned bad_edid_counter; unsigned bad_edid_counter;
struct dentry *debugfs_entry;
}; };
/** /**
@ -665,6 +587,12 @@ struct drm_plane_funcs {
struct drm_property *property, uint64_t val); struct drm_property *property, uint64_t val);
}; };
enum drm_plane_type {
DRM_PLANE_TYPE_OVERLAY,
DRM_PLANE_TYPE_PRIMARY,
DRM_PLANE_TYPE_CURSOR,
};
/** /**
* drm_plane - central DRM plane control structure * drm_plane - central DRM plane control structure
* @dev: DRM device this plane belongs to * @dev: DRM device this plane belongs to
@ -677,6 +605,7 @@ struct drm_plane_funcs {
* @fb: currently bound fb * @fb: currently bound fb
* @funcs: helper functions * @funcs: helper functions
* @properties: property tracking for this plane * @properties: property tracking for this plane
* @type: type of plane (overlay, primary, cursor)
*/ */
struct drm_plane { struct drm_plane {
struct drm_device *dev; struct drm_device *dev;
@ -694,6 +623,8 @@ struct drm_plane {
const struct drm_plane_funcs *funcs; const struct drm_plane_funcs *funcs;
struct drm_object_properties properties; struct drm_object_properties properties;
enum drm_plane_type type;
}; };
/** /**
@ -835,6 +766,8 @@ struct drm_mode_group {
*/ */
struct drm_mode_config { struct drm_mode_config {
struct mutex mutex; /* protects configuration (mode lists etc.) */ struct mutex mutex; /* protects configuration (mode lists etc.) */
struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
struct mutex idr_mutex; /* for IDR management */ struct mutex idr_mutex; /* for IDR management */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */ /* this is limited to one for now */
@ -856,7 +789,15 @@ struct drm_mode_config {
struct list_head bridge_list; struct list_head bridge_list;
int num_encoder; int num_encoder;
struct list_head encoder_list; struct list_head encoder_list;
int num_plane;
/*
* Track # of overlay planes separately from # of total planes. By
* default we only advertise overlay planes to userspace; if userspace
* sets the "universal plane" capability bit, we'll go ahead and
* expose all planes.
*/
int num_overlay_plane;
int num_total_plane;
struct list_head plane_list; struct list_head plane_list;
int num_crtc; int num_crtc;
@ -878,6 +819,8 @@ struct drm_mode_config {
struct list_head property_blob_list; struct list_head property_blob_list;
struct drm_property *edid_property; struct drm_property *edid_property;
struct drm_property *dpms_property; struct drm_property *dpms_property;
struct drm_property *path_property;
struct drm_property *plane_type_property;
/* DVI-I properties */ /* DVI-I properties */
struct drm_property *dvi_i_subconnector_property; struct drm_property *dvi_i_subconnector_property;
@ -900,6 +843,7 @@ struct drm_mode_config {
/* Optional properties */ /* Optional properties */
struct drm_property *scaling_mode_property; struct drm_property *scaling_mode_property;
struct drm_property *aspect_ratio_property;
struct drm_property *dirty_info_property; struct drm_property *dirty_info_property;
/* dumb ioctl parameters */ /* dumb ioctl parameters */
@ -907,6 +851,9 @@ struct drm_mode_config {
/* whether async page flip is supported or not */ /* whether async page flip is supported or not */
bool async_page_flip; bool async_page_flip;
/* cursor size */
uint32_t cursor_width, cursor_height;
}; };
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@ -923,10 +870,11 @@ struct drm_prop_enum_list {
char *name; char *name;
}; };
extern void drm_modeset_lock_all(struct drm_device *dev); extern int drm_crtc_init_with_planes(struct drm_device *dev,
extern void drm_modeset_unlock_all(struct drm_device *dev); struct drm_crtc *crtc,
extern void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs);
extern int drm_crtc_init(struct drm_device *dev, extern int drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs); const struct drm_crtc_funcs *funcs);
@ -951,6 +899,8 @@ extern int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector, struct drm_connector *connector,
const struct drm_connector_funcs *funcs, const struct drm_connector_funcs *funcs,
int connector_type); int connector_type);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
extern void drm_connector_cleanup(struct drm_connector *connector); extern void drm_connector_cleanup(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */ /* helper to unplug all connectors from sysfs for device */
@ -978,19 +928,30 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
} }
extern int drm_universal_plane_init(struct drm_device *dev,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats,
uint32_t format_count,
enum drm_plane_type type);
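A hypothetical registration of a cursor plane through the new entry point; the plane allocation, my_plane_funcs and the pipe index are assumptions, while DRM_FORMAT_ARGB8888 comes from drm_fourcc.h:

static const uint32_t cursor_formats[] = { DRM_FORMAT_ARGB8888 };

ret = drm_universal_plane_init(dev, plane, 1 << pipe,
			       &my_plane_funcs,
			       cursor_formats, ARRAY_SIZE(cursor_formats),
			       DRM_PLANE_TYPE_CURSOR);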
extern int drm_plane_init(struct drm_device *dev, extern int drm_plane_init(struct drm_device *dev,
struct drm_plane *plane, struct drm_plane *plane,
unsigned long possible_crtcs, unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs, const struct drm_plane_funcs *funcs,
const uint32_t *formats, uint32_t format_count, const uint32_t *formats, uint32_t format_count,
bool priv); bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane); extern void drm_plane_cleanup(struct drm_plane *plane);
extern void drm_plane_force_disable(struct drm_plane *plane); extern void drm_plane_force_disable(struct drm_plane *plane);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb);
extern void drm_encoder_cleanup(struct drm_encoder *encoder); extern void drm_encoder_cleanup(struct drm_encoder *encoder);
extern const char *drm_get_connector_name(const struct drm_connector *connector);
extern const char *drm_get_connector_status_name(enum drm_connector_status status); extern const char *drm_get_connector_status_name(enum drm_connector_status status);
extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
extern const char *drm_get_dpms_name(int val); extern const char *drm_get_dpms_name(int val);
extern const char *drm_get_dvi_i_subconnector_name(int val); extern const char *drm_get_dvi_i_subconnector_name(int val);
extern const char *drm_get_dvi_i_select_name(int val); extern const char *drm_get_dvi_i_select_name(int val);
@ -998,41 +959,38 @@ extern const char *drm_get_tv_subconnector_name(int val);
extern const char *drm_get_tv_select_name(int val); extern const char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv); extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern void drm_mode_group_destroy(struct drm_mode_group *group);
extern void drm_reinit_primary_mode_group(struct drm_device *dev);
extern bool drm_probe_ddc(struct i2c_adapter *adapter); extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector, extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter); struct i2c_adapter *adapter);
extern struct edid *drm_edid_duplicate(const struct edid *edid); extern struct edid *drm_edid_duplicate(const struct edid *edid);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
extern void drm_mode_config_init(struct drm_device *dev); extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_reset(struct drm_device *dev); extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev); extern void drm_mode_config_cleanup(struct drm_device *dev);
extern void drm_mode_set_name(struct drm_display_mode *mode);
extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
extern int drm_mode_width(const struct drm_display_mode *mode);
extern int drm_mode_height(const struct drm_display_mode *mode);
/* for use by fb module */ extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); char *path);
extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
extern void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY, int maxPitch);
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
extern int drm_mode_hsync(const struct drm_display_mode *mode);
extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
extern void drm_mode_connector_list_update(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid); struct edid *edid);
static inline bool drm_property_type_is(struct drm_property *property,
uint32_t type)
{
/* instanceof for props.. handles extended type vs original types: */
if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
return property->flags & type;
}
static inline bool drm_property_type_valid(struct drm_property *property)
{
if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
}
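A typical use is validating an ioctl-supplied value against the property's declared type; a sketch, assuming the values[0]/values[1] = min/max layout that range properties use in this era:

if (drm_property_type_is(property, DRM_MODE_PROP_RANGE) &&
    (val < property->values[0] || val > property->values[1]))
	return -EINVAL;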
extern int drm_object_property_set_value(struct drm_mode_object *obj, extern int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property, struct drm_property *property,
uint64_t val); uint64_t val);
@ -1062,10 +1020,16 @@ extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int
struct drm_property *drm_property_create_bitmask(struct drm_device *dev, struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name, int flags, const char *name,
const struct drm_prop_enum_list *props, const struct drm_prop_enum_list *props,
int num_values); int num_props,
uint64_t supported_bits);
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name, const char *name,
uint64_t min, uint64_t max); uint64_t min, uint64_t max);
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
int flags, const char *name,
int64_t min, int64_t max);
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type);
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index, extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name); uint64_t value, const char *name);
@ -1073,17 +1037,16 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
char *formats[]); char *formats[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev); extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder); struct drm_encoder *encoder);
extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size); int gamma_size);
extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type); uint32_t id, uint32_t type);
/* IOCTLs */ /* IOCTLs */
extern int drm_mode_getresources(struct drm_device *dev, extern int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv); void *data, struct drm_file *file_priv);
@ -1129,21 +1092,12 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv); void *data, struct drm_file *file_priv);
extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match); extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
extern enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
extern bool drm_detect_hdmi_monitor(struct edid *edid); extern bool drm_detect_hdmi_monitor(struct edid *edid);
extern bool drm_detect_monitor_audio(struct edid *edid); extern bool drm_detect_monitor_audio(struct edid *edid);
extern bool drm_rgb_quant_range_selectable(struct edid *edid); extern bool drm_rgb_quant_range_selectable(struct edid *edid);
extern int drm_mode_page_flip_ioctl(struct drm_device *dev, extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv); void *data, struct drm_file *file_priv);
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool reduced, bool interlaced, bool margins);
extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool interlaced, int margins);
extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool interlaced, int margins, int GTF_M,
int GTF_2C, int GTF_K, int GTF_2J);
extern int drm_add_modes_noedid(struct drm_connector *connector, extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay); int hdisplay, int vdisplay);
extern void drm_set_preferred_mode(struct drm_connector *connector, extern void drm_set_preferred_mode(struct drm_connector *connector,
@ -1174,8 +1128,21 @@ extern int drm_format_plane_cpp(uint32_t format, int plane);
extern int drm_format_horz_chroma_subsampling(uint32_t format); extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format); extern int drm_format_vert_chroma_subsampling(uint32_t format);
extern const char *drm_get_format_name(uint32_t format); extern const char *drm_get_format_name(uint32_t format);
extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
unsigned int supported_rotations);
extern unsigned int drm_rotation_simplify(unsigned int rotation,
unsigned int supported_rotations);
/* Helpers */ /* Helpers */
static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
uint32_t id)
{
struct drm_mode_object *mo;
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
return mo ? obj_to_plane(mo) : NULL;
}
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
uint32_t id) uint32_t id)
{ {
@ -1192,4 +1159,33 @@ static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
return mo ? obj_to_encoder(mo) : NULL; return mo ? obj_to_encoder(mo) : NULL;
} }
static inline struct drm_connector *drm_connector_find(struct drm_device *dev,
uint32_t id)
{
struct drm_mode_object *mo;
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
return mo ? obj_to_connector(mo) : NULL;
}
static inline struct drm_property *drm_property_find(struct drm_device *dev,
uint32_t id)
{
struct drm_mode_object *mo;
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
return mo ? obj_to_property(mo) : NULL;
}
static inline struct drm_property_blob *
drm_property_blob_find(struct drm_device *dev, uint32_t id)
{
struct drm_mode_object *mo;
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
return mo ? obj_to_blob(mo) : NULL;
}
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, planelist) \
list_for_each_entry(plane, planelist, head) \
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
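A sketch of the iterator in use, force-disabling any legacy overlay still bound to a crtc (plane->crtc is assumed from this era's struct drm_plane):

drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
	if (plane->crtc == crtc)
		drm_plane_force_disable(plane);
}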
#endif /* __DRM_CRTC_H__ */ #endif /* __DRM_CRTC_H__ */


@ -114,7 +114,7 @@ struct drm_encoder_helper_funcs {
/** /**
* drm_connector_helper_funcs - helper operations for connectors * drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector * @get_modes: get mode list for this connector
* @mode_valid: is this mode valid on the given connector? * @mode_valid (optional): is this mode valid on the given connector?
* *
* The helper operations are called by the mid-layer CRTC helper. * The helper operations are called by the mid-layer CRTC helper.
*/ */
@ -125,7 +125,6 @@ struct drm_connector_helper_funcs {
struct drm_encoder *(*best_encoder)(struct drm_connector *connector); struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
}; };
extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
extern void drm_helper_disable_unused_functions(struct drm_device *dev); extern void drm_helper_disable_unused_functions(struct drm_device *dev);
extern int drm_crtc_helper_set_config(struct drm_mode_set *set); extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
@ -139,7 +138,7 @@ extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd); struct drm_mode_fb_cmd2 *mode_cmd);
static inline void drm_crtc_helper_add(struct drm_crtc *crtc, static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
@ -160,7 +159,16 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
connector->helper_private = (void *)funcs; connector->helper_private = (void *)funcs;
} }
extern int drm_helper_resume_force_mode(struct drm_device *dev); extern void drm_helper_resume_force_mode(struct drm_device *dev);
/* drm_probe_helper.c */
extern int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
uint32_t maxY);
extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector
*connector,
uint32_t maxX,
uint32_t maxY);
extern void drm_kms_helper_poll_init(struct drm_device *dev); extern void drm_kms_helper_poll_init(struct drm_device *dev);
extern void drm_kms_helper_poll_fini(struct drm_device *dev); extern void drm_kms_helper_poll_fini(struct drm_device *dev);
extern bool drm_helper_hpd_irq_event(struct drm_device *dev); extern bool drm_helper_hpd_irq_event(struct drm_device *dev);


@ -37,6 +37,7 @@
* eDP: Embedded DisplayPort version 1 * eDP: Embedded DisplayPort version 1
* DPI: DisplayPort Interoperability Guideline v1.1a * DPI: DisplayPort Interoperability Guideline v1.1a
* 1.2: DisplayPort 1.2 * 1.2: DisplayPort 1.2
* MST: Multistream Transport - part of DP 1.2a
* *
* 1.2 formally includes both eDP and DPI definitions. * 1.2 formally includes both eDP and DPI definitions.
*/ */
@ -103,9 +104,14 @@
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ #define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
/* Multiple stream transport */ /* Multiple stream transport */
#define DP_FAUX_CAP 0x020 /* 1.2 */
# define DP_FAUX_CAP_1 (1 << 0)
#define DP_MSTM_CAP 0x021 /* 1.2 */ #define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0) # define DP_MST_CAP (1 << 0)
#define DP_GUID 0x030 /* 1.2 */
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ #define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1 # define DP_PSR_IS_SUPPORTED 1
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */ #define DP_PSR_CAPS 0x071 /* XXX 1.2? */
@ -221,6 +227,16 @@
# define DP_PSR_CRC_VERIFICATION (1 << 2) # define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3) # define DP_PSR_FRAME_CAPTURE (1 << 3)
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
#define DP_BRANCH_DEVICE_CTRL 0x1a1
# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
#define DP_SINK_COUNT 0x200
/* prior to 1.2 bit 7 was reserved mbz */
# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
@@ -230,6 +246,9 @@
# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
# define DP_CP_IRQ (1 << 2)
# define DP_MCCS_IRQ (1 << 3)
# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */
# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */
# define DP_SINK_SPECIFIC_IRQ (1 << 6)
#define DP_LANE0_1_STATUS 0x202
@@ -279,11 +298,30 @@
#define DP_TEST_PATTERN 0x221
#define DP_TEST_CRC_R_CR 0x240
#define DP_TEST_CRC_G_Y 0x242
#define DP_TEST_CRC_B_CB 0x244
#define DP_TEST_SINK_MISC 0x246
#define DP_TEST_CRC_SUPPORTED (1 << 5)
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
# define DP_TEST_NAK (1 << 1)
# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
#define DP_TEST_EDID_CHECKSUM 0x261
#define DP_TEST_SINK 0x270
#define DP_TEST_SINK_START (1 << 0)
#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
# define DP_PAYLOAD_ACT_HANDLED (1 << 1)
#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */
/* up to ID_SLOT_63 at 0x2ff */
#define DP_SOURCE_OUI 0x300
#define DP_SINK_OUI 0x400
#define DP_BRANCH_OUI 0x500
@@ -291,6 +329,22 @@
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
/* 0-5 sink count */
# define DP_SINK_COUNT_CP_READY (1 << 6)
#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */
#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */
#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
@@ -308,6 +362,43 @@
# define DP_PSR_SINK_INTERNAL_ERROR 7
# define DP_PSR_SINK_STATE_MASK 0x07
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1
#define DP_PEER_DEVICE_MST_BRANCHING 0x2
#define DP_PEER_DEVICE_SST_SINK 0x3
#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4
/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */
#define DP_LINK_ADDRESS 0x01
#define DP_CONNECTION_STATUS_NOTIFY 0x02
#define DP_ENUM_PATH_RESOURCES 0x10
#define DP_ALLOCATE_PAYLOAD 0x11
#define DP_QUERY_PAYLOAD 0x12
#define DP_RESOURCE_STATUS_NOTIFY 0x13
#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14
#define DP_REMOTE_DPCD_READ 0x20
#define DP_REMOTE_DPCD_WRITE 0x21
#define DP_REMOTE_I2C_READ 0x22
#define DP_REMOTE_I2C_WRITE 0x23
#define DP_POWER_UP_PHY 0x24
#define DP_POWER_DOWN_PHY 0x25
#define DP_SINK_EVENT_NOTIFY 0x30
#define DP_QUERY_STREAM_ENC_STATUS 0x38
/* DP 1.2 MST sideband nak reasons - DP 1.2a Table 2-84 */
#define DP_NAK_WRITE_FAILURE 0x01
#define DP_NAK_INVALID_READ 0x02
#define DP_NAK_CRC_FAILURE 0x03
#define DP_NAK_BAD_PARAM 0x04
#define DP_NAK_DEFER 0x05
#define DP_NAK_LINK_FAILURE 0x06
#define DP_NAK_NO_RESOURCES 0x07
#define DP_NAK_DPCD_FAIL 0x08
#define DP_NAK_I2C_NAK 0x09
#define DP_NAK_ALLOCATE_FAIL 0x0a
#define MODE_I2C_START 1
#define MODE_I2C_WRITE 2
#define MODE_I2C_READ 4
@@ -398,4 +489,124 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}
/*
* DisplayPort AUX channel
*/
/**
* struct drm_dp_aux_msg - DisplayPort AUX channel transaction
* @address: address of the (first) register to access
* @request: contains the type of transaction (see DP_AUX_* macros)
* @reply: upon completion, contains the reply type of the transaction
* @buffer: pointer to a transmission or reception buffer
* @size: size of @buffer
*/
struct drm_dp_aux_msg {
unsigned int address;
u8 request;
u8 reply;
void *buffer;
size_t size;
};
/**
* struct drm_dp_aux - DisplayPort AUX channel
* @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
* @ddc: I2C adapter that can be used for I2C-over-AUX communication
* @dev: pointer to struct device that is the parent for this AUX channel
* @hw_mutex: internal mutex used for locking transfers
* @transfer: transfers a message representing a single AUX transaction
*
* The .dev field should be set to a pointer to the device that implements
* the AUX channel.
*
* The .name field may be used to specify the name of the I2C adapter. If set to
* NULL, dev_name() of .dev will be used.
*
* Drivers provide a hardware-specific implementation of how transactions
* are executed via the .transfer() function. A pointer to a drm_dp_aux_msg
* structure describing the transaction is passed into this function. Upon
* success, the implementation should return the number of payload bytes
 * that were transferred, or a negative error code on failure. Helpers
 * propagate errors from the .transfer() function, with the exception of
 * the -EBUSY error, which causes a transaction to be retried. On a short
 * transfer, helpers will return -EPROTO to make it simpler to check for failure.
*
* An AUX channel can also be used to transport I2C messages to a sink. A
* typical application of that is to access an EDID that's present in the
* sink device. The .transfer() function can also be used to execute such
* transactions. The drm_dp_aux_register_i2c_bus() function registers an
* I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
* should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
*
* Note that the aux helper code assumes that the .transfer() function
* only modifies the reply field of the drm_dp_aux_msg structure. The
* retry logic and i2c helpers assume this is the case.
*/
struct drm_dp_aux {
const char *name;
struct i2c_adapter ddc;
struct device *dev;
struct mutex hw_mutex;
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
};
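For illustration, a minimal sketch of how a driver might back this structure. Everything prefixed foo_ is hypothetical; only struct drm_dp_aux and the .transfer() contract described above come from this header.

struct foo_device {				/* hypothetical driver state */
	struct drm_dp_aux aux;
};

/* hypothetical hardware access; returns bytes transferred or -errno */
extern int foo_hw_aux_xfer(struct foo_device *fdev, u8 request,
			   unsigned int address, void *buffer, size_t size);

static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
				struct drm_dp_aux_msg *msg)
{
	struct foo_device *fdev = container_of(aux, struct foo_device, aux);
	int ret;

	ret = foo_hw_aux_xfer(fdev, msg->request, msg->address,
			      msg->buffer, msg->size);
	if (ret < 0)
		return ret;			/* helpers retry on -EBUSY */

	msg->reply = DP_AUX_NATIVE_REPLY_ACK;	/* only .reply is modified */
	return ret;				/* payload bytes transferred */
}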
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
/**
* drm_dp_dpcd_readb() - read a single byte from the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the register to read
* @valuep: location where the value of the register will be stored
*
* Returns the number of bytes transferred (1) on success, or a negative
* error code on failure.
*/
static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
unsigned int offset, u8 *valuep)
{
return drm_dp_dpcd_read(aux, offset, valuep, 1);
}
/**
* drm_dp_dpcd_writeb() - write a single byte to the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the register to write
* @value: value to write to the register
*
* Returns the number of bytes transferred (1) on success, or a negative
* error code on failure.
*/
static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
unsigned int offset, u8 value)
{
return drm_dp_dpcd_write(aux, offset, &value, 1);
}
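A hedged usage sketch of these helpers, reading the sink count register defined earlier in this header; 'aux' is assumed to be a working AUX channel and foo_ naming is ours.

static int foo_get_sink_count(struct drm_dp_aux *aux)
{
	u8 count;
	ssize_t err;

	err = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
	if (err < 0)
		return err;

	/* DP_GET_SINK_COUNT folds the DP 1.2 bit 7 back into the count */
	return DP_GET_SINK_COUNT(count);
}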
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE]);
/*
* DisplayPort link
*/
#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
struct drm_dp_link {
unsigned char revision;
unsigned int rate;
unsigned int num_lanes;
unsigned long capabilities;
};
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
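A hedged bring-up sketch using the three link helpers just declared; error handling is trimmed and the probe is assumed to fill the drm_dp_link fields from the sink's DPCD.

static int foo_link_up(struct drm_dp_aux *aux)
{
	struct drm_dp_link link;
	int ret;

	ret = drm_dp_link_probe(aux, &link);	/* fills rate/lanes/caps */
	if (ret < 0)
		return ret;

	ret = drm_dp_link_power_up(aux, &link);
	if (ret < 0)
		return ret;

	return drm_dp_link_configure(aux, &link);
}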
int drm_dp_aux_register(struct drm_dp_aux *aux);
void drm_dp_aux_unregister(struct drm_dp_aux *aux);
#endif /* _DRM_DP_HELPER_H_ */

View File

@@ -0,0 +1,509 @@
/*
* Copyright © 2014 Red Hat.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_
#include <linux/types.h>
#include <drm/drm_dp_helper.h>
struct drm_dp_mst_branch;
/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
* @vcpi: Virtual channel ID.
* @pbn: Payload Bandwidth Number for this channel
* @aligned_pbn: PBN aligned with slot size
* @num_slots: number of slots for this PBN
*/
struct drm_dp_vcpi {
int vcpi;
int pbn;
int aligned_pbn;
int num_slots;
};
/**
* struct drm_dp_mst_port - MST port
* @kref: reference count for this port.
 * @guid_valid: for DP 1.2 devices, whether the GUID has been validated.
* @guid: guid for DP 1.2 device on this port.
* @port_num: port number
* @input: if this port is an input port.
* @mcs: message capability status - DP 1.2 spec.
* @ddps: DisplayPort Device Plug Status - DP 1.2
* @pdt: Peer Device Type
* @ldps: Legacy Device Plug Status
* @dpcd_rev: DPCD revision of device on this port
* @num_sdp_streams: Number of simultaneous streams
* @num_sdp_stream_sinks: Number of stream sinks
* @available_pbn: Available bandwidth for this port.
* @next: link to next port on this branch device
 * @mstb: branch device attached below this port
* @aux: i2c aux transport to talk to device connected to this port.
* @parent: branch device parent of this port
* @vcpi: Virtual Channel Payload info for this port.
* @connector: DRM connector this port is connected to.
* @mgr: topology manager this port lives under.
*
* This structure represents an MST port endpoint on a device somewhere
* in the MST topology.
*/
struct drm_dp_mst_port {
struct kref kref;
/* if dpcd 1.2 device is on this port - its GUID info */
bool guid_valid;
u8 guid[16];
u8 port_num;
bool input;
bool mcs;
bool ddps;
u8 pdt;
bool ldps;
u8 dpcd_rev;
u8 num_sdp_streams;
u8 num_sdp_stream_sinks;
uint16_t available_pbn;
struct list_head next;
struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
struct drm_dp_aux aux; /* i2c bus for this port? */
struct drm_dp_mst_branch *parent;
struct drm_dp_vcpi vcpi;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
};
/**
* struct drm_dp_mst_branch - MST branch device.
 * @kref: reference count for this branch device.
* @rad: Relative Address to talk to this branch device.
* @lct: Link count total to talk to this branch device.
* @num_ports: number of ports on the branch.
* @msg_slots: one bit per transmitted msg slot.
* @ports: linked list of ports on this branch.
* @port_parent: pointer to the port parent, NULL if toplevel.
* @mgr: topology manager for this branch device.
* @tx_slots: transmission slots for this device.
* @last_seqno: last sequence number used to talk to this.
 * @link_address_sent: whether a link address message has been sent to this device yet.
*
 * This structure represents an MST branch device; there is one
 * primary branch device at the root, along with any others connected
 * to downstream ports.
*/
struct drm_dp_mst_branch {
struct kref kref;
u8 rad[8];
u8 lct;
int num_ports;
int msg_slots;
struct list_head ports;
/* list of tx ops queue for this port */
struct drm_dp_mst_port *port_parent;
struct drm_dp_mst_topology_mgr *mgr;
/* slots are protected by mstb->mgr->qlock */
struct drm_dp_sideband_msg_tx *tx_slots[2];
int last_seqno;
bool link_address_sent;
};
/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
u8 lct;
u8 lcr;
u8 rad[8];
bool broadcast;
bool path_msg;
u8 msg_len;
bool somt;
bool eomt;
bool seqno;
};
struct drm_dp_nak_reply {
u8 guid[16];
u8 reason;
u8 nak_data;
};
struct drm_dp_link_address_ack_reply {
u8 guid[16];
u8 nports;
struct drm_dp_link_addr_reply_port {
bool input_port;
u8 peer_device_type;
u8 port_number;
bool mcs;
bool ddps;
bool legacy_device_plug_status;
u8 dpcd_revision;
u8 peer_guid[16];
u8 num_sdp_streams;
u8 num_sdp_stream_sinks;
} ports[16];
};
struct drm_dp_remote_dpcd_read_ack_reply {
u8 port_number;
u8 num_bytes;
u8 bytes[255];
};
struct drm_dp_remote_dpcd_write_ack_reply {
u8 port_number;
};
struct drm_dp_remote_dpcd_write_nak_reply {
u8 port_number;
u8 reason;
u8 bytes_written_before_failure;
};
struct drm_dp_remote_i2c_read_ack_reply {
u8 port_number;
u8 num_bytes;
u8 bytes[255];
};
struct drm_dp_remote_i2c_read_nak_reply {
u8 port_number;
u8 nak_reason;
u8 i2c_nak_transaction;
};
struct drm_dp_remote_i2c_write_ack_reply {
u8 port_number;
};
struct drm_dp_sideband_msg_rx {
u8 chunk[48];
u8 msg[256];
u8 curchunk_len;
u8 curchunk_idx; /* chunk we are parsing now */
u8 curchunk_hdrlen;
u8 curlen; /* total length of the msg */
bool have_somt;
bool have_eomt;
struct drm_dp_sideband_msg_hdr initial_hdr;
};
struct drm_dp_allocate_payload {
u8 port_number;
u8 number_sdp_streams;
u8 vcpi;
u16 pbn;
u8 sdp_stream_sink[8];
};
struct drm_dp_allocate_payload_ack_reply {
u8 port_number;
u8 vcpi;
u16 allocated_pbn;
};
struct drm_dp_connection_status_notify {
u8 guid[16];
u8 port_number;
bool legacy_device_plug_status;
bool displayport_device_plug_status;
bool message_capability_status;
bool input_port;
u8 peer_device_type;
};
struct drm_dp_remote_dpcd_read {
u8 port_number;
u32 dpcd_address;
u8 num_bytes;
};
struct drm_dp_remote_dpcd_write {
u8 port_number;
u32 dpcd_address;
u8 num_bytes;
u8 *bytes;
};
struct drm_dp_remote_i2c_read {
u8 num_transactions;
u8 port_number;
struct {
u8 i2c_dev_id;
u8 num_bytes;
u8 *bytes;
u8 no_stop_bit;
u8 i2c_transaction_delay;
} transactions[4];
u8 read_i2c_device_id;
u8 num_bytes_read;
};
struct drm_dp_remote_i2c_write {
u8 port_number;
u8 write_i2c_device_id;
u8 num_bytes;
u8 *bytes;
};
/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
u8 port_number;
};
struct drm_dp_enum_path_resources_ack_reply {
u8 port_number;
u16 full_payload_bw_number;
u16 avail_payload_bw_number;
};
/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
u8 port_number;
};
struct drm_dp_query_payload {
u8 port_number;
u8 vcpi;
};
struct drm_dp_resource_status_notify {
u8 port_number;
u8 guid[16];
u16 available_pbn;
};
struct drm_dp_query_payload_ack_reply {
u8 port_number;
u8 allocated_pbn;
};
struct drm_dp_sideband_msg_req_body {
u8 req_type;
union ack_req {
struct drm_dp_connection_status_notify conn_stat;
struct drm_dp_port_number_req port_num;
struct drm_dp_resource_status_notify resource_stat;
struct drm_dp_query_payload query_payload;
struct drm_dp_allocate_payload allocate_payload;
struct drm_dp_remote_dpcd_read dpcd_read;
struct drm_dp_remote_dpcd_write dpcd_write;
struct drm_dp_remote_i2c_read i2c_read;
struct drm_dp_remote_i2c_write i2c_write;
} u;
};
struct drm_dp_sideband_msg_reply_body {
u8 reply_type;
u8 req_type;
union ack_replies {
struct drm_dp_nak_reply nak;
struct drm_dp_link_address_ack_reply link_addr;
struct drm_dp_port_number_rep port_number;
struct drm_dp_enum_path_resources_ack_reply path_resources;
struct drm_dp_allocate_payload_ack_reply allocate_payload;
struct drm_dp_query_payload_ack_reply query_payload;
struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
} u;
};
/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
struct drm_dp_sideband_msg_tx {
u8 msg[256];
u8 chunk[48];
u8 cur_offset;
u8 cur_len;
struct drm_dp_mst_branch *dst;
struct list_head next;
int seqno;
int state;
bool path_msg;
struct drm_dp_sideband_msg_reply_body reply;
};
/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
};
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3
struct drm_dp_payload {
int payload_state;
int start_slot;
int num_slots;
};
/**
* struct drm_dp_mst_topology_mgr - DisplayPort MST manager
* @dev: device pointer for adding i2c devices etc.
* @cbs: callbacks for connector addition and destruction.
 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write in one go.
* @aux: aux channel for the DP connector.
* @max_payloads: maximum number of payloads the GPU can generate.
* @conn_base_id: DRM connector ID this mgr is connected to.
* @down_rep_recv: msg receiver state for down replies.
* @up_req_recv: msg receiver state for up requests.
* @lock: protects mst state, primary, guid, dpcd.
 * @mst_state: whether this manager is enabled for an MST capable port.
* @mst_primary: pointer to the primary branch device.
* @guid_valid: GUID valid for the primary branch device.
* @guid: GUID for primary port.
* @dpcd: cache of DPCD for primary port.
* @pbn_div: PBN to slots divisor.
*
* This struct represents the toplevel displayport MST topology manager.
* There should be one instance of this for every MST capable DP connector
* on the GPU.
*/
struct drm_dp_mst_topology_mgr {
struct device *dev;
struct drm_dp_mst_topology_cbs *cbs;
int max_dpcd_transaction_bytes;
struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
int max_payloads;
int conn_base_id;
/* only ever accessed from the workqueue - which should be serialised */
struct drm_dp_sideband_msg_rx down_rep_recv;
struct drm_dp_sideband_msg_rx up_req_recv;
/* pointer to info about the initial MST device */
struct mutex lock; /* protects mst_state + primary + guid + dpcd */
bool mst_state;
struct drm_dp_mst_branch *mst_primary;
/* primary MST device GUID */
bool guid_valid;
u8 guid[16];
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 sink_count;
int pbn_div;
int total_slots;
int avail_slots;
int total_pbn;
/* messages to be transmitted */
/* qlock protects the upq/downq and in_progress,
the mstb tx_slots and txmsg->state once they are queued */
struct mutex qlock;
struct list_head tx_msg_downq;
struct list_head tx_msg_upq;
bool tx_down_in_progress;
bool tx_up_in_progress;
/* payload info + lock for it */
struct mutex payload_lock;
struct drm_dp_vcpi **proposed_vcpis;
struct drm_dp_payload *payloads;
unsigned long payload_mask;
wait_queue_head_t tx_waitq;
struct work_struct work;
struct work_struct tx_work;
};
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
int drm_dp_calc_pbn_mode(int clock, int bpp);
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
int pbn);
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
#endif
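For orientation, a sketch of how a driver might bring the topology manager up once a DP 1.2 branch is detected. The foo_ naming and the numbers (16-byte DPCD transactions, 4 payload streams) are illustrative; the calls are the ones declared above.

static int foo_enable_mst(struct drm_dp_mst_topology_mgr *mgr,
			  struct device *dev, struct drm_dp_aux *aux,
			  int conn_id)
{
	int ret;

	ret = drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 4, conn_id);
	if (ret)
		return ret;

	/* flip the sink into MST mode and start probing the topology */
	return drm_dp_mst_topology_mgr_set_mst(mgr, true);
}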

View File

@@ -202,6 +202,11 @@ struct detailed_timing {
#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
#define DRM_EDID_HDMI_DC_48 (1 << 6)
#define DRM_EDID_HDMI_DC_36 (1 << 5)
#define DRM_EDID_HDMI_DC_30 (1 << 4)
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
struct edid {
	u8 header[8];
	/* Vendor & product info */

View File

@@ -32,6 +32,7 @@
struct drm_fb_helper;
#include <linux/kgdb.h>
struct drm_fb_helper_crtc {
	struct drm_mode_set mode_set;
@@ -54,7 +55,7 @@ struct drm_fb_helper_surface_size {
 * save the current lut when force-restoring the fbdev for e.g.
 * kgdb.
 * @fb_probe: Driver callback to allocate and initialize the fbdev info
 * structure. Furthermore it also needs to allocate the drm
 * framebuffer used to back the fbdev.
 * @initial_config: Setup an initial fbdev display configuration
 *
@@ -85,8 +86,9 @@ struct drm_fb_helper {
	int crtc_count;
	struct drm_fb_helper_crtc *crtc_info;
	int connector_count;
	int connector_info_alloc_count;
	struct drm_fb_helper_connector **connector_info;
	const struct drm_fb_helper_funcs *funcs;
	struct fb_info *fbdev;
	u32 pseudo_palette[17];
	struct list_head kernel_fb_list;
@@ -96,6 +98,8 @@ struct drm_fb_helper {
	bool delayed_hotplug;
};
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
			   const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
		       struct drm_fb_helper *helper, int crtc_count,
		       int max_conn);
@@ -107,7 +111,7 @@ int drm_fb_helper_set_par(struct fb_info *info);
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info);
bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
			    uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
@@ -120,5 +124,14 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
struct drm_display_mode *
drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
int width, int height);
struct drm_display_mode *
drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
int width, int height);
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
#endif

View File

@@ -47,8 +47,17 @@
enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT = 0,
	DRM_MM_SEARCH_BEST = 1 << 0,
	DRM_MM_SEARCH_BELOW = 1 << 1,
};
enum drm_mm_allocator_flags {
DRM_MM_CREATE_DEFAULT = 0,
DRM_MM_CREATE_TOP = 1 << 0,
};
#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
@@ -85,11 +94,31 @@ struct drm_mm {
				     unsigned long *start, unsigned long *end);
};
/**
* drm_mm_node_allocated - checks whether a node is allocated
* @node: drm_mm_node to check
*
 * Drivers should use these helpers for proper encapsulation of drm_mm
* internals.
*
* Returns:
* True if the @node is allocated.
*/
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
{
	return node->allocated;
}
/**
* drm_mm_initialized - checks whether an allocator is initialized
* @mm: drm_mm to check
*
 * Drivers should use these helpers for proper encapsulation of drm_mm
* internals.
*
* Returns:
* True if the @mm is initialized.
*/
static inline bool drm_mm_initialized(struct drm_mm *mm)
{
	return mm->hole_stack.next;
@@ -100,6 +129,17 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no
	return hole_node->start + hole_node->size;
}
/**
* drm_mm_hole_node_start - computes the start of the hole following @node
* @hole_node: drm_mm_node which implicitly tracks the following hole
*
 * This is useful for driver-specific debug dumpers. Otherwise drivers should not
* inspect holes themselves. Drivers must check first whether a hole indeed
* follows by looking at node->hole_follows.
*
* Returns:
* Start of the subsequent hole.
*/
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	BUG_ON(!hole_node->hole_follows);
@@ -112,18 +152,52 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node
		struct drm_mm_node, node_list)->start;
}
/**
* drm_mm_hole_node_end - computes the end of the hole following @node
* @hole_node: drm_mm_node which implicitly tracks the following hole
*
 * This is useful for driver-specific debug dumpers. Otherwise drivers should not
* inspect holes themselves. Drivers must check first whether a hole indeed
* follows by looking at node->hole_follows.
*
* Returns:
* End of the subsequent hole.
*/
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}
/**
* drm_mm_for_each_node - iterator to walk over all allocated nodes
* @entry: drm_mm_node structure to assign to in each iteration step
* @mm: drm_mm allocator to walk
*
* This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each, so it is not safe against removal of elements.
*/
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
						&(mm)->head_node.node_list, \
						node_list)
/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
* @mm: drm_mm allocator to walk
* @hole_start: ulong variable to assign the hole start to on each iteration
* @hole_end: ulong variable to assign the hole end to on each iteration
*
* This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each, so it is not safe against removal of elements. @entry is used
* internally and will not reflect a real drm_mm_node for the very first hole.
* Hence users of this iterator may not access it.
*
* Implementation Note:
* We need to inline list_for_each_entry in order to be able to set hole_start
* and hole_end on each iteration while keeping the macro sane.
*
* The __drm_mm_for_each_hole version is similar, but with added support for
* going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
@@ -133,34 +207,79 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
	     1 : 0; \
	     entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
&entry->hole_stack != &(mm)->hole_stack ? \
hole_start = drm_mm_hole_node_start(entry), \
hole_end = drm_mm_hole_node_end(entry), \
1 : 0; \
entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
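A small usage sketch of the hole iterator; 'mm' is assumed to be a live, initialized allocator and the function is ours.

static void foo_dump_holes(struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long hole_start, hole_end;

	/* walk every free hole and log its extent */
	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
		printk(KERN_DEBUG "hole: %lx-%lx (%lu bytes)\n",
		       hole_start, hole_end, hole_end - hole_start);
}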
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       unsigned long size,
			       unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
* drm_mm_insert_node - search for space and insert @node
* @mm: drm_mm to allocate from
 * @node: preallocated node to insert
* @size: size of the allocation
* @alignment: alignment of the allocation
* @flags: flags to fine-tune the allocation
*
* This is a simplified version of drm_mm_insert_node_generic() with @color set
* to 0.
*
* The preallocated node must be cleared to 0.
*
* Returns:
* 0 on success, -ENOSPC if there's no suitable hole.
*/
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     unsigned long size,
				     unsigned alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}
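A hedged usage sketch, assuming a byte-addressed allocator and page-sized, page-aligned blocks; the wrapper is ours.

static int foo_alloc_block(struct drm_mm *mm, struct drm_mm_node *node)
{
	int ret;

	memset(node, 0, sizeof(*node));	/* preallocated node must be cleared */
	ret = drm_mm_insert_node(mm, node, 4096, 4096,
				 DRM_MM_SEARCH_DEFAULT);
	if (ret == -ENOSPC)
		printk(KERN_DEBUG "no suitable hole\n");
	return ret;
}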
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					unsigned long size,
					unsigned alignment,
					unsigned long color,
					unsigned long start,
					unsigned long end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);
/**
* drm_mm_insert_node_in_range - ranged search for space and insert @node
* @mm: drm_mm to allocate from
 * @node: preallocated node to insert
* @size: size of the allocation
* @alignment: alignment of the allocation
* @start: start of the allowed range for this node
* @end: end of the allowed range for this node
* @flags: flags to fine-tune the allocation
*
* This is a simplified version of drm_mm_insert_node_in_range_generic() with
* @color set to 0.
*
* The preallocated node must be cleared to 0.
*
* Returns:
* 0 on success, -ENOSPC if there's no suitable hole.
*/
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      unsigned long size,
@@ -170,16 +289,17 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
		 unsigned long start,
		 unsigned long size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
@@ -191,10 +311,10 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
		      unsigned long color,
		      unsigned long start,
		      unsigned long end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif

View File

@@ -0,0 +1,237 @@
/*
* Copyright © 2006 Keith Packard
* Copyright © 2007-2008 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
* Copyright © 2014 Intel Corporation
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __DRM_MODES_H__
#define __DRM_MODES_H__
/*
* Note on terminology: here, for brevity and convenience, we refer to connector
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
* DVI, etc. And 'screen' refers to the whole of the visible display, which
* may span multiple monitors (and therefore multiple CRTC and connector
* structures).
*/
enum drm_mode_status {
MODE_OK = 0, /* Mode OK */
MODE_HSYNC, /* hsync out of range */
MODE_VSYNC, /* vsync out of range */
MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
	MODE_V_ILLEGAL, /* mode has illegal vertical timings */
MODE_BAD_WIDTH, /* requires an unsupported linepitch */
MODE_NOMODE, /* no mode with a matching name */
MODE_NO_INTERLACE, /* interlaced mode not supported */
MODE_NO_DBLESCAN, /* doublescan mode not supported */
MODE_NO_VSCAN, /* multiscan mode not supported */
MODE_MEM, /* insufficient video memory */
MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
MODE_MEM_VIRT, /* insufficient video memory given virtual size */
MODE_NOCLOCK, /* no fixed clock available */
MODE_CLOCK_HIGH, /* clock required is too high */
MODE_CLOCK_LOW, /* clock required is too low */
MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
MODE_BAD_HVALUE, /* horizontal timing was out of range */
MODE_BAD_VVALUE, /* vertical timing was out of range */
MODE_BAD_VSCAN, /* VScan value out of range */
MODE_HSYNC_NARROW, /* horizontal sync too narrow */
MODE_HSYNC_WIDE, /* horizontal sync too wide */
MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
MODE_HBLANK_WIDE, /* horizontal blanking too wide */
MODE_VSYNC_NARROW, /* vertical sync too narrow */
MODE_VSYNC_WIDE, /* vertical sync too wide */
MODE_VBLANK_NARROW, /* vertical blanking too narrow */
MODE_VBLANK_WIDE, /* vertical blanking too wide */
MODE_PANEL, /* exceeds panel dimensions */
MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
MODE_ONE_WIDTH, /* only one width is supported */
MODE_ONE_HEIGHT, /* only one height is supported */
MODE_ONE_SIZE, /* only one resolution is supported */
MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
MODE_NO_STEREO, /* stereo modes not supported */
	MODE_UNVERIFIED = -3, /* mode needs to be reverified */
MODE_BAD = -2, /* unspecified reason */
MODE_ERROR = -1 /* error condition */
};
#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
DRM_MODE_TYPE_CRTC_C)
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
.name = nm, .status = 0, .type = (t), .clock = (c), \
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
.vscan = (vs), .flags = (f), \
.base.type = DRM_MODE_OBJECT_MODE
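For illustration, the macro expands a mode-table entry like this; the foo_ name is ours and the timings are the standard VESA DMT 640x480@60 values.

static const struct drm_display_mode foo_640x480 = {
	/* name, type, clock (kHz), h: disp/ss/se/tot/skew, v: disp/ss/se/tot/scan, flags */
	DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175,
		 640, 656, 752, 800, 0, 480, 490, 492, 525, 0,
		 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};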
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
struct drm_display_mode {
/* Header */
struct list_head head;
struct drm_mode_object base;
char name[DRM_DISPLAY_MODE_LEN];
enum drm_mode_status status;
unsigned int type;
/* Proposed mode values */
int clock; /* in kHz */
int hdisplay;
int hsync_start;
int hsync_end;
int htotal;
int hskew;
int vdisplay;
int vsync_start;
int vsync_end;
int vtotal;
int vscan;
unsigned int flags;
/* Addressable image size (may be 0 for projectors, etc.) */
int width_mm;
int height_mm;
/* Actual mode we give to hw */
	int crtc_clock; /* in kHz */
int crtc_hdisplay;
int crtc_hblank_start;
int crtc_hblank_end;
int crtc_hsync_start;
int crtc_hsync_end;
int crtc_htotal;
int crtc_hskew;
int crtc_vdisplay;
int crtc_vblank_start;
int crtc_vblank_end;
int crtc_vsync_start;
int crtc_vsync_end;
int crtc_vtotal;
/* Driver private mode info */
int *private;
int private_flags;
int vrefresh; /* in Hz */
int hsync; /* in kHz */
enum hdmi_picture_aspect picture_aspect_ratio;
};
/* mode specified on the command line */
struct drm_cmdline_mode {
bool specified;
bool refresh_specified;
bool bpp_specified;
int xres, yres;
int bpp;
int refresh;
bool rb;
bool interlace;
bool cvt;
bool margins;
enum drm_connector_force force;
};
/**
* drm_mode_is_stereo - check for stereo mode flags
* @mode: drm_display_mode to check
*
* Returns:
* True if the mode is one of the stereo modes (like side-by-side), false if
* not.
*/
static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
{
return mode->flags & DRM_MODE_FLAG_3D_MASK;
}
struct drm_connector;
struct drm_cmdline_mode;
struct drm_display_mode *drm_mode_create(struct drm_device *dev);
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool reduced, bool interlaced,
bool margins);
struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool interlaced, int margins);
struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
int hdisplay, int vdisplay,
int vrefresh, bool interlaced,
int margins,
int GTF_M, int GTF_2C,
int GTF_K, int GTF_2J);
void drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode);
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode,
int index);
void drm_mode_set_name(struct drm_display_mode *mode);
int drm_mode_hsync(const struct drm_display_mode *mode);
int drm_mode_vrefresh(const struct drm_display_mode *mode);
void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
void drm_mode_copy(struct drm_display_mode *dst,
const struct drm_display_mode *src);
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
bool drm_mode_equal(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
/* for use by the crtc helper probe functions */
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY);
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
void drm_mode_sort(struct list_head *mode_list);
void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits);
/* parsing cmdline modes */
bool
drm_mode_parse_command_line_for_connector(const char *mode_option,
struct drm_connector *connector,
struct drm_cmdline_mode *mode);
struct drm_display_mode *
drm_mode_create_from_cmdline_mode(struct drm_device *dev,
struct drm_cmdline_mode *cmd);
#endif /* __DRM_MODES_H__ */

View File

@@ -0,0 +1,126 @@
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef DRM_MODESET_LOCK_H_
#define DRM_MODESET_LOCK_H_
#include <linux/ww_mutex.h>
struct drm_modeset_lock;
/**
* drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
* @ww_ctx: base acquire ctx
* @contended: used internally for -EDEADLK handling
* @locked: list of held locks
*
 * Each thread competing for a set of locks must use one acquire
 * ctx. And if any lock function returns -EDEADLK, it must back off and
 * retry.
*/
struct drm_modeset_acquire_ctx {
struct ww_acquire_ctx ww_ctx;
/**
* Contended lock: if a lock is contended you should only call
* drm_modeset_backoff() which drops locks and slow-locks the
* contended lock.
*/
struct drm_modeset_lock *contended;
/**
* list of held locks (drm_modeset_lock)
*/
struct list_head locked;
};
/**
* drm_modeset_lock - used for locking modeset resources.
* @mutex: resource locking
 * @head: used to hold its place on state->locked list when
 *    part of an atomic update
*
* Used for locking CRTCs and other modeset resources.
*/
struct drm_modeset_lock {
/**
* modeset lock
*/
struct ww_mutex mutex;
/**
* Resources that are locked as part of an atomic update are added
* to a list (so we know what to unlock at the end).
*/
struct list_head head;
};
extern struct ww_class crtc_ww_class;
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags);
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
/**
* drm_modeset_lock_init - initialize lock
* @lock: lock to init
*/
static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
ww_mutex_init(&lock->mutex, &crtc_ww_class);
INIT_LIST_HEAD(&lock->head);
}
/**
* drm_modeset_lock_fini - cleanup lock
* @lock: lock to cleanup
*/
static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
{
WARN_ON(!list_empty(&lock->head));
}
/**
* drm_modeset_is_locked - equivalent to mutex_is_locked()
* @lock: lock to check
*/
static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
{
return ww_mutex_is_locked(&lock->mutex);
}
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx);
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_unlock(struct drm_modeset_lock *lock);
struct drm_device;
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
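A sketch of the intended acquire/backoff dance using the functions declared above; the resource being touched and the foo_ name are illustrative.

static void foo_modeset_locked_op(struct drm_modeset_lock *lock)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(lock, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);	/* drops and slow-locks */
		goto retry;
	}

	/* ... touch the modeset resource here ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}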
#endif /* DRM_MODESET_LOCK_H_ */

View File

@@ -637,6 +637,22 @@
{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \

View File

@@ -0,0 +1,71 @@
/*
* Copyright (C) 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef DRM_PLANE_HELPER_H
#define DRM_PLANE_HELPER_H
#include <drm/drm_rect.h>
/*
* Drivers that don't allow primary plane scaling may pass this macro in place
* of the min/max scale parameters of the update checker function.
*
* Due to src being in 16.16 fixed point and dest being in integer pixels,
* 1<<16 represents no scaling.
*/
#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
/**
* DOC: plane helpers
*
* Helper functions to assist with creation and handling of CRTC primary
* planes.
*/
extern int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_rect *src,
struct drm_rect *dest,
const struct drm_rect *clip,
int min_scale,
int max_scale,
bool can_position,
bool can_update_disabled,
bool *visible);
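A hedged sketch of a non-scaling plane check built on this helper; the wrapper and its arguments are illustrative, only the helper and the macro above come from this header.

static int foo_check_primary(struct drm_plane *plane, struct drm_crtc *crtc,
			     struct drm_framebuffer *fb,
			     struct drm_rect *src, struct drm_rect *dest,
			     const struct drm_rect *clip)
{
	bool visible;

	/* 16.16 fixed point: 1<<16 on both bounds forbids any scaling */
	return drm_plane_helper_check_update(plane, crtc, fb, src, dest, clip,
					     DRM_PLANE_HELPER_NO_SCALING,
					     DRM_PLANE_HELPER_NO_SCALING,
					     false, false, &visible);
}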
extern int drm_primary_helper_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
extern int drm_primary_helper_disable(struct drm_plane *plane);
extern void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
const uint32_t *formats,
int num_formats);
#endif

View File

@@ -163,5 +163,11 @@ int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
				 struct drm_rect *dst,
				 int min_vscale, int max_vscale);
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
void drm_rect_rotate(struct drm_rect *r,
int width, int height,
unsigned int rotation);
void drm_rect_rotate_inv(struct drm_rect *r,
int width, int height,
unsigned int rotation);
#endif

View File

@@ -221,8 +221,8 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
 * @file_mapping: Address space to unmap @node from
 *
 * Unmap all userspace mappings for a given offset node. The mappings must be
 * associated with the @file_mapping address-space. If no offset exists,
 * nothing is done.
 *
 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
 * is not called on this node concurrently.
@@ -230,10 +230,10 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
				      struct address_space *file_mapping)
{
//	if (drm_vma_node_has_offset(node))
//		unmap_mapping_range(file_mapping,
//				    drm_vma_node_offset_addr(node),
//				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
}
/**

View File

@@ -56,6 +56,12 @@ extern bool i915_gpu_turbo_disable(void);
#define I830_GMCH_CTRL 0x52
#define I830_GMCH_GMS_MASK 0x70
#define I830_GMCH_GMS_LOCAL 0x10
#define I830_GMCH_GMS_STOLEN_512 0x20
#define I830_GMCH_GMS_STOLEN_1024 0x30
#define I830_GMCH_GMS_STOLEN_8192 0x40
#define I855_GMCH_GMS_MASK 0xF0
#define I855_GMCH_GMS_STOLEN_0M 0x0
#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
@@ -72,4 +78,18 @@ extern bool i915_gpu_turbo_disable(void);
#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I830_DRB3 0x63
#define I85X_DRB3 0x43
#define I865_TOUD 0xc4
#define I830_ESMRAMC 0x91
#define I845_ESMRAMC 0x9e
#define I85X_ESMRAMC 0x61
#define TSEG_ENABLE (1 << 0)
#define I830_TSEG_SIZE_512K (0 << 1)
#define I830_TSEG_SIZE_1M (1 << 1)
#define I845_TSEG_SIZE_MASK (3 << 1)
#define I845_TSEG_SIZE_512K (2 << 1)
#define I845_TSEG_SIZE_1M (3 << 1)
#endif /* _I915_DRM_H_ */ #endif /* _I915_DRM_H_ */

View File

@ -191,8 +191,8 @@
INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \ INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \ INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \ INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \ INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \ INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
@ -223,14 +223,40 @@
_INTEL_BDW_D(gt, 0x160A, info), /* Server */ \ _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
_INTEL_BDW_D(gt, 0x160D, info) /* Workstation */ _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
#define INTEL_BDW_M_IDS(info) \ #define INTEL_BDW_GT12M_IDS(info) \
_INTEL_BDW_M_IDS(1, info), \ _INTEL_BDW_M_IDS(1, info), \
_INTEL_BDW_M_IDS(2, info), \ _INTEL_BDW_M_IDS(2, info)
#define INTEL_BDW_GT12D_IDS(info) \
_INTEL_BDW_D_IDS(1, info), \
_INTEL_BDW_D_IDS(2, info)
#define INTEL_BDW_GT3M_IDS(info) \
_INTEL_BDW_M_IDS(3, info) _INTEL_BDW_M_IDS(3, info)
#define INTEL_BDW_D_IDS(info) \ #define INTEL_BDW_GT3D_IDS(info) \
_INTEL_BDW_D_IDS(1, info), \
_INTEL_BDW_D_IDS(2, info), \
_INTEL_BDW_D_IDS(3, info) _INTEL_BDW_D_IDS(3, info)
#define INTEL_BDW_RSVDM_IDS(info) \
_INTEL_BDW_M_IDS(4, info)
#define INTEL_BDW_RSVDD_IDS(info) \
_INTEL_BDW_D_IDS(4, info)
#define INTEL_BDW_M_IDS(info) \
INTEL_BDW_GT12M_IDS(info), \
INTEL_BDW_GT3M_IDS(info), \
INTEL_BDW_RSVDM_IDS(info)
#define INTEL_BDW_D_IDS(info) \
INTEL_BDW_GT12D_IDS(info), \
INTEL_BDW_GT3D_IDS(info), \
INTEL_BDW_RSVDD_IDS(info)
#define INTEL_CHV_IDS(info) \
INTEL_VGA_DEVICE(0x22b0, info), \
INTEL_VGA_DEVICE(0x22b1, info), \
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
#endif /* _I915_PCIIDS_H */ #endif /* _I915_PCIIDS_H */

View File

@ -30,7 +30,8 @@
#define _I915_POWERWELL_H_ #define _I915_POWERWELL_H_
/* For use by hda_i915 driver */ /* For use by hda_i915 driver */
extern void i915_request_power_well(void); extern int i915_request_power_well(void);
extern void i915_release_power_well(void); extern int i915_release_power_well(void);
extern int i915_get_cdclk_freq(void);
#endif /* _I915_POWERWELL_H_ */ #endif /* _I915_POWERWELL_H_ */

View File

@ -39,15 +39,12 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/bitmap.h> #include <linux/bitmap.h>
#include <linux/reservation.h>
struct ttm_bo_device; struct ttm_bo_device;
struct drm_mm_node; struct drm_mm_node;
struct reservation_object {
struct mutex lock;
};
/** /**
* struct ttm_placement * struct ttm_placement
@ -488,13 +485,12 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
void (*destroy) (struct ttm_buffer_object *)); void (*destroy) (struct ttm_buffer_object *));
/** /**
* ttm_bo_synccpu_object_init * ttm_bo_create
* *
* @bdev: Pointer to a ttm_bo_device struct. * @bdev: Pointer to a ttm_bo_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object. * @size: Requested size of buffer object.
* @type: Requested type of buffer object. * @type: Requested type of buffer object.
* @flags: Initial placement flags. * @placement: Initial placement.
* @page_alignment: Data alignment in pages. * @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep while waiting for GPU resources, * @interruptible: If needing to sleep while waiting for GPU resources,
* sleep interruptible. * sleep interruptible.

View File

@ -37,11 +37,10 @@
#include <drm/drm_mm.h> #include <drm/drm_mm.h>
#include <drm/drm_global.h> #include <drm/drm_global.h>
#include <drm/drm_vma_manager.h> #include <drm/drm_vma_manager.h>
//#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/reservation.h>
struct ww_acquire_ctx;
struct ttm_backend_func { struct ttm_backend_func {
/** /**
@ -134,6 +133,7 @@ struct ttm_tt {
* struct ttm_dma_tt * struct ttm_dma_tt
* *
* @ttm: Base ttm_tt struct. * @ttm: Base ttm_tt struct.
* @cpu_address: The CPU address of the pages
* @dma_address: The DMA (bus) addresses of the pages * @dma_address: The DMA (bus) addresses of the pages
* @pages_list: used by some page allocation backend * @pages_list: used by some page allocation backend
* *
@ -143,6 +143,7 @@ struct ttm_tt {
*/ */
struct ttm_dma_tt { struct ttm_dma_tt {
struct ttm_tt ttm; struct ttm_tt ttm;
void **cpu_address;
dma_addr_t *dma_address; dma_addr_t *dma_address;
struct list_head pages_list; struct list_head pages_list;
}; };
@ -183,6 +184,7 @@ struct ttm_mem_type_manager_func {
* @man: Pointer to a memory type manager. * @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for. * @bo: Pointer to the buffer object we're allocating space for.
* @placement: Placement details. * @placement: Placement details.
* @flags: Additional placement flags.
* @mem: Pointer to a struct ttm_mem_reg to be filled in. * @mem: Pointer to a struct ttm_mem_reg to be filled in.
* *
* This function should allocate space in the memory type managed * This function should allocate space in the memory type managed
@ -207,6 +209,7 @@ struct ttm_mem_type_manager_func {
int (*get_node)(struct ttm_mem_type_manager *man, int (*get_node)(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo, struct ttm_buffer_object *bo,
struct ttm_placement *placement, struct ttm_placement *placement,
uint32_t flags,
struct ttm_mem_reg *mem); struct ttm_mem_reg *mem);
/** /**
@ -653,18 +656,6 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm);
*/ */
extern int ttm_tt_swapin(struct ttm_tt *ttm); extern int ttm_tt_swapin(struct ttm_tt *ttm);
/**
* ttm_tt_cache_flush:
*
* @pages: An array of pointers to struct page:s to flush.
* @num_pages: Number of pages to flush.
*
* Flush the data of the indicated pages from the cpu caches.
* This is used when changing caching attributes of the pages from
* cache-coherent.
*/
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
/** /**
* ttm_tt_set_placement_caching: * ttm_tt_set_placement_caching:
* *
@ -748,6 +739,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
* @bdev: A pointer to a struct ttm_bo_device to initialize. * @bdev: A pointer to a struct ttm_bo_device to initialize.
* @glob: A pointer to an initialized struct ttm_bo_global. * @glob: A pointer to an initialized struct ttm_bo_global.
* @driver: A pointer to a struct ttm_bo_driver set up by the caller. * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
* @mapping: The address space to use for this bo.
* @file_page_offset: Offset into the device address space that is available * @file_page_offset: Offset into the device address space that is available
* for buffer data. This ensures compatibility with other users of the * for buffer data. This ensures compatibility with other users of the
* address space. * address space.
@ -759,6 +751,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
extern int ttm_bo_device_init(struct ttm_bo_device *bdev, extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob, struct ttm_bo_global *glob,
struct ttm_bo_driver *driver, struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset, bool need_dma32); uint64_t file_page_offset, bool need_dma32);
/** /**
@ -787,7 +780,7 @@ extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
/** /**
* ttm_bo_reserve_nolru: * __ttm_bo_reserve:
* *
* @bo: A pointer to a struct ttm_buffer_object. * @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting. * @interruptible: Sleep interruptible if waiting.
@ -808,13 +801,13 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
* -EALREADY: Bo already reserved using @ticket. This error code will only * -EALREADY: Bo already reserved using @ticket. This error code will only
* be returned if @use_ticket is set to true. * be returned if @use_ticket is set to true.
*/ */
static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool interruptible,
bool no_wait, bool use_ticket, bool no_wait, bool use_ticket,
struct ww_acquire_ctx *ticket) struct ww_acquire_ctx *ticket)
{ {
int ret = 0; int ret = 0;
/*
if (no_wait) { if (no_wait) {
bool success; bool success;
if (WARN_ON(ticket)) if (WARN_ON(ticket))
@ -830,7 +823,6 @@ static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
ret = ww_mutex_lock(&bo->resv->lock, ticket); ret = ww_mutex_lock(&bo->resv->lock, ticket);
if (ret == -EINTR) if (ret == -EINTR)
return -ERESTARTSYS; return -ERESTARTSYS;
*/
return ret; return ret;
} }
@ -888,8 +880,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
WARN_ON(!atomic_read(&bo->kref.refcount)); WARN_ON(!atomic_read(&bo->kref.refcount));
ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket, ticket); ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
if (likely(ret == 0)) if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo); ttm_bo_del_sub_from_lru(bo);
@ -914,11 +905,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
WARN_ON(!atomic_read(&bo->kref.refcount)); WARN_ON(!atomic_read(&bo->kref.refcount));
if (interruptible) ww_mutex_lock_slow(&bo->resv->lock, ticket);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
ticket);
else
ww_mutex_lock_slow(&bo->resv->lock, ticket);
if (likely(ret == 0)) if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo); ttm_bo_del_sub_from_lru(bo);
@ -928,6 +915,35 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
return ret; return ret;
} }
/**
* __ttm_bo_unreserve
* @bo: A pointer to a struct ttm_buffer_object.
*
* Unreserve a previous reservation of @bo where the buffer object is
* already on lru lists.
*/
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
ww_mutex_unlock(&bo->resv->lock);
}
/**
* ttm_bo_unreserve
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Unreserve a previous reservation of @bo.
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
spin_lock(&bo->glob->lru_lock);
ttm_bo_add_to_lru(bo);
spin_unlock(&bo->glob->lru_lock);
}
__ttm_bo_unreserve(bo);
}
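For orientation, the reserve/unreserve pair above is used roughly like this (a sketch with error handling trimmed; assumes the caller already holds a reference on the BO):

	int ret;

	/* lock the reservation and take the BO off the LRU lists */
	ret = ttm_bo_reserve(bo, true /* interruptible */,
			     false /* no_wait */, false /* use_ticket */, NULL);
	if (ret)
		return ret;

	/* ... validate, populate or map the buffer here ... */

	/* put the BO back on the LRU (unless pinned) and unlock */
	ttm_bo_unreserve(bo);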
/** /**
* ttm_bo_unreserve_ticket * ttm_bo_unreserve_ticket
* @bo: A pointer to a struct ttm_buffer_object. * @bo: A pointer to a struct ttm_buffer_object.
@ -938,24 +954,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
struct ww_acquire_ctx *t) struct ww_acquire_ctx *t)
{ {
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { ttm_bo_unreserve(bo);
spin_lock(&bo->glob->lru_lock);
ttm_bo_add_to_lru(bo);
spin_unlock(&bo->glob->lru_lock);
}
// ww_mutex_unlock(&bo->resv->lock);
}
/**
* ttm_bo_unreserve
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Unreserve a previous reservation of @bo.
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
ttm_bo_unreserve_ticket(bo, NULL);
} }
/* /*

View File

@ -51,7 +51,7 @@
#include <ttm/ttm_object.h> #include <ttm/ttm_object.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/atomic.h> //#include <linux/atomic.h>
/** /**
* struct ttm_lock * struct ttm_lock

View File

@ -244,6 +244,10 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
extern int ttm_ref_object_add(struct ttm_object_file *tfile, extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base, struct ttm_base_object *base,
enum ttm_ref_type ref_type, bool *existed); enum ttm_ref_type ref_type, bool *existed);
extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_base_object *base);
/** /**
* ttm_ref_object_base_unref * ttm_ref_object_base_unref
* *

View File

@ -29,6 +29,8 @@
#include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_memory.h> #include <drm/ttm/ttm_memory.h>
struct device;
/** /**
* Initialize pool allocator. * Initialize pool allocator.
*/ */

View File

@ -65,6 +65,8 @@
* reference the buffer. * reference the buffer.
* TTM_PL_FLAG_NO_EVICT means that the buffer may never * TTM_PL_FLAG_NO_EVICT means that the buffer may never
* be evicted to make room for other buffers. * be evicted to make room for other buffers.
* TTM_PL_FLAG_TOPDOWN requests to be placed from the
* top of the memory area, instead of the bottom.
*/ */
#define TTM_PL_FLAG_CACHED (1 << 16) #define TTM_PL_FLAG_CACHED (1 << 16)
@ -72,6 +74,7 @@
#define TTM_PL_FLAG_WC (1 << 18) #define TTM_PL_FLAG_WC (1 << 18)
#define TTM_PL_FLAG_SHARED (1 << 20) #define TTM_PL_FLAG_SHARED (1 << 20)
#define TTM_PL_FLAG_NO_EVICT (1 << 21) #define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_FLAG_TOPDOWN (1 << 22)
#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ #define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
TTM_PL_FLAG_UNCACHED | \ TTM_PL_FLAG_UNCACHED | \

View File

@ -7,4 +7,36 @@
#ifndef _LINUX_BACKLIGHT_H #ifndef _LINUX_BACKLIGHT_H
#define _LINUX_BACKLIGHT_H #define _LINUX_BACKLIGHT_H
/* Notes on locking:
*
* backlight_device->ops_lock is an internal backlight lock protecting the
* ops pointer and no code outside the core should need to touch it.
*
* Access to update_status() is serialised by the update_lock mutex since
* most drivers seem to need this and historically get it wrong.
*
* Most drivers don't need locking on their get_brightness() method.
* If yours does, you need to implement it in the driver. You can use the
* update_lock mutex if appropriate.
*
* Any other use of the locks below is probably wrong.
*/
enum backlight_update_reason {
BACKLIGHT_UPDATE_HOTKEY,
BACKLIGHT_UPDATE_SYSFS,
};
enum backlight_type {
BACKLIGHT_RAW = 1,
BACKLIGHT_PLATFORM,
BACKLIGHT_FIRMWARE,
BACKLIGHT_TYPE_MAX,
};
enum backlight_notification {
BACKLIGHT_REGISTERED,
BACKLIGHT_UNREGISTERED,
};
#endif #endif

View File

@ -88,32 +88,32 @@
* lib/bitmap.c provides these functions: * lib/bitmap.c provides these functions:
*/ */
extern int __bitmap_empty(const unsigned long *bitmap, int bits); extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
extern int __bitmap_full(const unsigned long *bitmap, int bits); extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
extern int __bitmap_equal(const unsigned long *bitmap1, extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits); const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
int bits); unsigned int nbits);
extern void __bitmap_shift_right(unsigned long *dst, extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits); const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst, extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits); const unsigned long *src, int shift, int bits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits); const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits); const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits); const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits); const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_intersects(const unsigned long *bitmap1, extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits); const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_subset(const unsigned long *bitmap1, extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits); const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits); extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
extern void bitmap_set(unsigned long *map, int i, int len); extern void bitmap_set(unsigned long *map, unsigned int start, int len);
extern void bitmap_clear(unsigned long *map, int start, int nr); extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
extern unsigned long bitmap_find_next_zero_area(unsigned long *map, extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size, unsigned long size,
unsigned long start, unsigned long start,
@ -140,9 +140,9 @@ extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits); const unsigned long *relmap, int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits); int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
@ -188,15 +188,15 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
} }
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return (*dst = *src1 & *src2) != 0; return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits); return __bitmap_and(dst, src1, src2, nbits);
} }
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
*dst = *src1 | *src2; *dst = *src1 | *src2;
@ -205,7 +205,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
} }
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
*dst = *src1 ^ *src2; *dst = *src1 ^ *src2;
@ -214,24 +214,24 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
} }
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2)) != 0; return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits); return __bitmap_andnot(dst, src1, src2, nbits);
} }
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
int nbits) unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); *dst = ~(*src);
else else
__bitmap_complement(dst, src, nbits); __bitmap_complement(dst, src, nbits);
} }
static inline int bitmap_equal(const unsigned long *src1, static inline int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@ -240,7 +240,7 @@ static inline int bitmap_equal(const unsigned long *src1,
} }
static inline int bitmap_intersects(const unsigned long *src1, static inline int bitmap_intersects(const unsigned long *src1,
const unsigned long *src2, int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@ -249,7 +249,7 @@ static inline int bitmap_intersects(const unsigned long *src1,
} }
static inline int bitmap_subset(const unsigned long *src1, static inline int bitmap_subset(const unsigned long *src1,
const unsigned long *src2, int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@ -257,7 +257,7 @@ static inline int bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits); return __bitmap_subset(src1, src2, nbits);
} }
static inline int bitmap_empty(const unsigned long *src, int nbits) static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@ -265,7 +265,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits)
return __bitmap_empty(src, nbits); return __bitmap_empty(src, nbits);
} }
static inline int bitmap_full(const unsigned long *src, int nbits) static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@ -273,7 +273,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
return __bitmap_full(src, nbits); return __bitmap_full(src, nbits);
} }
static inline int bitmap_weight(const unsigned long *src, int nbits) static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
@ -284,7 +284,7 @@ static inline void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits) const unsigned long *src, int n, int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
*dst = *src >> n; *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
else else
__bitmap_shift_right(dst, src, n, nbits); __bitmap_shift_right(dst, src, n, nbits);
} }
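The masking added above matters because the last word may carry garbage past nbits; a small standalone sketch of the difference, using the kernel's definition of BITMAP_LAST_WORD_MASK():

	#include <stdio.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))
	/* as in the kernel: the low nbits bits of the last word set */
	#define BITMAP_LAST_WORD_MASK(nbits) \
		(~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

	int main(void)
	{
		unsigned long src = 0xdeadbeefUL; /* only low 16 bits are "real" */
		unsigned int nbits = 16, n = 4;

		printf("unmasked: %#lx\n", src >> n);	/* 0xdeadbee */
		printf("masked:   %#lx\n",
		       (src & BITMAP_LAST_WORD_MASK(nbits)) >> n); /* 0xbee */
		return 0;
	}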

View File

@ -4,12 +4,23 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#define BIT(nr) (1UL << (nr)) #define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8 #define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif #endif
/*
* Create a contiguous bitmask starting at bit position @l and ending at
* position @h. For example,
* GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
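A quick sanity check of the arithmetic (GENMASK sets bits l..h inclusive):

	/* GENMASK(7, 4) == ((1u << 4) - 1) << 4 == 0xf0 */
	unsigned int reg = 0xabcd;
	unsigned int field = (reg & GENMASK(7, 4)) >> 4;	/* 0xc */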
extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w); extern unsigned int __sw_hweight32(unsigned int w);
@ -185,6 +196,21 @@ static inline unsigned long __ffs64(u64 word)
#ifdef __KERNEL__ #ifdef __KERNEL__
#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits) \
({ \
const typeof(*ptr) mask = (_mask), bits = (_bits); \
typeof(*ptr) old, new; \
\
do { \
old = ACCESS_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
\
new; \
})
#endif
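Usage of set_mask_bits() is along these lines (a sketch; the flags word and the STATE_* values are hypothetical):

	#define STATE_MASK	0x3UL
	#define STATE_RUNNING	0x2UL

	/* clear the STATE_MASK bits and set STATE_RUNNING in one atomic
	 * update; loops on cmpxchg() until no other CPU has raced */
	set_mask_bits(&obj->flags, STATE_MASK, STATE_RUNNING);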
#ifndef find_last_bit #ifndef find_last_bit
/** /**
* find_last_bit - find the last set bit in a memory region * find_last_bit - find the last set bit in a memory region

View File

@ -57,6 +57,7 @@
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
/* Force a compilation error if a constant expression is not a power of 2 */
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ #define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))

View File

@ -2,7 +2,7 @@
#define _LINUX_BYTEORDER_GENERIC_H #define _LINUX_BYTEORDER_GENERIC_H
/* /*
* linux/byteorder_generic.h * linux/byteorder/generic.h
* Generic Byte-reordering support * Generic Byte-reordering support
* *
* The "... p" macros, like le64_to_cpup, can be used with pointers * The "... p" macros, like le64_to_cpup, can be used with pointers

View File

@ -37,6 +37,9 @@
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); }) (typeof(ptr)) (__ptr + (off)); })
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
#ifdef __CHECKER__ #ifdef __CHECKER__
#define __must_be_array(arr) 0 #define __must_be_array(arr) 0
#else #else
@ -50,9 +53,14 @@
*/ */
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
# define inline inline __attribute__((always_inline)) # define inline inline __attribute__((always_inline)) notrace
# define __inline__ __inline__ __attribute__((always_inline)) # define __inline__ __inline__ __attribute__((always_inline)) notrace
# define __inline __inline __attribute__((always_inline)) # define __inline __inline __attribute__((always_inline)) notrace
#else
/* A lot of inline functions can cause havoc with function tracing */
# define inline inline notrace
# define __inline__ __inline__ notrace
# define __inline __inline notrace
#endif #endif
#define __deprecated __attribute__((deprecated)) #define __deprecated __attribute__((deprecated))

View File

@ -75,11 +75,7 @@
* *
* (asm goto is automatically volatile - the naming reflects this.) * (asm goto is automatically volatile - the naming reflects this.)
*/ */
#if GCC_VERSION <= 40801 #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
#else
# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
#endif
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400 #if GCC_VERSION >= 40400

View File

@ -63,6 +63,13 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# include <linux/compiler-intel.h> # include <linux/compiler-intel.h>
#endif #endif
/* Clang compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
*/
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif
/* /*
* Generic compiler-dependent macros required for kernel * Generic compiler-dependent macros required for kernel
* build go below this comment. Actual compiler/compiler version * build go below this comment. Actual compiler/compiler version
@ -170,6 +177,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
(typeof(ptr)) (__ptr + (off)); }) (typeof(ptr)) (__ptr + (off)); })
#endif #endif
#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
/* Not-quite-unique ID. */ /* Not-quite-unique ID. */
#ifndef __UNIQUE_ID #ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
@ -298,6 +309,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif #endif
/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
/* Compile time object size, -1 for unknown */ /* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size #ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1 # define __compiletime_object_size(obj) -1
@ -307,9 +323,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#endif #endif
#ifndef __compiletime_error #ifndef __compiletime_error
# define __compiletime_error(message) # define __compiletime_error(message)
/*
* Sparse complains of variable sized arrays due to the temporary variable in
* __compiletime_assert. Unfortunately we can't just expand it out to make
* sparse see a constant array size without breaking compiletime_assert on old
* versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
*/
# ifndef __CHECKER__
# define __compiletime_error_fallback(condition) \ # define __compiletime_error_fallback(condition) \
do { ((void)sizeof(char[1 - 2 * condition])); } while (0) do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
#else # endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0) # define __compiletime_error_fallback(condition) do { } while (0)
#endif #endif
@ -337,6 +362,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define compiletime_assert(condition, msg) \ #define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
#define compiletime_assert_atomic_type(t) \
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
/* /*
* Prevent the compiler from merging or refetching accesses. The compiler * Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(), * is also forbidden from reordering successive instances of ACCESS_ONCE(),
@ -354,7 +383,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES #ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text"))) # define __kprobes __attribute__((__section__(".kprobes.text")))
# define nokprobe_inline __always_inline
#else #else
# define __kprobes # define __kprobes
# define nokprobe_inline inline
#endif #endif
#endif /* __LINUX_COMPILER_H */ #endif /* __LINUX_COMPILER_H */

View File

@ -115,6 +115,7 @@ struct dma_buf_ops {
* @exp_name: name of the exporter; useful for debugging. * @exp_name: name of the exporter; useful for debugging.
* @list_node: node for dma_buf accounting and debugging. * @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object. * @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
*/ */
struct dma_buf { struct dma_buf {
size_t size; size_t size;
@ -168,10 +169,11 @@ void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach); struct dma_buf_attachment *dmabuf_attach);
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags, const char *); size_t size, int flags, const char *,
struct reservation_object *);
#define dma_buf_export(priv, ops, size, flags) \ #define dma_buf_export(priv, ops, size, flags, resv) \
dma_buf_export_named(priv, ops, size, flags, __FILE__) dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
int dma_buf_fd(struct dma_buf *dmabuf, int flags); int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd); struct dma_buf *dma_buf_get(int fd);
@ -194,4 +196,6 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long); unsigned long);
void *dma_buf_vmap(struct dma_buf *); void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr); void dma_buf_vunmap(struct dma_buf *, void *vaddr);
int dma_buf_debugfs_create_file(const char *name,
int (*write)(struct seq_file *));
#endif /* __DMA_BUF_H__ */ #endif /* __DMA_BUF_H__ */
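With the new reservation parameter, an exporter call now looks roughly like this (a sketch; my_priv, my_dmabuf_ops and obj_size are placeholders, and passing a NULL reservation object asks the core to allocate one):

	struct dma_buf *buf = dma_buf_export(my_priv, &my_dmabuf_ops,
					     obj_size, O_RDWR, NULL);
	if (IS_ERR(buf))
		return PTR_ERR(buf);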

View File

@ -2,12 +2,13 @@
#define _LINUX_ERR_H #define _LINUX_ERR_H
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/types.h>
#include <errno.h> #include <errno.h>
/* /*
* Kernel pointers have redundant information, so we can use a * Kernel pointers have redundant information, so we can use a
* scheme where we can return either an error code or a dentry * scheme where we can return either an error code or a normal
* pointer with the same return value. * pointer with the same return value.
* *
* This should be a per-architecture thing, to allow different * This should be a per-architecture thing, to allow different
@ -29,12 +30,12 @@ static inline long __must_check PTR_ERR(__force const void *ptr)
return (long) ptr; return (long) ptr;
} }
static inline long __must_check IS_ERR(__force const void *ptr) static inline bool __must_check IS_ERR(__force const void *ptr)
{ {
return IS_ERR_VALUE((unsigned long)ptr); return IS_ERR_VALUE((unsigned long)ptr);
} }
static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr) static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{ {
return !ptr || IS_ERR_VALUE((unsigned long)ptr); return !ptr || IS_ERR_VALUE((unsigned long)ptr);
} }
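The idiom these helpers support, sketched with a hypothetical constructor:

	static struct foo *foo_create(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);  /* encode errno in the pointer */
		return f;
	}

	/* caller side */
	struct foo *f = foo_create();
	if (IS_ERR(f))
		return PTR_ERR(f);		  /* recover the errno value */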

View File

@ -413,6 +413,8 @@ struct vm_area_struct;
struct fb_info; struct fb_info;
struct device; struct device;
struct file; struct file;
struct videomode;
struct device_node;
/* Definitions below are used in the parsed monitor specs */ /* Definitions below are used in the parsed monitor specs */
#define FB_DPMS_ACTIVE_OFF 1 #define FB_DPMS_ACTIVE_OFF 1
@ -439,6 +441,7 @@ struct file;
#define FB_MISC_PRIM_COLOR 1 #define FB_MISC_PRIM_COLOR 1
#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */ #define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */
#define FB_MISC_HDMI 4
struct fb_chroma { struct fb_chroma {
__u32 redx; /* in fraction of 1024 */ __u32 redx; /* in fraction of 1024 */
__u32 greenx; __u32 greenx;
@ -690,6 +693,10 @@ struct fb_ops {
/* teardown any resources to do with this framebuffer */ /* teardown any resources to do with this framebuffer */
void (*fb_destroy)(struct fb_info *info); void (*fb_destroy)(struct fb_info *info);
/* called at KDB enter and leave time to prepare the console */
int (*fb_debug_enter)(struct fb_info *info);
int (*fb_debug_leave)(struct fb_info *info);
}; };
#ifdef CONFIG_FB_TILEBLITTING #ifdef CONFIG_FB_TILEBLITTING
@ -938,7 +945,7 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
#define fb_memcpy_fromfb sbus_memcpy_fromio #define fb_memcpy_fromfb sbus_memcpy_fromio
#define fb_memcpy_tofb sbus_memcpy_toio #define fb_memcpy_tofb sbus_memcpy_toio
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) #elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__)
#define fb_readb __raw_readb #define fb_readb __raw_readb
#define fb_readw __raw_readw #define fb_readw __raw_readw
@ -999,7 +1006,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
extern int register_framebuffer(struct fb_info *fb_info); extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info); extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info); extern int unlink_framebuffer(struct fb_info *fb_info);
extern void remove_conflicting_framebuffers(struct apertures_struct *a, extern int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary); const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
extern int fb_show_logo(struct fb_info *fb_info, int rotate); extern int fb_show_logo(struct fb_info *fb_info, int rotate);
@ -1027,7 +1034,7 @@ static inline void unlock_fb_info(struct fb_info *info)
static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
u8 *src, u32 s_pitch, u32 height) u8 *src, u32 s_pitch, u32 height)
{ {
int i, j; u32 i, j;
d_pitch -= s_pitch; d_pitch -= s_pitch;

View File

@ -4,4 +4,5 @@
#ifndef __LINUX_FILE_H #ifndef __LINUX_FILE_H
#define __LINUX_FILE_H #define __LINUX_FILE_H
struct file;
#endif /* __LINUX_FILE_H */ #endif /* __LINUX_FILE_H */

View File

@ -43,9 +43,11 @@ struct builtin_fw {
int request_firmware(const struct firmware **fw, const char *name, int request_firmware(const struct firmware **fw, const char *name,
struct device *device); struct device *device);
int request_firmware_nowait( int request_firmware_nowait(
struct module *module, int uevent, struct module *module, bool uevent,
const char *name, struct device *device, void *context, const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context)); void (*cont)(const struct firmware *fw, void *context));
int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
void release_firmware(const struct firmware *fw); void release_firmware(const struct firmware *fw);
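Typical synchronous use of this API, sketched (the firmware name and dev are placeholders):

	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "myvendor/mychip.bin", dev);
	if (err)
		return err;		/* lookup or load failed */

	/* fw->data and fw->size now describe the loaded image */

	release_firmware(fw);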

View File

@ -0,0 +1,205 @@
/*
* Statically sized hash table implementation
* (C) 2012 Sasha Levin <levinsasha928@gmail.com>
*/
#ifndef _LINUX_HASHTABLE_H
#define _LINUX_HASHTABLE_H
#include <linux/list.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/rculist.h>
#define DEFINE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))
/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits) \
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
INIT_HLIST_HEAD(&ht[i]);
}
/**
* hash_init - initialize a hash table
* @hashtable: hashtable to be initialized
*
* Calculates the size of the hashtable from the given parameter and
* initializes all of its buckets to empty.
*
* This has to be a macro since HASH_BITS() will not work on pointers: it
* calculates the size from the array type at compile time.
*/
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
/**
* hash_add - add an object to a hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add(hashtable, node, key) \
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_add_rcu - add an object to a rcu enabled hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add_rcu(hashtable, node, key) \
hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_hashed - check whether an object is in any hashtable
* @node: the &struct hlist_node of the object to be checked
*/
static inline bool hash_hashed(struct hlist_node *node)
{
return !hlist_unhashed(node);
}
static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
if (!hlist_empty(&ht[i]))
return false;
return true;
}
/**
* hash_empty - check whether a hashtable is empty
* @hashtable: hashtable to check
*
* This has to be a macro since HASH_BITS() will not work on pointers: it
* calculates the size from the array type at compile time.
*/
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
/**
* hash_del - remove an object from a hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del(struct hlist_node *node)
{
hlist_del_init(node);
}
/**
* hash_del_rcu - remove an object from a rcu enabled hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del_rcu(struct hlist_node *node)
{
hlist_del_init_rcu(node);
}
/**
* hash_for_each - iterate over a hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry(obj, &name[bkt], member)
/**
* hash_for_each_rcu - iterate over a rcu enabled hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_rcu(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_rcu(obj, &name[bkt], member)
/**
* hash_for_each_safe - iterate over a hashtable safe against removal of
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @tmp: a &struct used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_safe(name, bkt, tmp, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
/**
* hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible(name, obj, member, key) \
hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_rcu - iterate over all possible objects hashing to the
* same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_rcu(name, obj, member, key) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
member)
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
* to the same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*
* This is the same as hash_for_each_possible_rcu() except that it does
* not do any RCU debugging or tracing.
*/
#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
hlist_for_each_entry_rcu_notrace(obj, \
&name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @tmp: a &struct used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
hlist_for_each_entry_safe(obj, tmp,\
&name[hash_min(key, HASH_BITS(name))], member)
#endif
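Putting the pieces together, a minimal (hypothetical) user of this header might look like:

	struct object {
		int id;
		struct hlist_node node;
	};

	static DEFINE_HASHTABLE(objects, 4);	/* 2^4 = 16 buckets */

	static void object_insert(struct object *obj)
	{
		hash_add(objects, &obj->node, obj->id);
	}

	static struct object *object_lookup(int id)
	{
		struct object *obj;

		/* walks only the one bucket that 'id' hashes to */
		hash_for_each_possible(objects, obj, node, id)
			if (obj->id == id)
				return obj;
		return NULL;
	}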

View File

@ -262,6 +262,18 @@ union hdmi_vendor_any_infoframe {
struct hdmi_vendor_infoframe hdmi; struct hdmi_vendor_infoframe hdmi;
}; };
/**
* union hdmi_infoframe - overall union of all abstract infoframe representations
* @any: generic infoframe
* @avi: avi infoframe
* @spd: spd infoframe
* @vendor: union of all vendor infoframes
* @audio: audio infoframe
*
* This is used by the generic pack function. This works since all infoframes
* have the same header, which also indicates which type of infoframe should
* be packed.
*/
union hdmi_infoframe { union hdmi_infoframe {
struct hdmi_any_infoframe any; struct hdmi_any_infoframe any;
struct hdmi_avi_infoframe avi; struct hdmi_avi_infoframe avi;

View File

@ -135,7 +135,6 @@ struct i2c_driver {
* @name: Indicates the type of the device, usually a chip name that's * @name: Indicates the type of the device, usually a chip name that's
* generic enough to hide second-sourcing and compatible revisions. * generic enough to hide second-sourcing and compatible revisions.
* @adapter: manages the bus segment hosting this I2C device * @adapter: manages the bus segment hosting this I2C device
* @driver: device's driver, hence pointer to access routines
* @dev: Driver model device node for the slave. * @dev: Driver model device node for the slave.
* @irq: indicates the IRQ generated by this device (if any) * @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's * @detected: member of an i2c_driver.clients list or i2c-core's
@ -152,7 +151,6 @@ struct i2c_client {
/* _LOWER_ 7 bits */ /* _LOWER_ 7 bits */
char name[I2C_NAME_SIZE]; char name[I2C_NAME_SIZE];
struct i2c_adapter *adapter; /* the adapter we sit on */ struct i2c_adapter *adapter; /* the adapter we sit on */
struct i2c_driver *driver; /* and our access routines */
struct device dev; /* the device structure */ struct device dev; /* the device structure */
int irq; /* irq issued by device */ int irq; /* irq issued by device */
struct list_head detected; struct list_head detected;
@ -160,6 +158,7 @@ struct i2c_client {
#define to_i2c_client(d) container_of(d, struct i2c_client, dev) #define to_i2c_client(d) container_of(d, struct i2c_client, dev)
extern struct i2c_client *i2c_verify_client(struct device *dev); extern struct i2c_client *i2c_verify_client(struct device *dev);
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
/** /**
* struct i2c_board_info - template for device creation * struct i2c_board_info - template for device creation
@ -209,6 +208,10 @@ struct i2c_board_info {
* i2c_algorithm is the interface to a class of hardware solutions which can * i2c_algorithm is the interface to a class of hardware solutions which can
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common. * to name two of the most common.
*
* The return codes from the @master_xfer field should indicate the type of
* error code that occurred during the transfer, as documented in the kernel
* Documentation file Documentation/i2c/fault-codes.
*/ */
struct i2c_algorithm { struct i2c_algorithm {
/* If an adapter algorithm can't do I2C-level access, set master_xfer /* If an adapter algorithm can't do I2C-level access, set master_xfer
@ -275,6 +278,7 @@ void i2c_unlock_adapter(struct i2c_adapter *);
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */ #define I2C_CLASS_SPD (1<<7) /* Memory modules */
#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */
/* Internal numbers to terminate lists */ /* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU #define I2C_CLIENT_END 0xfffeU

View File

@ -35,21 +35,24 @@
struct idr_layer { struct idr_layer {
int prefix; /* the ID prefix of this idr_layer */ int prefix; /* the ID prefix of this idr_layer */
DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */ int layer; /* distance from leaf */
struct idr_layer __rcu *ary[1<<IDR_BITS]; struct idr_layer __rcu *ary[1<<IDR_BITS];
int count; /* When zero, we can release it */ int count; /* When zero, we can release it */
int layer; /* distance from leaf */ union {
/* A zero bit means "space here" */
DECLARE_BITMAP(bitmap, IDR_SIZE);
struct rcu_head rcu_head; struct rcu_head rcu_head;
};
}; };
struct idr { struct idr {
struct idr_layer __rcu *hint; /* the last layer allocated from */ struct idr_layer __rcu *hint; /* the last layer allocated from */
struct idr_layer __rcu *top; struct idr_layer __rcu *top;
struct idr_layer *id_free;
int layers; /* only valid w/o concurrent changes */ int layers; /* only valid w/o concurrent changes */
int id_free_cnt;
int cur; /* current pos for cyclic allocation */ int cur; /* current pos for cyclic allocation */
spinlock_t lock; spinlock_t lock;
int id_free_cnt;
struct idr_layer *id_free;
}; };
#define IDR_INIT(name) \ #define IDR_INIT(name) \
@ -88,9 +91,9 @@ int idr_for_each(struct idr *idp,
void *idr_get_next(struct idr *idp, int *nextid); void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id); void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id); void idr_remove(struct idr *idp, int id);
void idr_free(struct idr *idp, int id);
void idr_destroy(struct idr *idp); void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp); void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);
/** /**
* idr_preload_end - end preload section started with idr_preload() * idr_preload_end - end preload section started with idr_preload()
@ -138,69 +141,6 @@ static inline void *idr_find(struct idr *idr, int id)
#define idr_for_each_entry(idp, entry, id) \ #define idr_for_each_entry(idp, entry, id) \
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
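The comments below deprecate the old idr_pre_get()/idr_get_new() calls in favour of idr_preload() plus idr_alloc(); the replacement pattern looks roughly like this (my_idr and my_lock are the caller's own):

	int id;

	idr_preload(GFP_KERNEL);	/* preallocate outside the lock */
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);	/* ids >= 1, no cap */
	spin_unlock(&my_lock);
	idr_preload_end();

	if (id < 0)
		return id;		/* -ENOMEM or -ENOSPC */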
/*
* Don't use the following functions. These exist only to suppress
* deprecated warnings on EXPORT_SYMBOL()s.
*/
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void __idr_remove_all(struct idr *idp);
/**
* idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
* @gfp_mask: memory allocation flags
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
return __idr_pre_get(idp, gfp_mask);
}
/**
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @starting_id: id to start search at
* @id: pointer to the allocated handle
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr,
int starting_id, int *id)
{
return __idr_get_new_above(idp, ptr, starting_id, id);
}
/**
* idr_get_new - allocate new idr entry
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id)
{
return __idr_get_new_above(idp, ptr, 0, id);
}
/**
* idr_remove_all - remove all ids from the given idr tree
* @idp: idr handle
*
* If you're trying to destroy @idp, calling idr_destroy() is enough.
* This is going away. Don't use.
*/
static inline void __deprecated idr_remove_all(struct idr *idp)
{
__idr_remove_all(idp);
}
/* /*
* IDA - IDR based id allocator, use when translation from id to * IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary. * pointer isn't necessary.

View File

@ -0,0 +1,27 @@
#ifndef _LINUX_INTERVAL_TREE_H
#define _LINUX_INTERVAL_TREE_H
#include <linux/rbtree.h>
struct interval_tree_node {
struct rb_node rb;
unsigned long start; /* Start of interval */
unsigned long last; /* Last location _in_ interval */
unsigned long __subtree_last;
};
extern void
interval_tree_insert(struct interval_tree_node *node, struct rb_root *root);
extern void
interval_tree_remove(struct interval_tree_node *node, struct rb_root *root);
extern struct interval_tree_node *
interval_tree_iter_first(struct rb_root *root,
unsigned long start, unsigned long last);
extern struct interval_tree_node *
interval_tree_iter_next(struct interval_tree_node *node,
unsigned long start, unsigned long last);
#endif /* _LINUX_INTERVAL_TREE_H */
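A sketch of the intended use of this interface (the range values are arbitrary):

	struct rb_root root = RB_ROOT;
	struct interval_tree_node node = {
		.start = 0x1000,
		.last  = 0x1fff,	/* inclusive end */
	};
	struct interval_tree_node *it;

	interval_tree_insert(&node, &root);

	/* visit every stored interval overlapping [0x1800, 0x27ff] */
	for (it = interval_tree_iter_first(&root, 0x1800, 0x27ff);
	     it != NULL;
	     it = interval_tree_iter_next(it, 0x1800, 0x27ff)) {
		/* it->start .. it->last overlaps the query */
	}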

View File

@ -0,0 +1,191 @@
/*
Interval Trees
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
include/linux/interval_tree_generic.h
*/
#include <linux/rbtree_augmented.h>
/*
* Template for implementing interval trees
*
* ITSTRUCT: struct type of the interval tree nodes
* ITRB: name of struct rb_node field within ITSTRUCT
* ITTYPE: type of the interval endpoints
* ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree
* ITSTART(n): start endpoint of ITSTRUCT node n
* ITLAST(n): last endpoint of ITSTRUCT node n
* ITSTATIC: 'static' or empty
* ITPREFIX: prefix to use for the inline tree definitions
*
* Note - before using this, please consider whether the non-generic version
* (interval_tree.h) would work for you...
*/
#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \
ITSTART, ITLAST, ITSTATIC, ITPREFIX) \
\
/* Callbacks for augmented rbtree insert and remove */ \
\
static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
{ \
ITTYPE max = ITLAST(node), subtree_last; \
if (node->ITRB.rb_left) { \
subtree_last = rb_entry(node->ITRB.rb_left, \
ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) \
max = subtree_last; \
} \
if (node->ITRB.rb_right) { \
subtree_last = rb_entry(node->ITRB.rb_right, \
ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) \
max = subtree_last; \
} \
return max; \
} \
\
RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
\
/* Insert / remove interval nodes from the tree */ \
\
ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
{ \
struct rb_node **link = &root->rb_node, *rb_parent = NULL; \
ITTYPE start = ITSTART(node), last = ITLAST(node); \
ITSTRUCT *parent; \
\
while (*link) { \
rb_parent = *link; \
parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
if (parent->ITSUBTREE < last) \
parent->ITSUBTREE = last; \
if (start < ITSTART(parent)) \
link = &parent->ITRB.rb_left; \
else \
link = &parent->ITRB.rb_right; \
} \
\
node->ITSUBTREE = last; \
rb_link_node(&node->ITRB, rb_parent, link); \
rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
{ \
rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
/* \
* Iterate over intervals intersecting [start;last] \
* \
* Note that a node's interval intersects [start;last] iff: \
* Cond1: ITSTART(node) <= last \
* and \
* Cond2: start <= ITLAST(node) \
*/ \
\
static ITSTRUCT * \
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
while (true) { \
/* \
* Loop invariant: start <= node->ITSUBTREE \
* (Cond2 is satisfied by one of the subtree nodes) \
*/ \
if (node->ITRB.rb_left) { \
ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
ITSTRUCT, ITRB); \
if (start <= left->ITSUBTREE) { \
/* \
* Some nodes in left subtree satisfy Cond2. \
* Iterate to find the leftmost such node N. \
* If it also satisfies Cond1, that's the \
* match we are looking for. Otherwise, there \
* is no matching interval as nodes to the \
* right of N can't satisfy Cond1 either. \
*/ \
node = left; \
continue; \
} \
} \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
if (node->ITRB.rb_right) { \
node = rb_entry(node->ITRB.rb_right, \
ITSTRUCT, ITRB); \
if (start <= node->ITSUBTREE) \
continue; \
} \
} \
return NULL; /* No match */ \
} \
} \
\
ITSTATIC ITSTRUCT * \
ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \
{ \
ITSTRUCT *node; \
\
if (!root->rb_node) \
return NULL; \
node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
if (node->ITSUBTREE < start) \
return NULL; \
return ITPREFIX ## _subtree_search(node, start, last); \
} \
\
ITSTATIC ITSTRUCT * \
ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
struct rb_node *rb = node->ITRB.rb_right, *prev; \
\
while (true) { \
/* \
* Loop invariants: \
* Cond1: ITSTART(node) <= last \
* rb == node->ITRB.rb_right \
* \
* First, search right subtree if suitable \
*/ \
if (rb) { \
ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
if (start <= right->ITSUBTREE) \
return ITPREFIX ## _subtree_search(right, \
start, last); \
} \
\
/* Move up the tree until we come from a node's left child */ \
do { \
rb = rb_parent(&node->ITRB); \
if (!rb) \
return NULL; \
prev = &node->ITRB; \
node = rb_entry(rb, ITSTRUCT, ITRB); \
rb = node->ITRB.rb_right; \
} while (prev == rb); \
\
/* Check if the node intersects [start;last] */ \
if (last < ITSTART(node)) /* !Cond1 */ \
return NULL; \
else if (start <= ITLAST(node)) /* Cond2 */ \
return node; \
} \
}
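For a concrete expansion, the non-generic implementation instantiates this template for the node type from interval_tree.h along these lines (mirroring lib/interval_tree.c; START/LAST are local helper macros):

#define START(node) ((node)->start)
#define LAST(node)  ((node)->last)

INTERVAL_TREE_DEFINE(struct interval_tree_node, /* ITSTRUCT   */
                     rb,                        /* ITRB       */
                     unsigned long,             /* ITTYPE     */
                     __subtree_last,            /* ITSUBTREE  */
                     START, LAST,               /* ITSTART, ITLAST */
                     ,                          /* ITSTATIC left empty: exported */
                     interval_tree)             /* ITPREFIX   */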

View File

@ -57,7 +57,7 @@ struct resource_list {
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000 #define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000 #define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000 #define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */

View File

@ -14,6 +14,6 @@ enum irqreturn {
}; };
typedef enum irqreturn irqreturn_t; typedef enum irqreturn irqreturn_t;
#define IRQ_RETVAL(x) ((x) != IRQ_NONE) #define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
#endif #endif

View File

@ -76,10 +76,18 @@ extern int register_refined_jiffies(long clock_tick_rate);
* The 64-bit value is not atomic - you MUST NOT read it * The 64-bit value is not atomic - you MUST NOT read it
* without sampling the sequence number in jiffies_lock. * without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate. * get_jiffies_64() will do this for you as appropriate.
*/
extern u64 jiffies_64;
extern unsigned long volatile jiffies;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
static inline u64 get_jiffies_64(void) static inline u64 get_jiffies_64(void)
{ {
return (u64)GetTimerTicks(); return (u64)jiffies;
} }
#endif
/* /*
* These inlines deal with timer wrapping correctly. You are * These inlines deal with timer wrapping correctly. You are
@ -290,6 +298,12 @@ extern unsigned long preset_lpj;
*/ */
extern unsigned int jiffies_to_msecs(const unsigned long j); extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j); extern unsigned int jiffies_to_usecs(const unsigned long j);
static inline u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
extern unsigned long msecs_to_jiffies(const unsigned int m); extern unsigned long msecs_to_jiffies(const unsigned int m);
extern unsigned long usecs_to_jiffies(const unsigned int u); extern unsigned long usecs_to_jiffies(const unsigned int u);
extern unsigned long timespec_to_jiffies(const struct timespec *value); extern unsigned long timespec_to_jiffies(const struct timespec *value);
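A short sketch of the usual pattern these conversion helpers support, using the wrap-safe comparison macros mentioned above (done() is a hypothetical completion check):

unsigned long deadline = jiffies + msecs_to_jiffies(100);       /* 100 ms */

while (!done()) {
        if (time_after(jiffies, deadline))
                return -ETIMEDOUT;      /* deadline passed */
}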

View File

@ -31,6 +31,19 @@
#define ULLONG_MAX (~0ULL) #define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0) #define SIZE_MAX (~(size_t)0)
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
#define U16_MAX ((u16)~0U)
#define S16_MAX ((s16)(U16_MAX>>1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
#define S64_MAX ((s64)(U64_MAX>>1))
#define S64_MIN ((s64)(-S64_MAX - 1))
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
@ -126,6 +139,13 @@
*/ */
#define lower_32_bits(n) ((u32)(n)) #define lower_32_bits(n) ((u32)(n))
#define abs64(x) ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
})
#define KERN_EMERG "<0>" /* system is unusable */ #define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */ #define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */ #define KERN_CRIT "<2>" /* critical conditions */
@ -159,6 +179,9 @@ int hex2bin(u8 *dst, const char *src, size_t count);
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) #define printk(fmt, arg...) dbgprintf(fmt , ##arg)
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
/* /*
* min()/max()/clamp() macros that also do * min()/max()/clamp() macros that also do
@ -493,5 +516,35 @@ extern unsigned int tsc_khz;
}) })
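/*
 * Unrolled fast paths for small constant sizes, with a memcpy() fallback.
 * This DDK has no separate user address space, so no faults can occur and
 * the helper always returns 0.
 */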
static inline __must_check long __copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
#ifdef CONFIG_64BIT
case 8:
*(u64 __force *)to = *(u64 *)from;
return 0;
#endif
default:
break;
}
}
memcpy((void __force *)to, from, n);
return 0;
}
struct seq_file;
#endif #endif

View File

@ -0,0 +1,24 @@
#ifndef _KDB_H
#define _KDB_H
/*
* Kernel Debugger Architecture Independent Global Headers
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
typedef enum {
KDB_REPEAT_NONE = 0, /* Do not repeat this command */
KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
} kdb_repeat_t;
typedef int (*kdb_func_t)(int, const char **);
#endif /* !_KDB_H */

View File

@ -31,8 +31,10 @@
#define UEVENT_NUM_ENVP 32 /* number of env pointers */ #define UEVENT_NUM_ENVP 32 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
#ifdef CONFIG_UEVENT_HELPER
/* path to the userspace helper executed on an event */ /* path to the userspace helper executed on an event */
extern char uevent_helper[]; extern char uevent_helper[];
#endif
/* counter to tag the uevent, read only except for the kobject core */ /* counter to tag the uevent, read only except for the kobject core */
extern u64 uevent_seqnum; extern u64 uevent_seqnum;
@ -65,6 +67,9 @@ struct kobject {
struct kobj_type *ktype; struct kobj_type *ktype;
// struct sysfs_dirent *sd; // struct sysfs_dirent *sd;
struct kref kref; struct kref kref;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
struct delayed_work release;
#endif
unsigned int state_initialized:1; unsigned int state_initialized:1;
unsigned int state_in_sysfs:1; unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1; unsigned int state_add_uevent_sent:1;
@ -103,6 +108,7 @@ extern int __must_check kobject_move(struct kobject *, struct kobject *);
extern struct kobject *kobject_get(struct kobject *kobj); extern struct kobject *kobject_get(struct kobject *kobj);
extern void kobject_put(struct kobject *kobj); extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
struct kobj_type { struct kobj_type {

View File

@ -360,6 +360,17 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_first_entry(ptr, type, member) \ #define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member) list_entry((ptr)->next, type, member)
/**
* list_last_entry - get the last element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*
* Note that the list is expected to be non-empty.
*/
#define list_last_entry(ptr, type, member) \
list_entry((ptr)->prev, type, member)
/** /**
* list_first_entry_or_null - get the first element from a list * list_first_entry_or_null - get the first element from a list
* @ptr: the list head to take the element from. * @ptr: the list head to take the element from.
@ -371,6 +382,22 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_first_entry_or_null(ptr, type, member) \ #define list_first_entry_or_null(ptr, type, member) \
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_struct within the struct.
*/
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_struct within the struct.
*/
#define list_prev_entry(pos, member) \
list_entry((pos)->member.prev, typeof(*(pos)), member)
/** /**
* list_for_each - iterate over a list * list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor. * @pos: the &struct list_head to use as a loop cursor.
@ -415,9 +442,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @member: the name of the list_struct within the struct. * @member: the name of the list_struct within the struct.
*/ */
#define list_for_each_entry(pos, head, member) \ #define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \ for (pos = list_first_entry(head, typeof(*pos), member); \
&pos->member != (head); \ &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member)) pos = list_next_entry(pos, member))
/** /**
* list_for_each_entry_reverse - iterate backwards over list of given type. * list_for_each_entry_reverse - iterate backwards over list of given type.
@ -426,9 +453,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @member: the name of the list_struct within the struct. * @member: the name of the list_struct within the struct.
*/ */
#define list_for_each_entry_reverse(pos, head, member) \ #define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \ for (pos = list_last_entry(head, typeof(*pos), member); \
&pos->member != (head); \ &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member)) pos = list_prev_entry(pos, member))
/** /**
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
@ -451,9 +478,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* the current position. * the current position.
*/ */
#define list_for_each_entry_continue(pos, head, member) \ #define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \ for (pos = list_next_entry(pos, member); \
&pos->member != (head); \ &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member)) pos = list_next_entry(pos, member))
/** /**
* list_for_each_entry_continue_reverse - iterate backwards from the given point * list_for_each_entry_continue_reverse - iterate backwards from the given point
@ -465,9 +492,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* the current position. * the current position.
*/ */
#define list_for_each_entry_continue_reverse(pos, head, member) \ #define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ for (pos = list_prev_entry(pos, member); \
&pos->member != (head); \ &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member)) pos = list_prev_entry(pos, member))
/** /**
* list_for_each_entry_from - iterate over list of given type from the current point * list_for_each_entry_from - iterate over list of given type from the current point
@ -479,7 +506,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/ */
#define list_for_each_entry_from(pos, head, member) \ #define list_for_each_entry_from(pos, head, member) \
for (; &pos->member != (head); \ for (; &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member)) pos = list_next_entry(pos, member))
/** /**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
@ -489,10 +516,10 @@ static inline void list_splice_tail_init(struct list_head *list,
* @member: the name of the list_struct within the struct. * @member: the name of the list_struct within the struct.
*/ */
#define list_for_each_entry_safe(pos, n, head, member) \ #define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \ for (pos = list_first_entry(head, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \ n = list_next_entry(pos, member); \
&pos->member != (head); \ &pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member)) pos = n, n = list_next_entry(n, member))
/** /**
* list_for_each_entry_safe_continue - continue list iteration safe against removal * list_for_each_entry_safe_continue - continue list iteration safe against removal
@ -505,10 +532,10 @@ static inline void list_splice_tail_init(struct list_head *list,
* safe against removal of list entry. * safe against removal of list entry.
*/ */
#define list_for_each_entry_safe_continue(pos, n, head, member) \ #define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member), \ for (pos = list_next_entry(pos, member), \
n = list_entry(pos->member.next, typeof(*pos), member); \ n = list_next_entry(pos, member); \
&pos->member != (head); \ &pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member)) pos = n, n = list_next_entry(n, member))
/** /**
* list_for_each_entry_safe_from - iterate over list from current point safe against removal * list_for_each_entry_safe_from - iterate over list from current point safe against removal
@ -521,9 +548,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* removal of list entry. * removal of list entry.
*/ */
#define list_for_each_entry_safe_from(pos, n, head, member) \ #define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_entry(pos->member.next, typeof(*pos), member); \ for (n = list_next_entry(pos, member); \
&pos->member != (head); \ &pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member)) pos = n, n = list_next_entry(n, member))
/** /**
* list_for_each_entry_safe_reverse - iterate backwards over list safe against removal * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
@ -536,10 +563,10 @@ static inline void list_splice_tail_init(struct list_head *list,
* of list entry. * of list entry.
*/ */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \ #define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member), \ for (pos = list_last_entry(head, typeof(*pos), member), \
n = list_entry(pos->member.prev, typeof(*pos), member); \ n = list_prev_entry(pos, member); \
&pos->member != (head); \ &pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member)) pos = n, n = list_prev_entry(n, member))
/** /**
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
@ -554,7 +581,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* completing the current iteration of the loop body. * completing the current iteration of the loop body.
*/ */
#define list_safe_reset_next(pos, n, member) \ #define list_safe_reset_next(pos, n, member) \
n = list_entry(pos->member.next, typeof(*pos), member) n = list_next_entry(pos, member)
/* /*
* Double linked lists with a single pointer list head. * Double linked lists with a single pointer list head.
@ -626,15 +653,15 @@ static inline void hlist_add_before(struct hlist_node *n,
*(n->pprev) = n; *(n->pprev) = n;
} }
static inline void hlist_add_after(struct hlist_node *n, static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *next) struct hlist_node *prev)
{ {
next->next = n->next; n->next = prev->next;
n->next = next; prev->next = n;
next->pprev = &n->next; n->pprev = &prev->next;
if(next->next) if (n->next)
next->next->pprev = &next->next; n->next->pprev = &n->next;
} }
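Note the argument-order flip that accompanies the rename: the node being inserted now comes first. A one-line sketch (names hypothetical; cursor is already on the list):

hlist_add_behind(&new_node->list, &cursor->list);       /* new_node follows cursor */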
/* after that we'll appear to be on some hlist and hlist_del will work */ /* after that we'll appear to be on some hlist and hlist_del will work */

View File

@ -228,9 +228,9 @@ struct held_lock {
unsigned int trylock:1; /* 16 bits */ unsigned int trylock:1; /* 16 bits */
unsigned int read:2; /* see lock_acquire() comment */ unsigned int read:2; /* see lock_acquire() comment */
unsigned int check:2; /* see lock_acquire() comment */ unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1; unsigned int hardirqs_off:1;
unsigned int references:11; /* 32 bits */ unsigned int references:12; /* 32 bits */
}; };
/* /*
@ -241,7 +241,7 @@ extern void lockdep_info(void);
extern void lockdep_reset(void); extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock); extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size); extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void); extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_off(void); extern void lockdep_off(void);
extern void lockdep_on(void); extern void lockdep_on(void);
@ -279,7 +279,7 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
(lock)->dep_map.key, sub) (lock)->dep_map.key, sub)
#define lockdep_set_novalidate_class(lock) \ #define lockdep_set_novalidate_class(lock) \
lockdep_set_class(lock, &__lockdep_no_validate__) lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/* /*
* Compare locking classes * Compare locking classes
*/ */
@ -302,9 +302,8 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
* *
* Values for check: * Values for check:
* *
* 0: disabled * 0: simple checks (freeing, held-at-exit-time, etc.)
* 1: simple checks (freeing, held-at-exit-time, etc.) * 1: full validation
* 2: full validation
*/ */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int trylock, int read, int check,
@ -335,9 +334,13 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) #define lockdep_assert_held(l) do { \
WARN_ON(debug_locks && !lockdep_is_held(l)); \
} while (0)
#else /* !LOCKDEP */ #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_off(void) static inline void lockdep_off(void)
{ {
@ -384,7 +387,7 @@ struct lock_class_key { };
#define lockdep_depth(tsk) (0) #define lockdep_depth(tsk) (0)
#define lockdep_assert_held(l) do { } while (0) #define lockdep_assert_held(l) do { (void)(l); } while (0)
#define lockdep_recursing(tsk) (0) #define lockdep_recursing(tsk) (0)
@ -532,13 +535,13 @@ static inline void print_irqtrace_events(struct task_struct *curr)
# define might_lock(lock) \ # define might_lock(lock) \
do { \ do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \ typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0) } while (0)
# define might_lock_read(lock) \ # define might_lock_read(lock) \
do { \ do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \ typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \ lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0) } while (0)
#else #else

View File

@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
return ret; return ret;
} }
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
#else
#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
u32 ah, al;
u64 ret;
al = a;
ah = a >> 32;
ret = ((u64)al * mul) >> shift;
if (ah)
ret += ((u64)ah * mul) << (32 - shift);
return ret;
}
#endif /* mul_u64_u32_shr */
#endif
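A quick worked check of the fallback path:

/*
 * a = 2^32 + 1, mul = 3, shift = 1:
 *   exact:    (0x100000001 * 3) >> 1 = 0x300000003 >> 1 = 0x180000001
 *   fallback: al = 1, ah = 1
 *             ret  = ((u64)1 * 3) >> 1        = 1
 *             ret += ((u64)1 * 3) << (32 - 1) = 0x180000000
 *             total                           = 0x180000001   (matches)
 */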
#endif /* _LINUX_MATH64_H */ #endif /* _LINUX_MATH64_H */

View File

@ -431,6 +431,14 @@ struct spi_device_id {
kernel_ulong_t driver_data; /* Data private to the driver */ kernel_ulong_t driver_data; /* Data private to the driver */
}; };
#define SPMI_NAME_SIZE 32
#define SPMI_MODULE_PREFIX "spmi:"
struct spmi_device_id {
char name[SPMI_NAME_SIZE];
kernel_ulong_t driver_data; /* Data private to the driver */
};
/* dmi */ /* dmi */
enum dmi_field { enum dmi_field {
DMI_NONE, DMI_NONE,
@ -547,6 +555,11 @@ struct amba_id {
* See documentation of "x86_match_cpu" for details. * See documentation of "x86_match_cpu" for details.
*/ */
/*
* MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id.
* Although gcc seems to ignore this error, clang fails without this define.
*/
#define x86cpu_device_id x86_cpu_id
struct x86_cpu_id { struct x86_cpu_id {
__u16 vendor; __u16 vendor;
__u16 family; __u16 family;
@ -563,6 +576,15 @@ struct x86_cpu_id {
#define X86_MODEL_ANY 0 #define X86_MODEL_ANY 0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
/*
* Generic table type for matching CPU features.
* @feature: the bit number of the feature (0 - 65535)
*/
struct cpu_feature {
__u16 feature;
};
#define IPACK_ANY_FORMAT 0xff #define IPACK_ANY_FORMAT 0xff
#define IPACK_ANY_ID (~0) #define IPACK_ANY_ID (~0)
struct ipack_device_id { struct ipack_device_id {
@ -598,4 +620,9 @@ struct rio_device_id {
__u16 asm_did, asm_vid; __u16 asm_did, asm_vid;
}; };
struct mcb_device_id {
__u16 device;
kernel_ulong_t driver_data;
};
#endif /* LINUX_MOD_DEVICETABLE_H */ #endif /* LINUX_MOD_DEVICETABLE_H */

View File

@ -92,4 +92,16 @@ static inline int mutex_is_locked(struct mutex *lock)
return atomic_read(&lock->count) != 1; return atomic_read(&lock->count) != 1;
} }
static inline int mutex_trylock(struct mutex *lock)
{
if (likely(atomic_cmpxchg(&lock->count, 1, 0) == 1))
return 1;
return 0;
}
static inline void mutex_destroy(struct mutex *lock)
{
}
#endif #endif

View File

@ -456,6 +456,7 @@ struct pci_dev {
(pci_resource_end((dev), (bar)) - \ (pci_resource_end((dev), (bar)) - \
pci_resource_start((dev), (bar)) + 1)) pci_resource_start((dev), (bar)) + 1))
#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
struct pci_bus { struct pci_bus {
struct list_head node; /* node in list of buses */ struct list_head node; /* node in list of buses */
@ -480,7 +481,7 @@ struct pci_bus {
char name[48]; char name[48];
unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
pci_bus_flags_t bus_flags; /* Inherited by child busses */ pci_bus_flags_t bus_flags; /* inherited by child buses */
struct device *bridge; struct device *bridge;
struct device dev; struct device dev;
struct bin_attribute *legacy_io; /* legacy I/O for this bus */ struct bin_attribute *legacy_io; /* legacy I/O for this bus */
@ -508,8 +509,12 @@ struct pci_sysdata {
#define to_pci_bus(n) container_of(n, struct pci_bus, dev) #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
/* /*
* Returns true if the pci bus is root (behind host-pci bridge), * Returns true if the PCI bus is root (behind host-PCI bridge),
* false otherwise * false otherwise
*
* Some code assumes that "bus->self == NULL" means that bus is a root bus.
* This is incorrect because "virtual" buses added for SR-IOV (via
* virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
*/ */
static inline bool pci_is_root_bus(struct pci_bus *pbus) static inline bool pci_is_root_bus(struct pci_bus *pbus)
{ {
@ -531,6 +536,32 @@ pci_find_next_bus(const struct pci_bus *from);
#define PCIBIOS_SET_FAILED 0x88 #define PCIBIOS_SET_FAILED 0x88
#define PCIBIOS_BUFFER_TOO_SMALL 0x89 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
/*
* Translate above to generic errno for passing back through non-PCI code.
*/
static inline int pcibios_err_to_errno(int err)
{
if (err <= PCIBIOS_SUCCESSFUL)
return err; /* Assume already errno */
switch (err) {
case PCIBIOS_FUNC_NOT_SUPPORTED:
return -ENOENT;
case PCIBIOS_BAD_VENDOR_ID:
return -EINVAL;
case PCIBIOS_DEVICE_NOT_FOUND:
return -ENODEV;
case PCIBIOS_BAD_REGISTER_NUMBER:
return -EFAULT;
case PCIBIOS_SET_FAILED:
return -EIO;
case PCIBIOS_BUFFER_TOO_SMALL:
return -ENOSPC;
}
return -ENOTTY;
}
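Typical use is at the boundary where config-space accessors hand their PCIBIOS_* status back to generic code (a sketch; pdev is assumed):

u16 vendor;
int err = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
if (err)
        return pcibios_err_to_errno(err);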
/* Low-level architecture-dependent routines */ /* Low-level architecture-dependent routines */
struct pci_ops { struct pci_ops {
@ -586,7 +617,7 @@ static inline int pci_pcie_cap(struct pci_dev *dev)
* pci_is_pcie - check if the PCI device is PCI Express capable * pci_is_pcie - check if the PCI device is PCI Express capable
* @dev: PCI device * @dev: PCI device
* *
* Retrun true if the PCI device is PCI Express capable, false otherwise. * Returns: true if the PCI device is PCI Express capable, false otherwise.
*/ */
static inline bool pci_is_pcie(struct pci_dev *dev) static inline bool pci_is_pcie(struct pci_dev *dev)
{ {
@ -672,6 +703,11 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size);
#define pci_name(x) "radeon" #define pci_name(x) "radeon"
static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
return pdev->resource[bar].start;
}
#endif //__PCI__H__ #endif //__PCI__H__

View File

@ -85,6 +85,11 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
*rb_link = node; *rb_link = node;
} }
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
/** /**
* rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
* given type safe against removal of rb_node entry * given type safe against removal of rb_node entry
@ -95,12 +100,9 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
* @field: the name of the rb_node field within 'type'. * @field: the name of the rb_node field within 'type'.
*/ */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
n = rb_entry(rb_next_postorder(&pos->field), \ pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
typeof(*pos), field); \ typeof(*pos), field); 1; }); \
&pos->field; \ pos = n)
pos = n, \
n = rb_entry(rb_next_postorder(&pos->field), \
typeof(*pos), field))
#endif /* _LINUX_RBTREE_H */ #endif /* _LINUX_RBTREE_H */
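The usual application of the postorder iterator is tearing down an entire tree without rebalancing after every removal (a sketch; struct thing, root, and freeing with kfree() are assumptions):

struct thing { struct rb_node rb; };
struct thing *pos, *n;

rbtree_postorder_for_each_entry_safe(pos, n, &root, rb)
        kfree(pos);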

View File

@ -40,7 +40,7 @@ static inline void __list_add_rcu(struct list_head *new,
next->prev = new; next->prev = new;
} }
#else #else
extern void __list_add_rcu(struct list_head *new, void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next); struct list_head *prev, struct list_head *next);
#endif #endif
@ -191,7 +191,11 @@ static inline void list_splice_init_rcu(struct list_head *list,
if (list_empty(list)) if (list_empty(list))
return; return;
/* "first" and "last" tracking list, so initialize it. */ /*
* "first" and "last" tracking list, so initialize it. RCU readers
* have access to this list, so we must use INIT_LIST_HEAD_RCU()
* instead of INIT_LIST_HEAD().
*/
INIT_LIST_HEAD(list); INIT_LIST_HEAD(list);
@ -228,9 +232,10 @@ static inline void list_splice_init_rcu(struct list_head *list,
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/ */
#define list_entry_rcu(ptr, type, member) \ #define list_entry_rcu(ptr, type, member) \
({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \ ({ \
typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
}) })
/** /**
* Where are list_empty_rcu() and list_first_entry_rcu()? * Where are list_empty_rcu() and list_first_entry_rcu()?
@ -266,11 +271,11 @@ static inline void list_splice_init_rcu(struct list_head *list,
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/ */
#define list_first_or_null_rcu(ptr, type, member) \ #define list_first_or_null_rcu(ptr, type, member) \
({struct list_head *__ptr = (ptr); \ ({ \
struct list_head *__ptr = (ptr); \
struct list_head *__next = ACCESS_ONCE(__ptr->next); \ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
likely(__ptr != __next) ? \ likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
list_entry_rcu(__next, type, member) : NULL; \ })
})
/** /**
* list_for_each_entry_rcu - iterate over rcu list of given type * list_for_each_entry_rcu - iterate over rcu list of given type
@ -412,9 +417,9 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
} }
/** /**
* hlist_add_after_rcu * hlist_add_behind_rcu
* @prev: the existing element to add the new element after.
* @n: the new element to add to the hash list. * @n: the new element to add to the hash list.
* @prev: the existing element to add the new element after.
* *
* Description: * Description:
* Adds the specified element to the specified hlist * Adds the specified element to the specified hlist
@ -429,8 +434,8 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
* hlist_for_each_entry_rcu(), used to prevent memory-consistency * hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. * problems on Alpha CPUs.
*/ */
static inline void hlist_add_after_rcu(struct hlist_node *prev, static inline void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *n) struct hlist_node *prev)
{ {
n->next = prev->next; n->next = prev->next;
n->pprev = &prev->next; n->pprev = &prev->next;

View File

@ -0,0 +1,62 @@
/*
* Header file for reservations for dma-buf and ttm
*
* Copyright(C) 2011 Linaro Limited. All rights reserved.
* Copyright (C) 2012-2013 Canonical Ltd
* Copyright (C) 2012 Texas Instruments
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*
* Based on bo.c which bears the following copyright notice,
* but is dual licensed:
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H
#include <linux/ww_mutex.h>
extern struct ww_class reservation_ww_class;
struct reservation_object {
struct ww_mutex lock;
};
static inline void
reservation_object_init(struct reservation_object *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
}
static inline void
reservation_object_fini(struct reservation_object *obj)
{
ww_mutex_destroy(&obj->lock);
}
#endif /* _LINUX_RESERVATION_H */
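At this stage a reservation object is just a ww_mutex wrapper, so a minimal lock/unlock round trip looks like this (a sketch; for the single-lock case a NULL acquire context would also do):

struct reservation_object resv;
struct ww_acquire_ctx ctx;

reservation_object_init(&resv);
ww_acquire_init(&ctx, &reservation_ww_class);
if (ww_mutex_lock(&resv.lock, &ctx) == 0) {
        /* ... touch the protected object ... */
        ww_mutex_unlock(&resv.lock);
}
ww_acquire_fini(&ctx);
reservation_object_fini(&resv);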

View File

@ -101,19 +101,6 @@ static inline struct page *sg_page(struct scatterlist *sg)
return (struct page *)((sg)->page_link & ~0x3); return (struct page *)((sg)->page_link & ~0x3);
} }
/**
* sg_set_buf - Set sg entry to point at given data
* @sg: SG entry
* @buf: Data
* @buflen: Data length
*
**/
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
// unsigned int buflen)
//{
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
//}
/* /*
* Loop over each sg element, following the pointer to a new list if necessary * Loop over each sg element, following the pointer to a new list if necessary
*/ */
@ -226,10 +213,10 @@ void sg_init_one(struct scatterlist *, const void *, unsigned int);
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int); typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *); void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
void sg_free_table(struct sg_table *); void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t, int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
sg_alloc_fn *); struct scatterlist *, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt, int sg_alloc_table_from_pages(struct sg_table *sgt,
struct page **pages, unsigned int n_pages, struct page **pages, unsigned int n_pages,
@ -241,6 +228,11 @@ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen); void *buf, size_t buflen);
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
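The new _pcopy variants behave like sg_copy_{from,to}_buffer() but first skip a byte offset into the sg list (a sketch; sgt, buf, len and skip are assumptions):

size_t copied = sg_pcopy_to_buffer(sgt->sgl, sgt->nents, buf, len, skip);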
/* /*
* Maximum number of entries that will be allocated in one piece, if * Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized. * a list larger than this is required then chaining will be utilized.

View File

@ -3,6 +3,8 @@
#define TASK_UNINTERRUPTIBLE 2 #define TASK_UNINTERRUPTIBLE 2
/* Task command name length */
#define TASK_COMM_LEN 16
#define schedule_timeout(x) delay(x) #define schedule_timeout(x) delay(x)

View File

@ -51,6 +51,9 @@ extern int strncasecmp(const char *s1, const char *s2, size_t n);
#ifndef __HAVE_ARCH_STRCHR #ifndef __HAVE_ARCH_STRCHR
extern char * strchr(const char *,int); extern char * strchr(const char *,int);
#endif #endif
#ifndef __HAVE_ARCH_STRCHRNUL
extern char * strchrnul(const char *,int);
#endif
#ifndef __HAVE_ARCH_STRNCHR #ifndef __HAVE_ARCH_STRNCHR
extern char * strnchr(const char *, size_t, int); extern char * strnchr(const char *, size_t, int);
#endif #endif

View File

@ -116,10 +116,11 @@ typedef __u8 uint8_t;
typedef __u16 uint16_t; typedef __u16 uint16_t;
typedef __u32 uint32_t; typedef __u32 uint32_t;
#if defined(__GNUC__)
typedef __u64 uint64_t; typedef __u64 uint64_t;
typedef __u64 u_int64_t; typedef __u64 u_int64_t;
typedef __s64 int64_t;
typedef __signed__ long long int64_t; #endif
/* this is a special 64bit data type that is 8-byte aligned */ /* this is a special 64bit data type that is 8-byte aligned */
#define aligned_u64 __u64 __attribute__((aligned(8))) #define aligned_u64 __u64 __attribute__((aligned(8)))
@ -150,6 +151,7 @@ typedef unsigned long blkcnt_t;
#define pgoff_t unsigned long #define pgoff_t unsigned long
#endif #endif
/* A dma_addr_t can hold any valid DMA or bus address for the platform */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t; typedef u64 dma_addr_t;
#else #else
@ -200,6 +202,7 @@ typedef __u32 __bitwise __wsum;
#ifdef __KERNEL__ #ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t; typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t; typedef unsigned __bitwise__ fmode_t;
typedef unsigned __bitwise__ oom_flags_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT #ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t; typedef u64 phys_addr_t;
@ -209,6 +212,12 @@ typedef u32 phys_addr_t;
typedef phys_addr_t resource_size_t; typedef phys_addr_t resource_size_t;
/*
* This type is the placeholder for a hardware interrupt number. It has to be
* big enough to enclose whatever representation is used by a given platform.
*/
typedef unsigned long irq_hw_number_t;
typedef struct { typedef struct {
int counter; int counter;
} atomic_t; } atomic_t;

View File

@ -39,7 +39,7 @@
#if defined(__KERNEL__) || defined(__linux__) #if defined(__KERNEL__) || defined(__linux__)
#include <linux/types.h> #include <linux/types.h>
#include <asm/ioctl.h> //#include <asm/ioctl.h>
typedef unsigned int drm_handle_t; typedef unsigned int drm_handle_t;
#else /* One of the BSDs */ #else /* One of the BSDs */
@ -619,6 +619,17 @@ struct drm_gem_open {
#define DRM_PRIME_CAP_EXPORT 0x2 #define DRM_PRIME_CAP_EXPORT 0x2
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7 #define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/*
* The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
* combination for the hardware cursor. The intention is that a hardware
* agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, eg. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
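From userspace the new caps are read like any other, via the drm_get_cap argument struct below (a sketch using libdrm's drmIoctl(); fd is an open DRM device and cursor_w is hypothetical):

struct drm_get_cap cap = { .capability = DRM_CAP_CURSOR_WIDTH };

if (drmIoctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
        cursor_w = cap.value;   /* a valid width, e.g. 64 */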
/** DRM_IOCTL_GET_CAP ioctl argument type */ /** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap { struct drm_get_cap {
@ -635,6 +646,14 @@ struct drm_get_cap {
*/ */
#define DRM_CLIENT_CAP_STEREO_3D 1 #define DRM_CLIENT_CAP_STEREO_3D 1
/**
* DRM_CLIENT_CAP_UNIVERSAL_PLANES
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap { struct drm_set_client_cap {
__u64 capability; __u64 capability;
@ -761,7 +780,7 @@ struct drm_prime_handle {
/** /**
* Device specific ioctls should only be in their respective headers * Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99. * The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0. * Generic IOCTLS restart at 0xA0.
* *
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and

View File

@ -44,20 +44,20 @@
/* Video mode flags */ /* Video mode flags */
/* bit compatible with the xorg definitions. */ /* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0) #define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1) #define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2) #define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3) #define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4) #define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5) #define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6) #define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7) #define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8) #define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ #define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10) #define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11) #define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12) #define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13) #define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/* /*
* When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
* (define not exposed to user space). * (define not exposed to user space).
@ -88,6 +88,11 @@
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ #define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ #define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
/* Picture aspect ratio options */
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2
/* Dithering mode options */ /* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0 #define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1 #define DRM_MODE_DITHERING_ON 1
@ -181,6 +186,7 @@ struct drm_mode_get_plane_res {
#define DRM_MODE_ENCODER_TVDAC 4 #define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5 #define DRM_MODE_ENCODER_VIRTUAL 5
#define DRM_MODE_ENCODER_DSI 6 #define DRM_MODE_ENCODER_DSI 6
#define DRM_MODE_ENCODER_DPMST 7
struct drm_mode_get_encoder { struct drm_mode_get_encoder {
__u32 encoder_id; __u32 encoder_id;
@ -251,6 +257,21 @@ struct drm_mode_get_connector {
#define DRM_MODE_PROP_BLOB (1<<4) #define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */ #define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
/* non-extended types: legacy bitmask, one bit per type: */
#define DRM_MODE_PROP_LEGACY_TYPE ( \
DRM_MODE_PROP_RANGE | \
DRM_MODE_PROP_ENUM | \
DRM_MODE_PROP_BLOB | \
DRM_MODE_PROP_BITMASK)
/* extended-types: rather than continue to consume a bit per type,
* grab a chunk of the bits to use as integer type id.
*/
#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
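Decoding then works in two steps: mask the extended bits first, and fall back to the legacy one-bit-per-type mask only if they are clear (a sketch; flags as returned in struct drm_mode_get_property):

static unsigned int drm_property_type(__u32 flags)
{
        if (flags & DRM_MODE_PROP_EXTENDED_TYPE)
                return flags & DRM_MODE_PROP_EXTENDED_TYPE;
        return flags & DRM_MODE_PROP_LEGACY_TYPE;
}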
struct drm_mode_property_enum { struct drm_mode_property_enum {
__u64 value; __u64 value;
char name[DRM_PROP_NAME_LEN]; char name[DRM_PROP_NAME_LEN];

View File

@ -223,6 +223,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_GET_CACHING 0x30 #define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31 #define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32 #define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@ -273,6 +274,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
/* Allow drivers to submit batchbuffers directly to hardware, relying /* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware. * on the security mechanisms provided by hardware.
@ -337,6 +339,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_NO_RELOC 25 #define I915_PARAM_HAS_EXEC_NO_RELOC 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
#define I915_PARAM_HAS_WT 27 #define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
typedef struct drm_i915_getparam { typedef struct drm_i915_getparam {
int param; int param;
@ -1049,6 +1052,20 @@ struct drm_i915_reset_stats {
__u32 pad; __u32 pad;
}; };
struct drm_i915_gem_userptr {
__u64 user_ptr;
__u64 user_size;
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
};
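A userspace sketch of the new ioctl, wrapping an existing page-aligned allocation in a GEM handle (fd, ptr and size are assumptions):

struct drm_i915_gem_userptr arg = {
        .user_ptr  = (__u64)(uintptr_t)ptr,
        .user_size = size,
        .flags     = 0,
};

if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
        handle = arg.handle;    /* nonzero on success */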
struct drm_i915_mask { struct drm_i915_mask {
__u32 handle; __u32 handle;
__u32 width; __u32 width;

View File

@ -87,6 +87,16 @@
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8 #define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
*
*/
enum drm_vmw_handle_type {
DRM_VMW_HANDLE_LEGACY = 0,
DRM_VMW_HANDLE_PRIME = 1
};
/** /**
* struct drm_vmw_getparam_arg * struct drm_vmw_getparam_arg
@ -176,6 +186,7 @@ struct drm_vmw_surface_create_req {
* struct drm_wmv_surface_arg * struct drm_wmv_surface_arg
* *
* @sid: Surface id of created surface or surface to destroy or reference. * @sid: Surface id of created surface or surface to destroy or reference.
* @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
* *
* Output data from the DRM_VMW_CREATE_SURFACE Ioctl. * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
* Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
@ -184,7 +195,7 @@ struct drm_vmw_surface_create_req {
struct drm_vmw_surface_arg { struct drm_vmw_surface_arg {
int32_t sid; int32_t sid;
uint32_t pad64; enum drm_vmw_handle_type handle_type;
}; };
/** /**

View File

@ -84,7 +84,7 @@ do{ \
#define wait_event(wq, condition) \ #define wait_event(wq, condition) \
do{ \ do{ \
wait_queue_t __wait = { \ wait_queue_t __wait = { \
.task_list = LIST_HEAD_INIT(__wait.task_list), \ .task_list = LIST_HEAD_INIT(__wait.task_list), \
.evnt = CreateEvent(NULL, MANUAL_DESTROY), \ .evnt = CreateEvent(NULL, MANUAL_DESTROY), \
@ -109,7 +109,13 @@ do{ \
DestroyEvent(__wait.evnt); \ DestroyEvent(__wait.evnt); \
} while (0) } while (0)
#define wait_event_interruptible(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
wait_event(wq, condition); \
__ret; \
})
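/*
 * Note: in this port the "interruptible" variant cannot actually be
 * interrupted; it falls through to wait_event() and always evaluates
 * to 0, which keeps callers that test the return value working.
 */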
static inline static inline

View File

@ -0,0 +1,381 @@
/*
* Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
*
* Original mutex implementation started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* Wound/wait implementation:
* Copyright (C) 2013 Canonical Ltd.
*
* This file contains the main data structure and API definitions.
*/
#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H
#include <linux/mutex.h>
#include <syscall.h>
#define current (void*)GetPid()
struct ww_class {
atomic_long_t stamp;
struct lock_class_key acquire_key;
struct lock_class_key mutex_key;
const char *acquire_name;
const char *mutex_name;
};
struct ww_acquire_ctx {
struct task_struct *task;
unsigned long stamp;
unsigned acquired;
#ifdef CONFIG_DEBUG_MUTEXES
unsigned done_acquire;
struct ww_class *ww_class;
struct ww_mutex *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned deadlock_inject_interval;
unsigned deadlock_inject_countdown;
#endif
};
struct ww_mutex {
struct mutex base;
struct ww_acquire_ctx *ctx;
#ifdef CONFIG_DEBUG_MUTEXES
struct ww_class *ww_class;
#endif
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
, .ww_class = &ww_class
#else
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
#endif
#define __WW_CLASS_INITIALIZER(ww_class) \
{ .stamp = ATOMIC_LONG_INIT(0) \
, .acquire_name = #ww_class "_acquire" \
, .mutex_name = #ww_class "_mutex" }
#define __WW_MUTEX_INITIALIZER(lockname, class) \
{ .base = __MUTEX_INITIALIZER(lockname.base) \
__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
#define DEFINE_WW_CLASS(classname) \
struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
#define DEFINE_WW_MUTEX(mutexname, ww_class) \
struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
/**
* ww_mutex_init - initialize the w/w mutex
* @lock: the mutex to be initialized
* @ww_class: the w/w class the mutex should belong to
*
* Initialize the w/w mutex to unlocked state and associate it with the given
* class.
*
* It is not allowed to initialize an already locked mutex.
*/
static inline void ww_mutex_init(struct ww_mutex *lock,
struct ww_class *ww_class)
{
MutexInit(&lock->base);
lock->ctx = NULL;
#ifdef CONFIG_DEBUG_MUTEXES
lock->ww_class = ww_class;
#endif
}
/**
* ww_acquire_init - initialize a w/w acquire context
* @ctx: w/w acquire context to initialize
* @ww_class: w/w class of the context
*
* Initializes a context to acquire multiple mutexes of the given w/w class.
*
* Context-based w/w mutex acquiring can be done in any order whatsoever within
* a given lock class. Deadlocks will be detected and handled with the
* wait/wound logic.
*
* Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
* result in undetected deadlocks and is therefore forbidden. Mixing different contexts
* for the same w/w class when acquiring mutexes can also result in undetected
* deadlocks, and is hence also forbidden. Both types of abuse will be caught by
* enabling CONFIG_PROVE_LOCKING.
*
* Nesting of acquire contexts for _different_ w/w classes is possible, subject
* to the usual locking rules between different lock classes.
*
* An acquire context must be released with ww_acquire_fini by the same task
* before the memory is freed. It is recommended to allocate the context itself
* on the stack.
*/
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
{
ctx->task = current;
ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
ctx->ww_class = ww_class;
ctx->done_acquire = 0;
ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
ctx->deadlock_inject_interval = 1;
ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}
/**
* ww_acquire_done - marks the end of the acquire phase
* @ctx: the acquire context
*
* Marks the end of the acquire phase, any further w/w mutex lock calls using
* this context are forbidden.
*
 * Calling this function is optional: it is just useful to document w/w mutex
 * code and clearly separate the acquire phase from the actual use of the
 * locked data structures.
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
lockdep_assert_held(ctx);
DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
ctx->done_acquire = 1;
#endif
}
/**
* ww_acquire_fini - releases a w/w acquire context
* @ctx: the acquire context to free
*
* Releases a w/w acquire context. This must be called _after_ all acquired w/w
* mutexes have been released with ww_mutex_unlock.
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
mutex_release(&ctx->dep_map, 0, _THIS_IP_);
DEBUG_LOCKS_WARN_ON(ctx->acquired);
if (!config_enabled(CONFIG_PROVE_LOCKING))
		/*
		 * lockdep will normally handle this,
		 * but make the check fail without lockdep as well
		 */
ctx->done_acquire = 1;
if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
/* ensure ww_acquire_fini will still fail if called twice */
ctx->acquired = ~0U;
#endif
}
extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx);
extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx);
/**
* ww_mutex_lock - acquire the w/w mutex
* @lock: the mutex to be acquired
* @ctx: w/w acquire context, or NULL to acquire only a single lock.
*
* Lock the w/w mutex exclusively for this task.
*
 * Deadlocks within a given w/w class of locks are detected and handled with
 * the wait/wound algorithm. If the lock isn't immediately available, this
 * function either sleeps until it is (the wait case) or selects the current
 * context for backing off by returning -EDEADLK (the wound case). Trying to
 * acquire the same lock with the same context twice is also detected and
 * signalled by returning -EALREADY. Returns 0 if the mutex was successfully
 * acquired.
*
* In the wound case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
* lock and proceed with trying to acquire further w/w mutexes (e.g. when
* scanning through lru lists trying to free resources).
*
* The mutex must later on be released by the same task that
* acquired it. The task may not exit without first unlocking the mutex. Also,
* kernel memory where the mutex resides must not be freed with the mutex still
* locked. The mutex must first be initialized (or statically defined) before it
* can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
* of the same w/w lock class as was used to initialize the acquire context.
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
if (ctx)
return __ww_mutex_lock(lock, ctx);
mutex_lock(&lock->base);
return 0;
}
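/*
 * A minimal usage sketch for two mutexes of the same class, under the
 * assumption that demo_class and demo_lock_pair are illustrative names
 * and swap() is the usual kernel helper macro (nothing below is part of
 * this header):
 *
 *	static DEFINE_WW_CLASS(demo_class);
 *
 *	int demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &demo_class);
 *
 *		ret = ww_mutex_lock(a, &ctx);
 *		if (ret == -EDEADLK)
 *			// lost a to an older context: wait for it instead
 *			ww_mutex_lock_slow(a, &ctx);
 *	again:
 *		ret = ww_mutex_lock(b, &ctx);
 *		if (ret == -EDEADLK) {
 *			// wounded: drop a, wait for b, retry in the new order
 *			ww_mutex_unlock(a);
 *			ww_mutex_lock_slow(b, &ctx);
 *			swap(a, b);
 *			goto again;
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		// ... use the data protected by a and b ...
 *
 *		ww_mutex_unlock(b);
 *		ww_mutex_unlock(a);
 *		ww_acquire_fini(&ctx);
 *		return 0;
 *	}
 */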
/**
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
* Lock the w/w mutex exclusively for this task.
*
 * Deadlocks within a given w/w class of locks are detected and handled with
 * the wait/wound algorithm. If the lock isn't immediately available, this
 * function either sleeps until it is (the wait case) or selects the current
 * context for backing off by returning -EDEADLK (the wound case). Trying to
 * acquire the same lock with the same context twice is also detected and
 * signalled by returning -EALREADY. Returns 0 if the mutex was successfully
 * acquired. If a signal arrives while waiting for the lock then this function
 * returns -EINTR.
*
* In the wound case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
* not acquire this lock and proceed with trying to acquire further w/w mutexes
* (e.g. when scanning through lru lists trying to free resources).
*
* The mutex must later on be released by the same task that
* acquired it. The task may not exit without first unlocking the mutex. Also,
* kernel memory where the mutex resides must not be freed with the mutex still
* locked. The mutex must first be initialized (or statically defined) before it
* can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
* of the same w/w lock class as was used to initialize the acquire context.
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
if (ctx)
return __ww_mutex_lock_interruptible(lock, ctx);
else
return mutex_lock_interruptible(&lock->base);
}
/**
* ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
* Acquires a w/w mutex with the given context after a wound case. This function
* will sleep until the lock becomes available.
*
* The caller must have released all w/w mutexes already acquired with the
* context and then call this function on the contended lock.
*
* Afterwards the caller may continue to (re)acquire the other w/w mutexes it
* needs with ww_mutex_lock. Note that the -EALREADY return code from
* ww_mutex_lock can be used to avoid locking this contended mutex twice.
*
* It is forbidden to call this function with any other w/w mutexes associated
 * with the context held. It is forbidden to call this on anything other than
 * the contending mutex.
*
* Note that the slowpath lock acquiring can also be done by calling
* ww_mutex_lock directly. This function here is simply to help w/w mutex
* locking code readability by clearly denoting the slowpath.
*/
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
ret = ww_mutex_lock(lock, ctx);
(void)ret;
}
/**
* ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
* Acquires a w/w mutex with the given context after a wound case. This function
* will sleep until the lock becomes available and returns 0 when the lock has
* been acquired. If a signal arrives while waiting for the lock then this
* function returns -EINTR.
*
* The caller must have released all w/w mutexes already acquired with the
* context and then call this function on the contended lock.
*
* Afterwards the caller may continue to (re)acquire the other w/w mutexes it
* needs with ww_mutex_lock. Note that the -EALREADY return code from
* ww_mutex_lock can be used to avoid locking this contended mutex twice.
*
* It is forbidden to call this function with any other w/w mutexes associated
 * with the given context held. It is forbidden to call this on anything other
 * than the contending mutex.
*
* Note that the slowpath lock acquiring can also be done by calling
* ww_mutex_lock_interruptible directly. This function here is simply to help
* w/w mutex locking code readability by clearly denoting the slowpath.
*/
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
return ww_mutex_lock_interruptible(lock, ctx);
}
extern void ww_mutex_unlock(struct ww_mutex *lock);
/**
* ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
* @lock: mutex to lock
*
* Trylocks a mutex without acquire context, so no deadlock detection is
* possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
*/
static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
{
return mutex_trylock(&lock->base);
}
/**
* ww_mutex_destroy - mark a w/w mutex unusable
* @lock: the mutex to be destroyed
*
* This function marks the mutex uninitialized, and any subsequent
* use of the mutex is forbidden. The mutex must not be locked when
* this function is called.
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
mutex_destroy(&lock->base);
}
/**
* ww_mutex_is_locked - is the w/w mutex locked
* @lock: the mutex to be queried
*
* Returns 1 if the mutex is locked, 0 if unlocked.
*/
static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
{
return mutex_is_locked(&lock->base);
}
#endif

View File

@ -0,0 +1,130 @@
/*
* Defines for Mobile Industry Processor Interface (MIPI(R))
* Display Working Group standards: DSI, DCS, DBI, DPI
*
* Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
* Copyright (C) 2006 Nokia Corporation
* Author: Imre Deak <imre.deak@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef MIPI_DISPLAY_H
#define MIPI_DISPLAY_H
/* MIPI DSI Processor-to-Peripheral transaction types */
enum {
MIPI_DSI_V_SYNC_START = 0x01,
MIPI_DSI_V_SYNC_END = 0x11,
MIPI_DSI_H_SYNC_START = 0x21,
MIPI_DSI_H_SYNC_END = 0x31,
MIPI_DSI_COLOR_MODE_OFF = 0x02,
MIPI_DSI_COLOR_MODE_ON = 0x12,
MIPI_DSI_SHUTDOWN_PERIPHERAL = 0x22,
MIPI_DSI_TURN_ON_PERIPHERAL = 0x32,
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM = 0x03,
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM = 0x13,
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM = 0x23,
MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM = 0x04,
MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM = 0x14,
MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM = 0x24,
MIPI_DSI_DCS_SHORT_WRITE = 0x05,
MIPI_DSI_DCS_SHORT_WRITE_PARAM = 0x15,
MIPI_DSI_DCS_READ = 0x06,
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 0x37,
MIPI_DSI_END_OF_TRANSMISSION = 0x08,
MIPI_DSI_NULL_PACKET = 0x09,
MIPI_DSI_BLANKING_PACKET = 0x19,
MIPI_DSI_GENERIC_LONG_WRITE = 0x29,
MIPI_DSI_DCS_LONG_WRITE = 0x39,
MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20 = 0x0c,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24 = 0x1c,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16 = 0x2c,
MIPI_DSI_PACKED_PIXEL_STREAM_30 = 0x0d,
MIPI_DSI_PACKED_PIXEL_STREAM_36 = 0x1d,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12 = 0x3d,
MIPI_DSI_PACKED_PIXEL_STREAM_16 = 0x0e,
MIPI_DSI_PACKED_PIXEL_STREAM_18 = 0x1e,
MIPI_DSI_PIXEL_STREAM_3BYTE_18 = 0x2e,
MIPI_DSI_PACKED_PIXEL_STREAM_24 = 0x3e,
};
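/*
 * For orientation, a sketch of how these type codes sit on the wire: a DSI
 * short packet is four bytes, a Data Identifier byte carrying the virtual
 * channel in bits 7:6 and one of the transaction types above in bits 5:0,
 * two parameter bytes, and an ECC byte. dsi_ecc() is a hypothetical helper,
 * not something this header provides:
 *
 *	void fill_short_packet(u8 pkt[4], u8 vc, u8 type, u8 p0, u8 p1)
 *	{
 *		pkt[0] = ((vc & 0x3) << 6) | (type & 0x3f);
 *		pkt[1] = p0;		// e.g. a DCS command byte
 *		pkt[2] = p1;		// e.g. its parameter, or 0
 *		pkt[3] = dsi_ecc(pkt);	// hypothetical ECC over pkt[0..2]
 *	}
 */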
/* MIPI DSI Peripheral-to-Processor transaction types */
enum {
MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT = 0x02,
MIPI_DSI_RX_END_OF_TRANSMISSION = 0x08,
MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE = 0x11,
MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE = 0x12,
MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE = 0x1a,
MIPI_DSI_RX_DCS_LONG_READ_RESPONSE = 0x1c,
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE = 0x21,
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE = 0x22,
};
/* MIPI DCS commands */
enum {
MIPI_DCS_NOP = 0x00,
MIPI_DCS_SOFT_RESET = 0x01,
MIPI_DCS_GET_DISPLAY_ID = 0x04,
MIPI_DCS_GET_RED_CHANNEL = 0x06,
MIPI_DCS_GET_GREEN_CHANNEL = 0x07,
MIPI_DCS_GET_BLUE_CHANNEL = 0x08,
MIPI_DCS_GET_DISPLAY_STATUS = 0x09,
MIPI_DCS_GET_POWER_MODE = 0x0A,
MIPI_DCS_GET_ADDRESS_MODE = 0x0B,
MIPI_DCS_GET_PIXEL_FORMAT = 0x0C,
MIPI_DCS_GET_DISPLAY_MODE = 0x0D,
MIPI_DCS_GET_SIGNAL_MODE = 0x0E,
MIPI_DCS_GET_DIAGNOSTIC_RESULT = 0x0F,
MIPI_DCS_ENTER_SLEEP_MODE = 0x10,
MIPI_DCS_EXIT_SLEEP_MODE = 0x11,
MIPI_DCS_ENTER_PARTIAL_MODE = 0x12,
MIPI_DCS_ENTER_NORMAL_MODE = 0x13,
MIPI_DCS_EXIT_INVERT_MODE = 0x20,
MIPI_DCS_ENTER_INVERT_MODE = 0x21,
MIPI_DCS_SET_GAMMA_CURVE = 0x26,
MIPI_DCS_SET_DISPLAY_OFF = 0x28,
MIPI_DCS_SET_DISPLAY_ON = 0x29,
MIPI_DCS_SET_COLUMN_ADDRESS = 0x2A,
MIPI_DCS_SET_PAGE_ADDRESS = 0x2B,
MIPI_DCS_WRITE_MEMORY_START = 0x2C,
MIPI_DCS_WRITE_LUT = 0x2D,
MIPI_DCS_READ_MEMORY_START = 0x2E,
MIPI_DCS_SET_PARTIAL_AREA = 0x30,
MIPI_DCS_SET_SCROLL_AREA = 0x33,
MIPI_DCS_SET_TEAR_OFF = 0x34,
MIPI_DCS_SET_TEAR_ON = 0x35,
MIPI_DCS_SET_ADDRESS_MODE = 0x36,
MIPI_DCS_SET_SCROLL_START = 0x37,
MIPI_DCS_EXIT_IDLE_MODE = 0x38,
MIPI_DCS_ENTER_IDLE_MODE = 0x39,
MIPI_DCS_SET_PIXEL_FORMAT = 0x3A,
MIPI_DCS_WRITE_MEMORY_CONTINUE = 0x3C,
MIPI_DCS_READ_MEMORY_CONTINUE = 0x3E,
MIPI_DCS_SET_TEAR_SCANLINE = 0x44,
MIPI_DCS_GET_SCANLINE = 0x45,
MIPI_DCS_READ_DDB_START = 0xA1,
MIPI_DCS_READ_DDB_CONTINUE = 0xA8,
};
/* MIPI DCS pixel formats */
#define MIPI_DCS_PIXEL_FMT_24BIT 7
#define MIPI_DCS_PIXEL_FMT_18BIT 6
#define MIPI_DCS_PIXEL_FMT_16BIT 5
#define MIPI_DCS_PIXEL_FMT_12BIT 3
#define MIPI_DCS_PIXEL_FMT_8BIT 2
#define MIPI_DCS_PIXEL_FMT_3BIT 1
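/*
 * A sketch of a typical panel power-up sequence built from the commands
 * above. dcs_write() is a hypothetical transport helper, the 120 ms
 * sleep-out delay is the figure most DCS panels require (check the panel
 * datasheet), and the SET_PIXEL_FORMAT parameter is assumed to carry the
 * DPI format in bits 6:4 and the DBI format in bits 2:0:
 *
 *	dcs_write(MIPI_DCS_EXIT_SLEEP_MODE);
 *	msleep(120);	// sleep-out settle time
 *	dcs_write(MIPI_DCS_SET_PIXEL_FORMAT,
 *		  (MIPI_DCS_PIXEL_FMT_24BIT << 4) | MIPI_DCS_PIXEL_FMT_24BIT);
 *	dcs_write(MIPI_DCS_SET_DISPLAY_ON);
 */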
#endif