encoders, connectors, crtcs

git-svn-id: svn://kolibrios.org@1123 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2009-07-03 15:24:50 +00:00
parent 44e90288dd
commit 3bbe7b485a
26 changed files with 15522 additions and 118 deletions

drivers/video/drm/drm_crtc.c (new file, 2477 lines)

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,797 @@
/*
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
* Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//#include <linux/kernel.h>
//#include <linux/i2c.h>
//#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm_edid.h"
/*
* TODO:
* - support EDID 1.4 (incl. CE blocks)
*/
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
* them here (note that userspace may work around broken monitors first,
* but fixes should make their way here so that the kernel "just works"
* on as many displays as possible).
*/
/* First detailed mode wrong, use largest 60Hz mode */
#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
/* Reported 135MHz pixel clock is too high, needs adjustment */
#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
/* Prefer the largest mode at 75 Hz */
#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
/* Detail timing is in cm not mm */
#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
/* Detailed timing descriptors have bogus size values, so just take the
* maximum size and use that.
*/
#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
/* Monitor forgot to set the first detailed is preferred bit. */
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
static struct edid_quirk {
char *vendor;
int product_id;
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
/* Envision Peripherals, Inc. EN-7100e */
{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
/* Funai Electronics PM36B */
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM },
/* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
/* Philips 107p5 CRT */
{ "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* Proview AY765C */
{ "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* Samsung SyncMaster 205BW. Note: irony */
{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
};
/* Valid EDID header has these bytes */
static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
/**
* edid_is_valid - sanity check EDID data
* @edid: EDID data
*
* Sanity check the EDID block by looking at the header, the version number
* and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
* valid.
*/
static bool edid_is_valid(struct edid *edid)
{
int i;
u8 csum = 0;
u8 *raw_edid = (u8 *)edid;
if (memcmp(edid->header, edid_header, sizeof(edid_header)))
goto bad;
if (edid->version != 1) {
DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
goto bad;
}
if (edid->revision > 4)
DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
goto bad;
}
return 1;
bad:
if (raw_edid) {
DRM_ERROR("Raw EDID:\n");
// print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
// printk("\n");
}
return 0;
}
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
* @edid: EDID to match
* @vendor: vendor string
*
* Returns true if @vendor is in @edid, false otherwise
*/
static bool edid_vendor(struct edid *edid, char *vendor)
{
char edid_vendor[3];
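    /* The EDID mfg_id packs three 5-bit letters (1 = 'A') big-endian into
     * two bytes; adding '@' (0x40) turns each 5-bit code back into ASCII. */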
edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
return !strncmp(edid_vendor, vendor, 3);
}
/**
* edid_get_quirks - return quirk flags for a given EDID
* @edid: EDID to process
*
* This tells subsequent routines what fixes they need to apply.
*/
static u32 edid_get_quirks(struct edid *edid)
{
struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
if (edid_vendor(edid, quirk->vendor) &&
(EDID_PRODUCT_ID(edid) == quirk->product_id))
return quirk->quirks;
}
return 0;
}
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
/**
* edid_fixup_preferred - set preferred modes based on quirk list
* @connector: has mode list to fix up
* @quirks: quirks list
*
* Walk the mode list for @connector, clearing the preferred status
* on existing modes and setting it anew for the right mode ala @quirks.
*/
static void edid_fixup_preferred(struct drm_connector *connector,
u32 quirks)
{
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
if (list_empty(&connector->probed_modes))
return;
if (quirks & EDID_QUIRK_PREFER_LARGE_60)
target_refresh = 60;
if (quirks & EDID_QUIRK_PREFER_LARGE_75)
target_refresh = 75;
preferred_mode = list_first_entry(&connector->probed_modes,
struct drm_display_mode, head);
list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
if (cur_mode == preferred_mode)
continue;
/* Largest mode is preferred */
if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
preferred_mode = cur_mode;
/* At a given size, try to get closest to target refresh */
if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
MODE_REFRESH_DIFF(cur_mode, target_refresh) <
MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
preferred_mode = cur_mode;
}
}
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
* @t: standard timing params
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT.
*
* Punts for now, but should eventually use the FB layer's CVT based mode
* generation code.
*/
struct drm_display_mode *drm_mode_std(struct drm_device *dev,
struct std_timing *t)
{
struct drm_display_mode *mode;
int hsize = t->hsize * 8 + 248, vsize;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
>> EDID_TIMING_ASPECT_SHIFT;
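    /* Standard timings store (hactive / 8) - 31, hence hsize * 8 + 248 above,
     * plus a 2-bit aspect ratio code: 0 = 16:10, 1 = 4:3, 2 = 5:4, 3 = 16:9
     * (in EDID versions before 1.3, code 0 meant 1:1 instead). */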
mode = drm_mode_create(dev);
if (!mode)
return NULL;
if (aspect_ratio == 0)
vsize = (hsize * 10) / 16;
else if (aspect_ratio == 1)
vsize = (hsize * 3) / 4;
else if (aspect_ratio == 2)
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
    /* The standard timing only gives us the size; fill in hdisplay/vdisplay
     * so the mode and its name are usable. Full timings would come from
     * CVT/GTF generation, which is still punted on here. */
    mode->hdisplay = hsize;
    mode->vdisplay = vsize;
    drm_mode_set_name(mode);
return mode;
}
/**
* drm_mode_detailed - create a new mode from an EDID detailed timing section
* @dev: DRM device (needed to create new mode)
* @edid: EDID block
* @timing: EDID detailed timing info
* @quirks: quirks to apply
*
* An EDID detailed timing block contains enough info for us to create and
* return a new struct drm_display_mode.
*/
static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
struct edid *edid,
struct detailed_timing *timing,
u32 quirks)
{
struct drm_display_mode *mode;
struct detailed_pixel_timing *pt = &timing->data.pixel_data;
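    /* Each timing value is split into a low byte (or nibble) plus high bits
     * packed into shared "hi" bytes; the shifts below reassemble the full
     * values. */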
unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
if (hactive < 64 || vactive < 64)
return NULL;
if (pt->misc & DRM_EDID_PT_STEREO) {
printk(KERN_WARNING "stereo mode not supported\n");
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
printk(KERN_WARNING "integrated sync not supported\n");
return NULL;
}
mode = drm_mode_create(dev);
if (!mode)
return NULL;
mode->type = DRM_MODE_TYPE_DRIVER;
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = cpu_to_le16(1088);
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
mode->hdisplay = hactive;
mode->hsync_start = mode->hdisplay + hsync_offset;
mode->hsync_end = mode->hsync_start + hsync_pulse_width;
mode->htotal = mode->hdisplay + hblank;
mode->vdisplay = vactive;
mode->vsync_start = mode->vdisplay + vsync_offset;
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
drm_mode_set_name(mode);
if (pt->misc & DRM_EDID_PT_INTERLACED)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
mode->width_mm = edid->width_cm * 10;
mode->height_mm = edid->height_cm * 10;
}
return mode;
}
/*
* Detailed mode info for the EDID "established modes" data to use.
*/
static struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
846, 900, 0, 400, 421, 423, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
846, 900, 0, 400, 412, 414, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
928, 1152, 0, 624, 625, 628, 667, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
#define EDID_DETAILED_TIMINGS 4
/**
* add_established_modes - get est. modes from EDID and add them
* @edid: EDID block to scan
*
* Each EDID block contains a bitmap of the supported "established modes" list
* (defined above). Tease them out and add them to the global modes list.
*/
static int add_established_modes(struct drm_connector *connector, struct edid *edid)
{
struct drm_device *dev = connector->dev;
unsigned long est_bits = edid->established_timings.t1 |
(edid->established_timings.t2 << 8) |
((edid->established_timings.mfg_rsvd & 0x80) << 9);
int i, modes = 0;
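    /* est_bits holds 17 bits: t1 covers bits 0-7, t2 bits 8-15, and bit 7 of
     * the manufacturer byte becomes bit 16, so bit i selects edid_est_modes[i]. */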
for (i = 0; i <= EDID_EST_TIMINGS; i++)
if (est_bits & (1<<i)) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
return modes;
}
/**
* add_standard_modes - get std. modes from EDID and add them
* @edid: EDID block to scan
*
* Standard modes can be calculated using the CVT standard. Grab them from
* @edid, calculate them, and add them to the list.
*/
static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
struct drm_device *dev = connector->dev;
int i, modes = 0;
for (i = 0; i < EDID_STD_TIMINGS; i++) {
struct std_timing *t = &edid->standard_timings[i];
struct drm_display_mode *newmode;
/* A standard timing slot filled with 0x01, 0x01 is unused */
if (t->hsize == 1 && t->vfreq_aspect == 1)
continue;
newmode = drm_mode_std(dev, &edid->standard_timings[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
return modes;
}
/**
* add_detailed_modes - get detailed mode info from EDID data
* @connector: attached connector
* @edid: EDID block to scan
* @quirks: quirks to apply
*
* Some of the detailed timing sections may contain mode information. Grab
* it and add it to the list.
*/
static int add_detailed_info(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
struct drm_device *dev = connector->dev;
int i, j, modes = 0;
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_display_mode *newmode;
/* EDID up to and including 1.2 may put monitor info here */
if (edid->version == 1 && edid->revision < 3)
continue;
/* Detailed mode timing */
if (timing->pixel_clock) {
newmode = drm_mode_detailed(dev, edid, timing, quirks);
if (!newmode)
continue;
/* First detailed mode is preferred */
if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
newmode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, newmode);
modes++;
continue;
}
/* Other timing or info */
switch (data->type) {
case EDID_DETAIL_MONITOR_SERIAL:
break;
case EDID_DETAIL_MONITOR_STRING:
break;
case EDID_DETAIL_MONITOR_RANGE:
/* Get monitor range data */
break;
case EDID_DETAIL_MONITOR_NAME:
break;
case EDID_DETAIL_MONITOR_CPDATA:
break;
case EDID_DETAIL_STD_MODES:
/* Five modes per detailed section */
for (j = 0; j < 5; j++) {
struct std_timing *std;
struct drm_display_mode *newmode;
std = &data->data.timings[j];
newmode = drm_mode_std(dev, std);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
break;
default:
break;
}
}
return modes;
}
#define DDC_ADDR 0x50
/**
* Get EDID information via I2C.
*
* \param adapter : i2c device adaptor
* \param buf : EDID data buffer to be filled
* \param len : EDID data buffer length
* \return 0 on success or -1 on failure.
*
* Try to fetch EDID information by calling i2c driver function.
*/
int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
unsigned char *buf, int len)
{
unsigned char start = 0x0;
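    /* Standard DDC read: write the start offset (0) to the EDID slave at
     * DDC_ADDR, then read len bytes back in a second message. */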
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
}
};
if (i2c_transfer(adapter, msgs, 2) == 2)
return 0;
// dev_info(&adapter->dev, "unable to read EDID block.\n");
return -1;
}
EXPORT_SYMBOL(drm_do_probe_ddc_edid);
static int drm_ddc_read_edid(struct drm_connector *connector,
struct i2c_adapter *adapter,
char *buf, int len)
{
int ret;
ret = drm_do_probe_ddc_edid(adapter, buf, len);
if (ret != 0) {
// dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
// drm_get_connector_name(connector));
goto end;
}
if (!edid_is_valid((struct edid *)buf)) {
// dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
// drm_get_connector_name(connector));
ret = -1;
}
end:
return ret;
}
#define MAX_EDID_EXT_NUM 4
/**
* drm_get_edid - get EDID data, if available
* @connector: connector we're probing
* @adapter: i2c adapter to use for DDC
*
* Poke the given connector's i2c channel to grab EDID data if possible.
*
* Return edid data or NULL if we couldn't find any.
*/
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
int ret;
struct edid *edid;
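    /* Allocate room for the 128-byte base block plus up to MAX_EDID_EXT_NUM
     * extension blocks. */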
edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
GFP_KERNEL);
if (edid == NULL) {
// dev_warn(&connector->dev->pdev->dev,
// "Failed to allocate EDID\n");
goto end;
}
/* Read first EDID block */
ret = drm_ddc_read_edid(connector, adapter,
(unsigned char *)edid, EDID_LENGTH);
if (ret != 0)
goto clean_up;
/* There are EDID extensions to be read */
if (edid->extensions != 0) {
int edid_ext_num = edid->extensions;
if (edid_ext_num > MAX_EDID_EXT_NUM) {
// dev_warn(&connector->dev->pdev->dev,
// "The number of extension(%d) is "
// "over max (%d), actually read number (%d)\n",
// edid_ext_num, MAX_EDID_EXT_NUM,
// MAX_EDID_EXT_NUM);
/* Reset EDID extension number to be read */
edid_ext_num = MAX_EDID_EXT_NUM;
}
/* Read EDID including extensions too */
ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
EDID_LENGTH * (edid_ext_num + 1));
if (ret != 0)
goto clean_up;
}
connector->display_info.raw_edid = (char *)edid;
goto end;
clean_up:
kfree(edid);
edid = NULL;
end:
return edid;
}
EXPORT_SYMBOL(drm_get_edid);
#define HDMI_IDENTIFIER 0x000C03
#define VENDOR_BLOCK 0x03
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
* @edid: monitor EDID information
*
* Parse the CEA extension according to CEA-861-B.
* Return true if HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
{
char *edid_ext = NULL;
int i, hdmi_id, edid_ext_num;
int start_offset, end_offset;
bool is_hdmi = false;
/* No EDID or EDID extensions */
if (edid == NULL || edid->extensions == 0)
goto end;
/* Choose the real EDID extension count, capped at MAX_EDID_EXT_NUM */
edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
for (i = 0; i < edid_ext_num; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
/* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
if (i == edid_ext_num)
goto end;
/* Data block offset in CEA extension block */
start_offset = 4;
end_offset = edid_ext[2];
/*
 * Because the HDMI identifier lives in a Vendor Specific Data Block,
 * search all data blocks of the CEA extension for it.
*/
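    /* Each CEA data block starts with a header byte: tag in bits 7:5 and
     * payload length in bits 4:0. A vendor-specific block (tag 3) carrying
     * the IEEE OUI 0x000C03 identifies an HDMI sink. */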
for (i = start_offset; i < end_offset;
/* Increased by data block len */
i += ((edid_ext[i] & 0x1f) + 1)) {
/* Find vendor specific block */
if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
edid_ext[i + 3] << 16;
/* Find HDMI identifier */
if (hdmi_id == HDMI_IDENTIFIER)
is_hdmi = true;
break;
}
}
end:
return is_hdmi;
}
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
* @edid: edid data
*
* Add the specified modes to the connector's mode list.
*
* Return number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
int num_modes = 0;
u32 quirks;
if (edid == NULL) {
return 0;
}
if (!edid_is_valid(edid)) {
// dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
// drm_get_connector_name(connector));
return 0;
}
quirks = edid_get_quirks(edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_detailed_info(connector, edid, quirks);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
connector->display_info.width_mm = edid->width_cm * 10;
connector->display_info.height_mm = edid->height_cm * 10;
connector->display_info.gamma = edid->gamma;
connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
connector->display_info.gamma = edid->gamma;
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);

drivers/video/drm/drm_mm.c (new file, 369 lines)

@@ -0,0 +1,369 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Generic simple memory manager implementation. Intended to be used as a base
* class implementation for more advanced memory managers.
*
 * Note that the algorithm used is quite simple, and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is just
 * an unordered stack of free regions; switching to an RB-tree would be an easy
 * improvement, at least if heavy fragmentation is expected.
*
* Aligned allocations can also see improvement.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
//#include "drmP.h"
#include "drm_mm.h"
//#include <linux/slab.h>
#define MM_UNUSED_TARGET 4
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return -ENOMEM;
if (entry->size <= size)
return -ENOMEM;
entry->size -= size;
return 0;
}
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
struct drm_mm_node *child;
child = malloc(sizeof(*child));
if (unlikely(child == NULL)) {
spin_lock(&mm->unused_lock);
if (list_empty(&mm->unused_nodes))
child = NULL;
else {
child =
list_entry(mm->unused_nodes.next,
struct drm_mm_node, fl_entry);
list_del(&child->fl_entry);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
}
return child;
}
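/*
 * Pre-fill mm->unused_nodes up to MM_UNUSED_TARGET so that drm_mm_kmalloc()
 * can fall back to this cache when a fresh allocation fails (or, in the
 * original kernel code, when the caller must not sleep).
 */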
int drm_mm_pre_get(struct drm_mm *mm)
{
struct drm_mm_node *node;
spin_lock(&mm->unused_lock);
while (mm->num_unused < MM_UNUSED_TARGET) {
spin_unlock(&mm->unused_lock);
node = kmalloc(sizeof(*node), GFP_KERNEL);
spin_lock(&mm->unused_lock);
if (unlikely(node == NULL)) {
int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
spin_unlock(&mm->unused_lock);
return ret;
}
++mm->num_unused;
list_add_tail(&node->fl_entry, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
}
//EXPORT_SYMBOL(drm_mm_pre_get);
static int drm_mm_create_tail_node(struct drm_mm *mm,
unsigned long start,
unsigned long size, int atomic)
{
struct drm_mm_node *child;
child = drm_mm_kmalloc(mm, atomic);
if (unlikely(child == NULL))
return -ENOMEM;
child->free = 1;
child->size = size;
child->start = start;
child->mm = mm;
list_add_tail(&child->ml_entry, &mm->ml_entry);
list_add_tail(&child->fl_entry, &mm->fl_entry);
return 0;
}
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free) {
return drm_mm_create_tail_node(mm, entry->start + entry->size,
size, atomic);
}
entry->size += size;
return 0;
}
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
unsigned long size,
int atomic)
{
struct drm_mm_node *child;
child = drm_mm_kmalloc(parent->mm, atomic);
if (unlikely(child == NULL))
return NULL;
INIT_LIST_HEAD(&child->fl_entry);
child->free = 0;
child->size = size;
child->start = parent->start;
child->mm = parent->mm;
list_add_tail(&child->ml_entry, &parent->ml_entry);
INIT_LIST_HEAD(&child->fl_entry);
parent->size -= size;
parent->start += size;
return child;
}
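/*
 * Take @size bytes from a free node. If @alignment requires it, a small
 * block is first split off the front to align the start, the requested
 * block is carved out, and the alignment split-off is returned to the
 * free list.
 */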
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
int atomic)
{
struct drm_mm_node *align_splitoff = NULL;
unsigned tmp = 0;
if (alignment)
tmp = node->start % alignment;
if (tmp) {
align_splitoff =
drm_mm_split_at_start(node, alignment - tmp, atomic);
if (unlikely(align_splitoff == NULL))
return NULL;
}
if (node->size == size) {
list_del_init(&node->fl_entry);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
}
if (align_splitoff)
drm_mm_put_block(align_splitoff);
return node;
}
//EXPORT_SYMBOL(drm_mm_get_block_generic);
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
*/
void drm_mm_put_block(struct drm_mm_node *cur)
{
struct drm_mm *mm = cur->mm;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &mm->ml_entry;
struct drm_mm_node *prev_node = NULL;
struct drm_mm_node *next_node;
int merged = 0;
if (cur_head->prev != root_head) {
prev_node =
list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
next_node =
list_entry(cur_head->next, struct drm_mm_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&next_node->fl_entry,
&mm->unused_nodes);
++mm->num_unused;
} else
kfree(next_node);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
merged = 1;
}
}
}
if (!merged) {
cur->free = 1;
list_add(&cur->fl_entry, &mm->fl_entry);
} else {
list_del(&cur->ml_entry);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&cur->fl_entry, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(cur);
}
}
//EXPORT_SYMBOL(drm_mm_put_block);
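/*
 * Walk the free stack looking for a node big enough for @size bytes at the
 * requested @alignment. Without @best_match the first fit is returned;
 * with it, the smallest node that still fits wins.
 */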
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
unsigned wasted;
best = NULL;
best_size = ~0UL;
list_for_each(list, free_stack) {
entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
if (entry->size < size)
continue;
if (alignment) {
register unsigned tmp = entry->start % alignment;
if (tmp)
wasted += alignment - tmp;
}
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
}
}
return best;
}
//EXPORT_SYMBOL(drm_mm_search_free);
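/*
 * The manager is "clean" when the memory list contains only the single
 * initial free node, i.e. nothing is currently allocated.
 */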
int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
return (head->next->next == head);
}
//EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
spin_lock_init(&mm->unused_lock);
return drm_mm_create_tail_node(mm, start, size, 0);
}
//EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
struct list_head *bnode = mm->fl_entry.next;
struct drm_mm_node *entry;
struct drm_mm_node *next;
entry = list_entry(bnode, struct drm_mm_node, fl_entry);
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
kfree(entry);
spin_lock(&mm->unused_lock);
list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
list_del(&entry->fl_entry);
kfree(entry);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
BUG_ON(mm->num_unused != 0);
}
//EXPORT_SYMBOL(drm_mm_takedown);


@@ -0,0 +1,580 @@
/*
* The list_sort function is (presumably) licensed under the GPL (see the
* top level "COPYING" file for details).
*
* The remainder of this file is:
*
* Copyright © 1997-2003 by The XFree86 Project, Inc.
* Copyright © 2007 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name of the copyright holder(s)
* and author(s) shall not be used in advertising or otherwise to promote
* the sale, use or other dealings in this Software without prior written
* authorization from the copyright holder(s) and author(s).
*/
#include <list.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#define DRM_MODESET_DEBUG "drm_mode"
/**
* drm_mode_debug_printmodeline - debug print a mode
* @dev: DRM device
* @mode: mode to print
*
* LOCKING:
* None.
*
* Describe @mode using DRM_DEBUG.
*/
void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
{
DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
"Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
mode->base.id, mode->name, mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal, mode->type, mode->flags);
}
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
*
* LOCKING:
* None.
*
* Set the name of @mode to a standard format.
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
mode->vdisplay);
}
EXPORT_SYMBOL(drm_mode_set_name);
/**
* drm_mode_list_concat - move modes from one list to another
* @head: source list
* @new: dst list
*
* LOCKING:
* Caller must ensure both lists are locked.
*
* Move all the modes from @head to @new.
*/
void drm_mode_list_concat(struct list_head *head, struct list_head *new)
{
struct list_head *entry, *tmp;
list_for_each_safe(entry, tmp, head) {
list_move_tail(entry, new);
}
}
EXPORT_SYMBOL(drm_mode_list_concat);
/**
* drm_mode_width - get the width of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @mode's width (hdisplay) value.
*
* FIXME: is this needed?
*
* RETURNS:
* @mode->hdisplay
*/
int drm_mode_width(struct drm_display_mode *mode)
{
return mode->hdisplay;
}
EXPORT_SYMBOL(drm_mode_width);
/**
* drm_mode_height - get the height of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @mode's height (vdisplay) value.
*
* FIXME: is this needed?
*
* RETURNS:
* @mode->vdisplay
*/
int drm_mode_height(struct drm_display_mode *mode)
{
return mode->vdisplay;
}
EXPORT_SYMBOL(drm_mode_height);
/**
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @mode's vrefresh rate or calculate it if necessary.
*
* FIXME: why is this needed? shouldn't vrefresh be set already?
*
* RETURNS:
 * Vertical refresh rate of @mode in Hz, calculated from the timings if the
 * field is not already set.
*/
int drm_mode_vrefresh(struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
if (mode->vrefresh > 0)
refresh = mode->vrefresh;
else if (mode->htotal > 0 && mode->vtotal > 0) {
        /* mode->clock is in kHz; work in Hz and round to the nearest
         * whole refresh rate */
        calc_val = (mode->clock * 1000);
        calc_val /= mode->htotal;
        refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
refresh *= 2;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
refresh /= 2;
if (mode->vscan > 1)
refresh /= mode->vscan;
}
return refresh;
}
EXPORT_SYMBOL(drm_mode_vrefresh);
/**
* drm_mode_set_crtcinfo - set CRTC modesetting parameters
* @p: mode
 * @adjust_flags: adjustment flags (e.g. CRTC_INTERLACE_HALVE_V)
*
* LOCKING:
* None.
*
* Setup the CRTC modesetting parameters for @p, adjusting if necessary.
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
return;
p->crtc_hdisplay = p->hdisplay;
p->crtc_hsync_start = p->hsync_start;
p->crtc_hsync_end = p->hsync_end;
p->crtc_htotal = p->htotal;
p->crtc_hskew = p->hskew;
p->crtc_vdisplay = p->vdisplay;
p->crtc_vsync_start = p->vsync_start;
p->crtc_vsync_end = p->vsync_end;
p->crtc_vtotal = p->vtotal;
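    /* Interlaced timings describe a full frame; when CRTC_INTERLACE_HALVE_V
     * is set they are converted to per-field values. vtotal is forced odd,
     * since an interlaced frame has an odd total line count. */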
if (p->flags & DRM_MODE_FLAG_INTERLACE) {
if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
p->crtc_vdisplay /= 2;
p->crtc_vsync_start /= 2;
p->crtc_vsync_end /= 2;
p->crtc_vtotal /= 2;
}
p->crtc_vtotal |= 1;
}
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
p->crtc_vdisplay *= 2;
p->crtc_vsync_start *= 2;
p->crtc_vsync_end *= 2;
p->crtc_vtotal *= 2;
}
if (p->vscan > 1) {
p->crtc_vdisplay *= p->vscan;
p->crtc_vsync_start *= p->vscan;
p->crtc_vsync_end *= p->vscan;
p->crtc_vtotal *= p->vscan;
}
p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
p->crtc_hadjusted = false;
p->crtc_vadjusted = false;
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
* @m: mode to duplicate
*
* LOCKING:
* None.
*
* Just allocate a new mode, copy the existing mode into it, and return
* a pointer to it. Used to create new instances of established modes.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
nmode = drm_mode_create(dev);
if (!nmode)
return NULL;
new_id = nmode->base.id;
*nmode = *mode;
nmode->base.id = new_id;
INIT_LIST_HEAD(&nmode->head);
return nmode;
}
EXPORT_SYMBOL(drm_mode_duplicate);
/**
* drm_mode_equal - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
* LOCKING:
* None.
*
* Check to see if @mode1 and @mode2 are equivalent.
*
* RETURNS:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
{
	/* Do a clock check, converting to PICOS so fb-originated modes get
	 * matched the same way */
if (mode1->clock && mode2->clock) {
if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
return false;
} else if (mode1->clock != mode2->clock)
return false;
if (mode1->hdisplay == mode2->hdisplay &&
mode1->hsync_start == mode2->hsync_start &&
mode1->hsync_end == mode2->hsync_end &&
mode1->htotal == mode2->htotal &&
mode1->hskew == mode2->hskew &&
mode1->vdisplay == mode2->vdisplay &&
mode1->vsync_start == mode2->vsync_start &&
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
mode1->flags == mode2->flags)
return true;
return false;
}
EXPORT_SYMBOL(drm_mode_equal);
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
* @dev: DRM device
* @mode_list: list of modes to check
* @maxX: maximum width
* @maxY: maximum height
* @maxPitch: max pitch
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* The DRM device (@dev) has size and pitch limits. Here we validate the
* modes we probed for @dev against those limits and set their status as
* necessary.
*/
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY, int maxPitch)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, mode_list, head) {
if (maxPitch > 0 && mode->hdisplay > maxPitch)
mode->status = MODE_BAD_WIDTH;
if (maxX > 0 && mode->hdisplay > maxX)
mode->status = MODE_VIRTUAL_X;
if (maxY > 0 && mode->vdisplay > maxY)
mode->status = MODE_VIRTUAL_Y;
}
}
EXPORT_SYMBOL(drm_mode_validate_size);
/**
* drm_mode_validate_clocks - validate modes against clock limits
* @dev: DRM device
* @mode_list: list of modes to check
* @min: minimum clock rate array
* @max: maximum clock rate array
* @n_ranges: number of clock ranges (size of arrays)
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Some code may need to check a mode list against the clock limits of the
* device in question. This function walks the mode list, testing to make
* sure each mode falls within a given range (defined by @min and @max
* arrays) and sets @mode->status as needed.
*/
void drm_mode_validate_clocks(struct drm_device *dev,
struct list_head *mode_list,
int *min, int *max, int n_ranges)
{
struct drm_display_mode *mode;
int i;
list_for_each_entry(mode, mode_list, head) {
bool good = false;
for (i = 0; i < n_ranges; i++) {
if (mode->clock >= min[i] && mode->clock <= max[i]) {
good = true;
break;
}
}
if (!good)
mode->status = MODE_CLOCK_RANGE;
}
}
EXPORT_SYMBOL(drm_mode_validate_clocks);
/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
* @mode_list: list of modes to check
* @verbose: be verbose about it
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Once mode list generation is complete, a caller can use this routine to
* remove invalid modes from a mode list. If any of the modes have a
* status other than %MODE_OK, they are removed from @mode_list and freed.
*/
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose)
{
struct drm_display_mode *mode, *t;
list_for_each_entry_safe(mode, t, mode_list, head) {
if (mode->status != MODE_OK) {
list_del(&mode->head);
if (verbose) {
drm_mode_debug_printmodeline(mode);
DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
"Not using %s mode %d\n",
mode->name, mode->status);
}
drm_mode_destroy(dev, mode);
}
}
}
EXPORT_SYMBOL(drm_mode_prune_invalid);
/**
* drm_mode_compare - compare modes for favorability
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
* LOCKING:
* None.
*
* Compare two modes, given by @lh_a and @lh_b, returning a value indicating
* which is better.
*
* RETURNS:
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
{
struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
int diff;
diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
if (diff)
return diff;
diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
if (diff)
return diff;
diff = b->clock - a->clock;
return diff;
}
/* FIXME: why don't we have a generic list sort function? */
/* list sort from Mark J Roberts (mjr@znex.org) */
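/*
 * Bottom-up merge sort of a circular doubly-linked list: the list is
 * temporarily unhooked from @head, merged in runs of doubling size using
 * @cmp, then relinked. Equal elements keep their order, so the sort is stable.
 */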
void list_sort(struct list_head *head,
int (*cmp)(struct list_head *a, struct list_head *b))
{
struct list_head *p, *q, *e, *list, *tail, *oldhead;
int insize, nmerges, psize, qsize, i;
list = head->next;
list_del(head);
insize = 1;
for (;;) {
p = oldhead = list;
list = tail = NULL;
nmerges = 0;
while (p) {
nmerges++;
q = p;
psize = 0;
for (i = 0; i < insize; i++) {
psize++;
q = q->next == oldhead ? NULL : q->next;
if (!q)
break;
}
qsize = insize;
while (psize > 0 || (qsize > 0 && q)) {
if (!psize) {
e = q;
q = q->next;
qsize--;
if (q == oldhead)
q = NULL;
} else if (!qsize || !q) {
e = p;
p = p->next;
psize--;
if (p == oldhead)
p = NULL;
} else if (cmp(p, q) <= 0) {
e = p;
p = p->next;
psize--;
if (p == oldhead)
p = NULL;
} else {
e = q;
q = q->next;
qsize--;
if (q == oldhead)
q = NULL;
}
if (tail)
tail->next = e;
else
list = e;
e->prev = tail;
tail = e;
}
p = q;
}
tail->next = list;
list->prev = tail;
if (nmerges <= 1)
break;
insize *= 2;
}
head->next = list;
head->prev = list->prev;
list->prev->next = head;
list->prev = head;
}
/**
* drm_mode_sort - sort mode list
* @mode_list: list to sort
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Sort @mode_list by favorability, putting good modes first.
*/
void drm_mode_sort(struct list_head *mode_list)
{
list_sort(mode_list, drm_mode_compare);
}
EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
 * list and only adds modes that are not already present. Any modes left
 * unverified after this point will be removed by the prune-invalid pass.
*/
void drm_mode_connector_list_update(struct drm_connector *connector)
{
struct drm_display_mode *mode;
struct drm_display_mode *pmode, *pt;
int found_it;
list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
head) {
found_it = 0;
/* go through current modes checking for the new probed mode */
list_for_each_entry(mode, &connector->modes, head) {
if (drm_mode_equal(pmode, mode)) {
found_it = 1;
/* if equal delete the probed mode */
mode->status = pmode->status;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
}
}
if (!found_it) {
list_move_tail(&pmode->head, &connector->modes);
}
}
}
EXPORT_SYMBOL(drm_mode_connector_list_update);


@@ -0,0 +1,744 @@
/**
* \file drm.h
* Header for the Direct Rendering Manager
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*
* \par Acknowledgments:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
*/
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _DRM_H_
#define _DRM_H_
#include <types.h>
#include <errno-base.h>
//#include <asm/ioctl.h> /* For _IO* macros */
#define DRM_MAJOR 226
#define DRM_MAX_MINOR 15
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
typedef unsigned int drm_handle_t;
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
/**
* Cliprect.
*
* \warning: If you change this structure, make sure you change
* XF86DRIClipRectRec in the server as well
*
* \note KW: Actually it's illegal to change either for
* backwards-compatibility reasons.
*/
struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
};
/**
* Drawable information.
*/
struct drm_drawable_info {
unsigned int num_rects;
struct drm_clip_rect *rects;
};
/**
 * Texture region.
*/
struct drm_tex_region {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding;
unsigned int age;
};
/**
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
* processor bus contention on a multiprocessor system, there should not be any
* other data stored in the same cache line.
*/
struct drm_hw_lock {
__volatile__ unsigned int lock; /**< lock variable */
char padding[60]; /**< Pad to cache line */
};
/**
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
*/
struct drm_version {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
size_t name_len; /**< Length of name buffer */
char __user *name; /**< Name of driver */
size_t date_len; /**< Length of date buffer */
char __user *date; /**< User-space buffer to hold date */
size_t desc_len; /**< Length of desc buffer */
char __user *desc; /**< User-space buffer to hold desc */
};
/**
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
*/
struct drm_unique {
size_t unique_len; /**< Length of unique */
char __user *unique; /**< Unique name for driver instantiation */
};
struct drm_list {
int count; /**< Length of user-space structures */
struct drm_version __user *version;
};
struct drm_block {
int unused;
};
/**
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
*/
struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
DRM_INST_HANDLER,
DRM_UNINST_HANDLER
} func;
int irq;
};
/**
* Type of memory to map.
*/
enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
_DRM_REGISTERS = 1, /**< no caching, no core dump */
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
_DRM_GEM = 6, /**< GEM object */
};
/**
* Memory mapping flags.
*/
enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /**< shared, cached, locked */
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40, /**< Removable mapping */
_DRM_DRIVER = 0x80 /**< Managed by driver */
};
struct drm_ctx_priv_map {
unsigned int ctx_id; /**< Context requesting private mapping */
void *handle; /**< Handle of map */
};
/**
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
* \sa drmAddMap().
*/
struct drm_map {
unsigned long offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
enum drm_map_type type; /**< Type of memory to map */
enum drm_map_flags flags; /**< Flags */
void *handle; /**< User-space: "Handle" to pass to mmap() */
/**< Kernel-space: kernel-virtual address */
int mtrr; /**< MTRR slot used */
/* Private data */
};
/**
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
int idx; /**< Which client desired? */
int auth; /**< Is client authenticated? */
unsigned long pid; /**< Process ID */
unsigned long uid; /**< User ID */
unsigned long magic; /**< Magic */
unsigned long iocs; /**< Ioctl count */
};
enum drm_stat_type {
_DRM_STAT_LOCK,
_DRM_STAT_OPENS,
_DRM_STAT_CLOSES,
_DRM_STAT_IOCTLS,
_DRM_STAT_LOCKS,
_DRM_STAT_UNLOCKS,
_DRM_STAT_VALUE, /**< Generic value */
_DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
_DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
_DRM_STAT_IRQ, /**< IRQ */
_DRM_STAT_PRIMARY, /**< Primary DMA bytes */
_DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
_DRM_STAT_DMA, /**< DMA */
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
_DRM_STAT_MISSED /**< Missed DMA opportunity */
/* Add to the *END* of the list */
};
/**
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
unsigned long count;
struct {
unsigned long value;
enum drm_stat_type type;
} data[15];
};
/**
* Hardware locking flags.
*/
enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
/**
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
*/
struct drm_lock {
int context;
enum drm_lock_flags flags;
};
/**
* DMA flags
*
* \warning
* These values \e must match xf86drm.h.
*
* \sa drm_dma.
*/
enum drm_dma_flags {
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /**<
* Block until buffer dispatched.
*
* \note The buffer may not yet have
* been processed by the hardware --
* getting a hardware lock with the
* hardware quiescent will ensure
* that the buffer has been
* processed.
*/
_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
/**
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
*/
struct drm_buf_desc {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
enum {
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
} flags;
unsigned long agp_start; /**<
* Start address of where the AGP buffers are
* in the AGP aperture
*/
};
/**
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
int count; /**< Entries in list */
struct drm_buf_desc __user *list;
};
/**
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
int count;
int __user *list;
};
/**
* Buffer information
*
* \sa drm_buf_map.
*/
struct drm_buf_pub {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
void __user *address; /**< Address of buffer */
};
/**
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
int count; /**< Length of the buffer list */
void __user *virtual; /**< Mmap'd area in user-virtual */
struct drm_buf_pub __user *list; /**< Buffer information */
};
/**
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
*
* \sa drmDMA().
*/
struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int __user *send_indices; /**< List of handles to buffers */
int __user *send_sizes; /**< Lengths of data to send */
enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
int __user *request_indices; /**< Buffer information */
int __user *request_sizes;
int granted_count; /**< Number of buffers granted */
};
enum drm_ctx_flags {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
};
/**
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
*/
struct drm_ctx {
drm_context_t handle;
enum drm_ctx_flags flags;
};
/**
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
int count;
struct drm_ctx __user *contexts;
};
/**
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
/**
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;
struct drm_update_draw {
drm_drawable_t handle;
unsigned int type;
unsigned int num;
unsigned long long data;
};
/**
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
/**
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
*/
struct drm_irq_busid {
int irq; /**< IRQ number */
int busnum; /**< bus number */
int devnum; /**< device number */
int funcnum; /**< function number */
};
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
_DRM_VBLANK_NEXTONMISS)
struct drm_wait_vblank_request {
enum drm_vblank_seq_type type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
enum drm_vblank_seq_type type;
unsigned int sequence;
long tval_sec;
long tval_usec;
};
/**
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
*/
union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
};
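/* Illustrative sketch, not part of the original header: how userspace
 * typically fills this union for a relative vblank wait, in the style of
 * libdrm's drmWaitVBlank().  The open descriptor `fd` and the ioctl() call
 * are assumptions of the example; the DRM_IOCTL_WAIT_VBLANK number appears
 * (currently commented out) further below. */
#if 0
	union drm_wait_vblank vbl;

	vbl.request.type     = _DRM_VBLANK_RELATIVE;	/* relative to "now" */
	vbl.request.sequence = 1;			/* one vblank from now */
	vbl.request.signal   = 0;
	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0) {
		/* vbl.reply.sequence and vbl.reply.tval_sec/tval_usec now hold
		 * the sequence number and timestamp of the vblank we got. */
	}
#endif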
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
/**
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
*/
struct drm_modeset_ctl {
__u32 crtc;
__u32 cmd;
};
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
*/
struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
/**
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
*/
struct drm_agp_buffer {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for binding / unbinding */
unsigned long type; /**< Type of memory to allocate */
unsigned long physical; /**< Physical used by i810 */
};
/**
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
*/
struct drm_agp_binding {
unsigned long handle; /**< From drm_agp_buffer */
unsigned long offset; /**< In bytes -- will round to page boundary */
};
/**
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
* drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
* drmAgpVendorId() and drmAgpDeviceId().
*/
struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
unsigned long aperture_base; /* physical address */
unsigned long aperture_size; /* bytes */
unsigned long memory_allowed; /* bytes */
unsigned long memory_used;
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
};
/**
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for mapping / unmapping */
};
/**
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
int drm_di_major;
int drm_di_minor;
int drm_dd_major;
int drm_dd_minor;
};
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
/** Returned global name */
__u32 name;
};
/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
/** Returned handle for the object */
__u32 handle;
/** Returned size of the object */
__u64 size;
};
#include "drm_mode.h"
/*
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
*/
/**
 * Device specific ioctls should only be in their respective headers.
 * The device specific ioctl range is from 0x40 to 0x99.
 * Generic IOCTLs restart at 0xA0.
*
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
* drmCommandReadWrite().
*/
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
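/* Illustrative sketch, not part of the original header: how a driver-private
 * ioctl is normally numbered on top of DRM_COMMAND_BASE.  The FOO name, the
 * 0x02 offset and the drm_foo_getparam payload are hypothetical; real drivers
 * put their own definitions in their device-specific headers, using the
 * DRM_IOWR() encoder shown (commented out) above. */
#if 0
struct drm_foo_getparam {
	int param;	/* which parameter to query */
	int value;	/* filled in by the driver */
};
#define DRM_FOO_GETPARAM	0x02
#define DRM_IOCTL_FOO_GETPARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_GETPARAM, \
					 struct drm_foo_getparam)
#endif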
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif
#endif

File diff suppressed because it is too large


@ -113,7 +113,7 @@ enum drm_mode_status {
struct drm_display_mode {
/* Header */
// struct list_head head;
struct list_head head;
struct drm_mode_object base;
char name[DRM_DISPLAY_MODE_LEN];
@ -246,7 +246,7 @@ struct drm_framebuffer_funcs {
struct drm_framebuffer {
struct drm_device *dev;
// struct list_head head;
struct list_head head;
struct drm_mode_object base;
const struct drm_framebuffer_funcs *funcs;
unsigned int pitch;
@ -257,32 +257,32 @@ struct drm_framebuffer {
int bits_per_pixel;
int flags;
void *fbdev;
u32_t pseudo_palette[17];
// struct list_head filp_head;
u32 pseudo_palette[17];
struct list_head filp_head;
};
struct drm_property_blob {
struct drm_mode_object base;
// struct list_head head;
struct list_head head;
unsigned int length;
void *data;
};
struct drm_property_enum {
uint64_t value;
// struct list_head head;
struct list_head head;
char name[DRM_PROP_NAME_LEN];
};
struct drm_property {
// struct list_head head;
struct list_head head;
struct drm_mode_object base;
uint32_t flags;
char name[DRM_PROP_NAME_LEN];
uint32_t num_values;
uint64_t *values;
// struct list_head enum_blob_list;
struct list_head enum_blob_list;
};
struct drm_crtc;
@ -348,7 +348,7 @@ struct drm_crtc_funcs {
*/
struct drm_crtc {
struct drm_device *dev;
// struct list_head head;
struct list_head head;
struct drm_mode_object base;
@ -415,7 +415,7 @@ struct drm_encoder_funcs {
*/
struct drm_encoder {
struct drm_device *dev;
// struct list_head head;
struct list_head head;
struct drm_mode_object base;
int encoder_type;
@ -447,7 +447,7 @@ struct drm_connector {
struct drm_device *dev;
// struct device kdev;
struct device_attribute *attr;
// struct list_head head;
struct list_head head;
struct drm_mode_object base;
@ -455,18 +455,18 @@ struct drm_connector {
int connector_type_id;
bool interlace_allowed;
bool doublescan_allowed;
// struct list_head modes; /* list of modes on this connector */
struct list_head modes; /* list of modes on this connector */
int initial_x, initial_y;
enum drm_connector_status status;
/* these are modes added by probing with DDC or the BIOS */
// struct list_head probed_modes;
struct list_head probed_modes;
struct drm_display_info display_info;
const struct drm_connector_funcs *funcs;
// struct list_head user_modes;
struct list_head user_modes;
struct drm_property_blob *edid_blob_ptr;
u32_t property_ids[DRM_CONNECTOR_MAX_PROPERTY];
uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
@ -490,7 +490,7 @@ struct drm_connector {
* This is used to set modes.
*/
struct drm_mode_set {
// struct list_head head;
struct list_head head;
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
@ -533,22 +533,22 @@ struct drm_mode_group {
struct drm_mode_config {
// struct mutex mutex; /* protects configuration (mode lists etc.) */
// struct mutex idr_mutex; /* for IDR management */
// struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */
int num_fb;
// struct list_head fb_list;
struct list_head fb_list;
int num_connector;
// struct list_head connector_list;
struct list_head connector_list;
int num_encoder;
// struct list_head encoder_list;
struct list_head encoder_list;
int num_crtc;
// struct list_head crtc_list;
struct list_head crtc_list;
// struct list_head property_list;
struct list_head property_list;
/* in-kernel framebuffers - hung of filp_head in drm_framebuffer */
// struct list_head fb_kernel_list;
struct list_head fb_kernel_list;
int min_width, min_height;
int max_width, max_height;
@ -556,7 +556,7 @@ struct drm_mode_config {
resource_size_t fb_base;
/* pointers to standard properties */
// struct list_head property_blob_list;
struct list_head property_blob_list;
struct drm_property *edid_property;
struct drm_property *dpms_property;
@ -586,7 +586,6 @@ struct drm_mode_config {
#define obj_to_property(x) container_of(x, struct drm_property, base)
#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
extern void drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
@ -612,8 +611,11 @@ extern char *drm_get_dvi_i_subconnector_name(int val);
extern char *drm_get_dvi_i_select_name(int val);
extern char *drm_get_tv_subconnector_name(int val);
extern char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
//extern void drm_fb_release(struct drm_file *file_priv);
//extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
//extern struct edid *drm_get_edid(struct drm_connector *connector,
// struct i2c_adapter *adapter);
//extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
@ -736,4 +738,6 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
//extern bool drm_detect_hdmi_monitor(struct edid *edid);
#endif /* __DRM_CRTC_H__ */


@ -0,0 +1,126 @@
/*
* Copyright © 2006 Keith Packard
* Copyright © 2007-2008 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* The DRM mode setting helper functions are common code for drivers to use if
* they wish. Drivers are not forced to use this code in their
 * implementations, but it would be useful if the code they do use at least
 * provides a consistent interface and operation to userspace.
*/
#ifndef __DRM_CRTC_HELPER_H__
#define __DRM_CRTC_HELPER_H__
//#include <linux/spinlock.h>
//#include <linux/types.h>
//#include <linux/idr.h>
//#include <linux/fb.h>
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
* unsupported, the provider must use the next lowest power level.
*/
void (*dpms)(struct drm_crtc *crtc, int mode);
void (*prepare)(struct drm_crtc *crtc);
void (*commit)(struct drm_crtc *crtc);
/* Provider can fixup or change mode timings before modeset occurs */
bool (*mode_fixup)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Actually set the mode */
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
/* Move the crtc on the current fb to the given position *optional* */
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
};
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
void (*save)(struct drm_encoder *encoder);
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
void (*mode_set)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
/* detect for DAC style encoders */
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
};
struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
int (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
};
extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
extern bool drm_helper_initial_config(struct drm_device *dev);
extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd);
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
{
crtc->helper_private = (void *)funcs;
}
static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
const struct drm_encoder_helper_funcs *funcs)
{
encoder->helper_private = (void *)funcs;
}
static inline void drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
connector->helper_private = (void *)funcs;
}
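/* Illustrative sketch, not part of the original header: the usual wiring in a
 * driver.  The my_crtc_* callbacks are hypothetical; the radeon AtomBIOS code
 * later in this commit follows the same pattern with atombios_helper_funcs
 * and drm_crtc_helper_add(). */
#if 0
static const struct drm_crtc_helper_funcs my_crtc_helper_funcs = {
	.dpms		= my_crtc_dpms,
	.mode_fixup	= my_crtc_mode_fixup,
	.mode_set	= my_crtc_mode_set,
	.prepare	= my_crtc_prepare,
	.commit		= my_crtc_commit,
};

static void my_crtc_init_example(struct drm_crtc *crtc)
{
	/* called after drm_crtc_init() has registered the CRTC */
	drm_crtc_helper_add(crtc, &my_crtc_helper_funcs);
}
#endif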
extern int drm_helper_resume_force_mode(struct drm_device *dev);
#endif


@ -0,0 +1,289 @@
/*
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __DRM_EDID_H__
#define __DRM_EDID_H__
#include <types.h>
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
struct est_timings {
u8 t1;
u8 t2;
u8 mfg_rsvd;
} __attribute__((packed));
/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
#define EDID_TIMING_ASPECT_SHIFT 6
#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
/* need to add 60 */
#define EDID_TIMING_VFREQ_SHIFT 0
#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
struct std_timing {
u8 hsize; /* need to multiply by 8 then add 248 */
u8 vfreq_aspect;
} __attribute__((packed));
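/* Illustrative decode sketch, not part of the original header, following the
 * field comments above: the stored hsize is (active pixels / 8) - 31, the
 * refresh rate is stored minus 60, and the aspect ratio lives in the top two
 * bits of vfreq_aspect. */
static inline void std_timing_decode_example(const struct std_timing *t,
					     int *hsize, int *vfreq, int *aspect)
{
	*hsize  = (t->hsize * 8) + 248;				/* active pixels */
	*vfreq  = ((t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
		   >> EDID_TIMING_VFREQ_SHIFT) + 60;		/* Hz */
	*aspect = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
		   >> EDID_TIMING_ASPECT_SHIFT;			/* 0=16:10 .. 3=16:9 */
}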
#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
#define DRM_EDID_PT_STEREO (1 << 5)
#define DRM_EDID_PT_INTERLACED (1 << 7)
/* If detailed data is pixel timing */
struct detailed_pixel_timing {
u8 hactive_lo;
u8 hblank_lo;
u8 hactive_hblank_hi;
u8 vactive_lo;
u8 vblank_lo;
u8 vactive_vblank_hi;
u8 hsync_offset_lo;
u8 hsync_pulse_width_lo;
u8 vsync_offset_pulse_width_lo;
u8 hsync_vsync_offset_pulse_width_hi;
u8 width_mm_lo;
u8 height_mm_lo;
u8 width_height_mm_hi;
u8 hborder;
u8 vborder;
u8 misc;
} __attribute__((packed));
/* If it's not pixel timing, it'll be one of the below */
struct detailed_data_string {
u8 str[13];
} __attribute__((packed));
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
u16 sec_gtf_toggle; /* A000=use above, 20=use below */
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
u16 m;
u8 k;
u8 j; /* need to divide by 2 */
} __attribute__((packed));
struct detailed_data_wpindex {
u8 white_yx_lo; /* Lower 2 bits each */
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
} __attribute__((packed));
struct detailed_data_color_point {
u8 windex1;
u8 wpindex1[3];
u8 windex2;
u8 wpindex2[3];
} __attribute__((packed));
struct detailed_non_pixel {
u8 pad1;
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
fb=color point data, fa=standard timing data,
f9=undefined, f8=mfg. reserved */
u8 pad2;
union {
struct detailed_data_string str;
struct detailed_data_monitor_range range;
struct detailed_data_wpindex color;
struct std_timing timings[5];
} data;
} __attribute__((packed));
#define EDID_DETAIL_STD_MODES 0xfa
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
#define EDID_DETAIL_MONITOR_NAME 0xfc
#define EDID_DETAIL_MONITOR_RANGE 0xfd
#define EDID_DETAIL_MONITOR_STRING 0xfe
#define EDID_DETAIL_MONITOR_SERIAL 0xff
struct detailed_timing {
u16 pixel_clock; /* need to multiply by 10 KHz */
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
} data;
} __attribute__((packed));
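/* Illustrative decode sketch, not part of the original header.  The pixel
 * clock is stored in units of 10 kHz, and the horizontal active/blanking
 * widths are a low byte plus a high nibble packed into hactive_hblank_hi;
 * that nibble layout is the usual EDID 1.3 convention and is an assumption
 * of this example.  Byte order is ignored (fine on a little-endian x86 port). */
static inline void detailed_timing_decode_example(const struct detailed_timing *dt,
						   int *clock_khz,
						   int *hactive, int *hblank)
{
	const struct detailed_pixel_timing *pt = &dt->data.pixel_data;

	*clock_khz = dt->pixel_clock * 10;
	*hactive   = ((pt->hactive_hblank_hi & 0xf0) << 4) | pt->hactive_lo;
	*hblank    = ((pt->hactive_hblank_hi & 0x0f) << 8) | pt->hblank_lo;
}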
#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
#define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */
#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
struct edid {
u8 header[8];
/* Vendor & product info */
u8 mfg_id[2];
u8 prod_code[2];
u32 serial; /* FIXME: byte order */
u8 mfg_week;
u8 mfg_year;
/* EDID version */
u8 version;
u8 revision;
/* Display info: */
u8 input;
u8 width_cm;
u8 height_cm;
u8 gamma;
u8 features;
/* Color characteristics */
u8 red_green_lo;
u8 black_white_lo;
u8 red_x;
u8 red_y;
u8 green_x;
u8 green_y;
u8 blue_x;
u8 blue_y;
u8 white_x;
u8 white_y;
/* Est. timings and mfg rsvd timings*/
struct est_timings established_timings;
/* Standard timings 1-8*/
struct std_timing standard_timings[8];
	/* Detailed timings 1-4 */
struct detailed_timing detailed_timings[4];
/* Number of 128 byte ext. blocks */
u8 extensions;
/* Checksum */
u8 checksum;
} __attribute__((packed));
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
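/* Illustrative validity check, not part of the original header: a raw EDID
 * block is EDID_LENGTH bytes starting with the fixed 00 FF FF FF FF FF FF 00
 * header, and all 128 bytes sum to zero modulo 256.  The exact checks a
 * probing routine performs are an assumption of this sketch. */
static inline int edid_block_valid_example(const u8 *raw)
{
	static const u8 edid_header[8] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
	};
	u8 csum = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (raw[i] != edid_header[i])
			return 0;
	for (i = 0; i < EDID_LENGTH; i++)
		csum += raw[i];
	return csum == 0;
}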
#define KOBJ_NAME_LEN 20
#define I2C_NAME_SIZE 20
/* --- Defines for bit-adapters --------------------------------------- */
/*
* This struct contains the hw-dependent functions of bit-style adapters to
* manipulate the line states, and to init any hw-specific features. This is
* only used if you have more than one hw-type of adapter running.
*/
struct i2c_algo_bit_data {
void *data; /* private data for lowlevel routines */
void (*setsda) (void *data, int state);
void (*setscl) (void *data, int state);
int (*getsda) (void *data);
int (*getscl) (void *data);
/* local settings */
int udelay; /* half clock cycle time in us,
minimum 2 us for fast-mode I2C,
minimum 5 us for standard-mode I2C and SMBus,
maximum 50 us for SMBus */
int timeout; /* in jiffies */
};
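/* Illustrative sketch, not part of the original header: a driver backs
 * i2c_algo_bit_data with its own SDA/SCL accessors (here hypothetical
 * my_gpio_* helpers and a my_ddc state cookie) before registering the bus
 * with i2c_bit_add_bus(). */
#if 0
static struct i2c_algo_bit_data my_ddc_bit_data = {
	.data    = &my_ddc,		/* handed back to every callback */
	.setsda  = my_gpio_set_sda,	/* drive SDA low or release it */
	.setscl  = my_gpio_set_scl,	/* drive SCL low or release it */
	.getsda  = my_gpio_get_sda,	/* sample SDA */
	.getscl  = my_gpio_get_scl,	/* sample SCL */
	.udelay  = 5,			/* 5 us half-cycle ~ 100 kHz standard mode */
	.timeout = 100,			/* in jiffies */
};
#endif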
struct i2c_client;
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
* with the access algorithms necessary to access it.
*/
struct i2c_adapter {
// struct module *owner;
unsigned int id;
unsigned int class;
// const struct i2c_algorithm *algo; /* the algorithm to access the bus */
void *algo_data;
/* --- administration stuff. */
int (*client_register)(struct i2c_client *);
int (*client_unregister)(struct i2c_client *);
/* data fields that are valid for all devices */
u8 level; /* nesting level for lockdep */
// struct mutex bus_lock;
// struct mutex clist_lock;
int timeout;
int retries;
// struct device dev; /* the adapter device */
int nr;
struct list_head clients; /* DEPRECATED */
char name[48];
// struct completion dev_released;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
struct i2c_client {
unsigned short flags; /* div., see below */
unsigned short addr; /* chip address - NOTE: 7bit */
/* addresses are stored in the */
/* _LOWER_ 7 bits */
char name[I2C_NAME_SIZE];
struct i2c_adapter *adapter; /* the adapter we sit on */
// struct i2c_driver *driver; /* and our access routines */
// struct device dev; /* the device structure */
int irq; /* irq issued by device (or -1) */
char driver_name[KOBJ_NAME_LEN];
struct list_head list; /* DEPRECATED */
// struct completion released;
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
int i2c_bit_add_bus(struct i2c_adapter *);
int i2c_bit_add_numbered_bus(struct i2c_adapter *);
struct i2c_msg {
u16 addr; /* slave address */
u16 flags;
#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
#define I2C_M_RD 0x0001 /* read data, from slave to master */
#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */
u16 len; /* msg length */
u8 *buf; /* pointer to msg data */
};
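/* Illustrative sketch, not part of the original header: the classic DDC EDID
 * read is a two-message transfer at DDC_ADDR -- write the 0 offset, then read
 * EDID_LENGTH bytes back.  The `adapter` pointer and the i2c_transfer()
 * routine are assumed to come from the port's i2c layer; neither is declared
 * in this header. */
#if 0
	unsigned char start = 0;
	unsigned char buf[EDID_LENGTH];
	struct i2c_msg msgs[2] = {
		{ .addr = DDC_ADDR, .flags = 0,        .len = 1,           .buf = &start },
		{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = EDID_LENGTH, .buf = buf    },
	};

	if (i2c_transfer(adapter, msgs, 2) == 2) {
		/* buf now holds one raw 128-byte EDID block */
	}
#endif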
#endif /* __DRM_EDID_H__ */


@ -1,6 +1,6 @@
#include <types.h>
#include <link.h>
#include <list.h>
#ifndef __PCI_H__
#define __PCI_H__
@ -545,8 +545,8 @@ struct pci_device_id
typedef struct
{
link_t link;
struct pci_dev pci_dev;
struct list_head link;
struct pci_dev pci_dev;
}dev_t;
int enum_pci_devices(void);


@ -38,6 +38,9 @@ typedef unsigned short u16_t;
typedef unsigned int u32_t;
typedef unsigned long long u64_t;
typedef signed char int8_t;
typedef signed long long int64_t;
#define NULL (void*)0
typedef uint32_t dma_addr_t;
@ -55,6 +58,14 @@ typedef uint32_t resource_size_t;
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define BITS_PER_LONG 32
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
#define KERN_EMERG "<0>" /* system is unusable */
@ -115,14 +126,21 @@ int dbgprintf(const char* format, ...);
#define GFP_KERNEL 0
//#include <stdio.h>
int snprintf(char *str, size_t size, const char *format, ...);
//#include <string.h>
void* memcpy(void *s1, const void *s2, size_t n);
void* memset(void *s, int c, size_t n);
size_t strlen(const char *s);
char *strncpy (char *dst, const char *src, size_t len);
void *malloc(size_t size);
#define kmalloc(s,f) malloc((s))
#define kfree free
static inline void *kzalloc(size_t size, u32_t flags)
@ -200,5 +218,81 @@ struct drm_file;
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
static inline void bitmap_zero(unsigned long *dst, int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = 0UL;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
#define EXPORT_SYMBOL(x)
#define IDR_BITS 5
#define IDR_FULL 0xfffffffful
struct idr_layer {
unsigned long bitmap; /* A zero bit means "space here" */
struct idr_layer *ary[1<<IDR_BITS];
int count; /* When zero, we can release it */
};
struct idr {
struct idr_layer *top;
struct idr_layer *id_free;
int layers;
int id_free_cnt;
// spinlock_t lock;
};
#define min(x,y) ({ \
typeof(x) _x = (x); \
typeof(y) _y = (y); \
(void) (&_x == &_y); \
_x < _y ? _x : _y; })
#define max(x,y) ({ \
typeof(x) _x = (x); \
typeof(y) _y = (y); \
(void) (&_x == &_y); \
_x > _y ? _x : _y; })
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
if (likely(((n) >> 32) == 0)) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
} else \
__rem = __div64_32(&(n), __base); \
__rem; \
})
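/* Illustrative usage, not part of the original header: do_div() divides the
 * 64-bit value in place by a 32-bit divisor and returns the remainder, so a
 * byte count can be reduced to whole kilobytes like this. */
#if 0
	uint64_t bytes = 5ULL * 1024 * 1024 * 1024;	/* 5 GiB */
	uint32_t rem   = do_div(bytes, 1024);		/* bytes is now 5242880 (KiB), rem 0 */
#endif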
#define lower_32_bits(n) ((u32)(n))
#define INT_MAX ((int)(~0U>>1))
#define INT_MIN (-INT_MAX - 1)
#define UINT_MAX (~0U)
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
static inline void *kcalloc(size_t n, size_t size, u32_t flags)
{
if (n != 0 && size > ULONG_MAX / n)
return NULL;
return kmalloc(n * size, 0);
}
#endif //__TYPES_H__


@ -0,0 +1,697 @@
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drmP.h>
#include <drm_crtc_helper.h>
#include "radeon_drm.h"
#include "radeon_fixed.h"
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int index =
GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
ENABLE_CRTC_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = lock;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
ENABLE_CRTC_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = state;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
ENABLE_CRTC_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = state;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
BLANK_CRTC_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucCRTC = radeon_crtc->crtc_id;
args.ucBlanking = state;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
switch (mode) {
case DRM_MODE_DPMS_ON:
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 1);
atombios_enable_crtc(crtc, 1);
atombios_blank_crtc(crtc, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_blank_crtc(crtc, 1);
atombios_enable_crtc(crtc, 0);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 0);
break;
}
if (mode != DRM_MODE_DPMS_OFF) {
radeon_crtc_load_lut(crtc);
}
}
static void
atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size);
conv_param.usH_Blanking_Time =
cpu_to_le16(crtc_param->usH_Blanking_Time);
conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size);
conv_param.usV_Blanking_Time =
cpu_to_le16(crtc_param->usV_Blanking_Time);
conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset);
conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset);
conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
conv_param.susModeMiscInfo.usAccess =
cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
conv_param.ucCRTC = crtc_param->ucCRTC;
printk("executing set crtc dtd timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
}
void atombios_crtc_set_timing(struct drm_crtc *crtc,
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *
crtc_param)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total);
conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp);
conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart);
conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total);
conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp);
conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart);
conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
conv_param.susModeMiscInfo.usAccess =
cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
conv_param.ucCRTC = crtc_param->ucCRTC;
conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
conv_param.ucReserved = crtc_param->ucReserved;
printk("executing set crtc timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
}
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
uint8_t frev, crev;
int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
SET_PIXEL_CLOCK_PS_ALLOCATION args;
PIXEL_CLOCK_PARAMETERS *spc1_ptr;
PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
uint32_t sclock = mode->clock;
uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int pll_flags = 0;
memset(&args, 0, sizeof(args));
if (ASIC_IS_AVIVO(rdev)) {
uint32_t ss_cntl;
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
/* disable spread spectrum clocking for now -- thanks Hedy Lamarr */
if (radeon_crtc->crtc_id == 0) {
ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
} else {
ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
}
} else {
pll_flags |= RADEON_PLL_LEGACY;
if (mode->clock > 200000) /* range limits??? */
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
if (!ASIC_IS_AVIVO(rdev)) {
if (encoder->encoder_type !=
DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (!ASIC_IS_AVIVO(rdev)
&& (encoder->encoder_type ==
DRM_MODE_ENCODER_LVDS))
pll_flags |= RADEON_PLL_USE_REF_DIV;
}
radeon_encoder = to_radeon_encoder(encoder);
}
}
if (radeon_crtc->crtc_id == 0)
pll = &rdev->clock.p1pll;
else
pll = &rdev->clock.p2pll;
radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div,
&ref_div, &post_div, pll_flags);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
switch (frev) {
case 1:
switch (crev) {
case 1:
spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
spc1_ptr->usPixelClock = cpu_to_le16(sclock);
spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
spc1_ptr->ucFracFbDiv = frac_fb_div;
spc1_ptr->ucPostDiv = post_div;
spc1_ptr->ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
spc1_ptr->ucRefDivSrc = 1;
break;
case 2:
spc2_ptr =
(PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
spc2_ptr->usPixelClock = cpu_to_le16(sclock);
spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
spc2_ptr->ucFracFbDiv = frac_fb_div;
spc2_ptr->ucPostDiv = post_div;
spc2_ptr->ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
spc2_ptr->ucRefDivSrc = 1;
break;
case 3:
if (!encoder)
return;
spc3_ptr =
(PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
spc3_ptr->usPixelClock = cpu_to_le16(sclock);
spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
spc3_ptr->ucFracFbDiv = frac_fb_div;
spc3_ptr->ucPostDiv = post_div;
spc3_ptr->ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
spc3_ptr->ucEncoderMode =
atombios_get_encoder_mode(encoder);
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
printk("executing set pll\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
struct drm_radeon_gem_object *obj_priv;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels;
if (!crtc->fb)
return -EINVAL;
radeon_fb = to_radeon_framebuffer(crtc->fb);
obj = radeon_fb->obj;
obj_priv = obj->driver_private;
// if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
// return -EINVAL;
// }
switch (crtc->fb->bits_per_pixel) {
case 15:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
break;
case 16:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
break;
case 24:
case 32:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
break;
default:
DRM_ERROR("Unsupported screen depth %d\n",
crtc->fb->bits_per_pixel);
return -EINVAL;
}
/* TODO tiling */
if (radeon_crtc->crtc_id == 0)
WREG32(AVIVO_D1VGA_CONTROL, 0);
else
WREG32(AVIVO_D2VGA_CONTROL, 0);
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32) fb_location);
WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
radeon_crtc->crtc_offset, (u32) fb_location);
WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
crtc->mode.vdisplay);
x &= ~3;
y &= ~1;
WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
(x << 16) | y);
WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
AVIVO_D1MODE_INTERLEAVE_EN);
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
// radeon_gem_object_unpin(radeon_fb->obj);
}
return 0;
}
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y, struct drm_framebuffer *old_fb)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
/* TODO color tiling */
memset(&crtc_timing, 0, sizeof(crtc_timing));
/* TODO tv */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
}
crtc_timing.ucCRTC = radeon_crtc->crtc_id;
crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
crtc_timing.usH_SyncWidth =
adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
crtc_timing.usV_SyncWidth =
adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_crtc_set_timing(crtc, &crtc_timing);
if (ASIC_IS_AVIVO(rdev))
atombios_crtc_set_base(crtc, x, y, old_fb);
else {
if (radeon_crtc->crtc_id == 0) {
SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing;
memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));
/* setup FP shadow regs on R4xx */
crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
crtc_dtd_timing.usH_Blanking_Time =
adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hdisplay;
crtc_dtd_timing.usV_Blanking_Time =
adjusted_mode->crtc_vblank_end -
adjusted_mode->crtc_vdisplay;
crtc_dtd_timing.usH_SyncOffset =
adjusted_mode->crtc_hsync_start -
adjusted_mode->crtc_hdisplay;
crtc_dtd_timing.usV_SyncOffset =
adjusted_mode->crtc_vsync_start -
adjusted_mode->crtc_vdisplay;
crtc_dtd_timing.usH_SyncWidth =
adjusted_mode->crtc_hsync_end -
adjusted_mode->crtc_hsync_start;
crtc_dtd_timing.usV_SyncWidth =
adjusted_mode->crtc_vsync_end -
adjusted_mode->crtc_vsync_start;
/* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */
/* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
crtc_dtd_timing.susModeMiscInfo.usAccess |=
ATOM_VSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
crtc_dtd_timing.susModeMiscInfo.usAccess |=
ATOM_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
crtc_dtd_timing.susModeMiscInfo.usAccess |=
ATOM_COMPOSITESYNC;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
crtc_dtd_timing.susModeMiscInfo.usAccess |=
ATOM_INTERLACE;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
crtc_dtd_timing.susModeMiscInfo.usAccess |=
ATOM_DOUBLE_CLOCK_MODE;
atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
}
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_set_surface(crtc);
}
return 0;
}
static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
atombios_lock_crtc(crtc, 1);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
atombios_lock_crtc(crtc, 0);
}
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.dpms = atombios_crtc_dpms,
.mode_fixup = atombios_crtc_mode_fixup,
.mode_set = atombios_crtc_mode_set,
.mode_set_base = atombios_crtc_set_base,
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
};
void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc)
{
if (radeon_crtc->crtc_id == 1)
radeon_crtc->crtc_offset =
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
void radeon_init_disp_bw_avivo(struct drm_device *dev,
struct drm_display_mode *mode1,
uint32_t pixel_bytes1,
struct drm_display_mode *mode2,
uint32_t pixel_bytes2)
{
struct radeon_device *rdev = dev->dev_private;
fixed20_12 min_mem_eff;
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
fixed20_12 sclk_ff, mclk_ff;
uint32_t dc_lb_memory_split, temp;
min_mem_eff.full = rfixed_const_8(0);
if (rdev->disp_priority == 2) {
uint32_t mc_init_misc_lat_timer = 0;
if (rdev->family == CHIP_RV515)
mc_init_misc_lat_timer =
RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
else if (rdev->family == CHIP_RS690)
mc_init_misc_lat_timer =
RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);
mc_init_misc_lat_timer &=
~(R300_MC_DISP1R_INIT_LAT_MASK <<
R300_MC_DISP1R_INIT_LAT_SHIFT);
mc_init_misc_lat_timer &=
~(R300_MC_DISP0R_INIT_LAT_MASK <<
R300_MC_DISP0R_INIT_LAT_SHIFT);
if (mode2)
mc_init_misc_lat_timer |=
(1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
if (mode1)
mc_init_misc_lat_timer |=
(1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
if (rdev->family == CHIP_RV515)
WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
mc_init_misc_lat_timer);
else if (rdev->family == CHIP_RS690)
WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
mc_init_misc_lat_timer);
}
/*
 * determine if there is enough bandwidth for the current mode
*/
temp_ff.full = rfixed_const(100);
mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
temp_ff.full = rfixed_const(temp);
mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
temp_ff.full = rfixed_const(1000);
pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
pix_clk.full = rfixed_div(pix_clk, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes1);
peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
temp_ff.full = rfixed_const(1000);
pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes2);
peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
}
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR
("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
printk("peak disp bw %d, mem_bw %d\n",
rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
}
/*
* Line Buffer Setup
* There is a single line buffer shared by both display controllers.
* DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
 * controllers. The partitioning can either be done manually or via one of four
* preset allocations specified in bits 1:0:
* 0 - line buffer is divided in half and shared between each display controller
* 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
* 2 - D1 gets the whole buffer
* 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
 * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual allocation mode.
* In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
* 14:4; D2 allocation follows D1.
*/
/* is auto or manual better ? */
dc_lb_memory_split =
RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
#if 1
/* auto */
if (mode1 && mode2) {
if (mode1->hdisplay > mode2->hdisplay) {
if (mode1->hdisplay > 2560)
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
else
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else if (mode2->hdisplay > mode1->hdisplay) {
if (mode2->hdisplay > 2560)
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
else
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else if (mode1) {
dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
} else if (mode2) {
dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
}
#else
/* manual */
dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
dc_lb_memory_split &=
~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
if (mode1) {
dc_lb_memory_split |=
((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
<< AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
} else if (mode2) {
dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
}
#endif
WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
}


@ -50,17 +50,18 @@
#include <pci.h>
#include <errno-base.h>
#include "drm_edid.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
#include "r300.h"
#include <syscall.h>
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_gart_size;
extern int radeon_r4xx_atom;
extern int radeon_gart_size;
extern int radeon_connector_table;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@ -212,8 +213,23 @@ struct radeon_object_list {
unsigned wdomain;
};
int radeon_object_init(struct radeon_device *rdev);
void radeon_object_fini(struct radeon_device *rdev);
int radeon_object_create(struct radeon_device *rdev,
struct drm_gem_object *gobj,
unsigned long size,
bool kernel,
uint32_t domain,
bool interruptible,
struct radeon_object **robj_ptr);
/*
* GEM objects.
*/
struct radeon_gem {
struct list_head objects;
};
@ -1040,40 +1056,6 @@ struct drm_agp_head {
};
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
*/
struct drm_device {
int irq_enabled; /**< True if irq handler is enabled */
__volatile__ long context_flag; /**< Context swapping flag */
__volatile__ long interrupt_flag; /**< Interruption handler flag */
__volatile__ long dma_flag; /**< DMA dispatch flag */
int last_checked; /**< Last context checked for DMA */
int last_context; /**< Last current context */
unsigned long last_switch; /**< jiffies at last context switch */
struct drm_agp_head *agp; /**< AGP data */
struct pci_dev *pdev; /**< PCI device structure */
int pci_vendor; /**< PCI vendor id */
int pci_device; /** PCI device id */
int num_crtcs; /**< Number of CRTCs on this device */
void *dev_private; /**< device private data */
void *mm_private;
// struct address_space *dev_mapping;
struct drm_mode_config mode_config; /**< Current mode config */
};
#define radeon_errata(rdev) (rdev)->asic->errata((rdev))


@ -23,7 +23,7 @@
* Authors: Dave Airlie
* Alex Deucher
*/
//#include "drmP.h"
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
@ -336,11 +336,11 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
else
linkb = false;
// radeon_add_atom_encoder(dev,
// enc_obj_id,
// le16_to_cpu
// (path->
// usDeviceTag));
radeon_add_atom_encoder(dev,
enc_obj_id,
le16_to_cpu
(path->
usDeviceTag));
}
}
@@ -406,18 +406,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
else
ddc_bus = radeon_lookup_gpio(dev, line_mux);
// radeon_add_atom_connector(dev,
// le16_to_cpu(path->
// usConnObjectId),
// le16_to_cpu(path->
// usDeviceTag),
// connector_type, &ddc_bus,
// linkb, igp_lane_info);
radeon_add_atom_connector(dev,
le16_to_cpu(path->
usConnObjectId),
le16_to_cpu(path->
usDeviceTag),
connector_type, &ddc_bus,
linkb, igp_lane_info);
}
}
// radeon_link_encoder_connector(dev);
radeon_link_encoder_connector(dev);
return true;
}
@@ -532,19 +532,19 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].valid = true;
bios_connectors[i].devices = (1 << i);
// if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
// radeon_add_atom_encoder(dev,
// radeon_get_encoder_id(dev,
// (1 << i),
// dac),
// (1 << i));
// else
// radeon_add_legacy_encoder(dev,
// radeon_get_encoder_id(dev,
// (1 <<
// i),
// dac),
// (1 << i));
if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
radeon_add_atom_encoder(dev,
radeon_get_encoder_id(dev,
(1 << i),
dac),
(1 << i));
else
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
(1 <<
i),
dac),
(1 << i));
}
/* combine shared connectors */
@@ -584,18 +584,18 @@ }
}
/* add the connectors */
// for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
// if (bios_connectors[i].valid)
// radeon_add_atom_connector(dev,
// bios_connectors[i].line_mux,
// bios_connectors[i].devices,
// bios_connectors[i].
// connector_type,
// &bios_connectors[i].ddc_bus,
// false, 0);
// }
for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
if (bios_connectors[i].valid)
radeon_add_atom_connector(dev,
bios_connectors[i].line_mux,
bios_connectors[i].devices,
bios_connectors[i].
connector_type,
&bios_connectors[i].ddc_bus,
false, 0);
}
// radeon_link_encoder_connector(dev);
radeon_link_encoder_connector(dev);
return true;
}

View File

@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
//#include "drmP.h"
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

View File

@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
//#include "drmP.h"
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"

File diff suppressed because it is too large

View File

@@ -0,0 +1,604 @@
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
//#include "drm_edid.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"
extern void
radeon_combios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
extern void
radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
static void
radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *best_encoder = NULL;
struct drm_encoder *encoder = NULL;
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
struct drm_mode_object *obj;
bool connected;
int i;
best_encoder = connector_funcs->best_encoder(connector);
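/* walk every encoder wired to this connector; only the best encoder
* is reported as connected in the BIOS scratch registers */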
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
obj = drm_mode_object_find(connector->dev,
connector->encoder_ids[i],
DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
encoder = obj_to_encoder(obj);
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
connected = false;
if (rdev->is_atom_bios)
radeon_atombios_connected_scratch_regs(connector, encoder, connected);
else
radeon_combios_connected_scratch_regs(connector, encoder, connected);
}
}
struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct drm_mode_object *obj;
struct drm_encoder *encoder;
/* pick the encoder ids */
if (enc_id) {
obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
if (!obj)
return NULL;
encoder = obj_to_encoder(obj);
return encoder;
}
return NULL;
}
static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *mode = NULL;
struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
if (native_mode->panel_xres != 0 &&
native_mode->panel_yres != 0 &&
native_mode->dotclock != 0) {
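/* construct a full DRM mode from the panel's stored native timings */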
mode = drm_mode_create(dev);
mode->hdisplay = native_mode->panel_xres;
mode->vdisplay = native_mode->panel_yres;
mode->htotal = mode->hdisplay + native_mode->hblank;
mode->hsync_start = mode->hdisplay + native_mode->hoverplus;
mode->hsync_end = mode->hsync_start + native_mode->hsync_width;
mode->vtotal = mode->vdisplay + native_mode->vblank;
mode->vsync_start = mode->vdisplay + native_mode->voverplus;
mode->vsync_end = mode->vsync_start + native_mode->vsync_width;
mode->clock = native_mode->dotclock;
mode->flags = 0;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
DRM_DEBUG("Adding native panel mode %s\n", mode->name);
}
return mode;
}
int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
uint64_t val)
{
return 0;
}
static int radeon_lvds_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int ret = 0;
struct drm_display_mode *mode;
if (radeon_connector->ddc_bus) {
ret = radeon_ddc_get_modes(radeon_connector);
if (ret > 0) {
return ret;
}
}
encoder = radeon_best_single_encoder(connector);
if (!encoder)
return 0;
/* we have no EDID modes */
mode = radeon_fp_native_mode(encoder);
if (mode) {
ret = 1;
drm_mode_probed_add(connector, mode);
}
return ret;
}
static int radeon_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
{
enum drm_connector_status ret = connector_status_connected;
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
kfree(radeon_connector->con_priv);
// drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
.get_modes = radeon_lvds_get_modes,
.mode_valid = radeon_lvds_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
struct drm_connector_funcs radeon_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
static int radeon_vga_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
ret = radeon_ddc_get_modes(radeon_connector);
return ret;
}
static int radeon_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
bool dret;
enum drm_connector_status ret = connector_status_disconnected;
radeon_i2c_do_lock(radeon_connector, 1);
dret = radeon_ddc_probe(radeon_connector);
radeon_i2c_do_lock(radeon_connector, 0);
if (dret)
ret = connector_status_connected;
else {
/* if the EDID probe fails, fall back to load detection */
encoder = radeon_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
else {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
.get_modes = radeon_vga_get_modes,
.mode_valid = radeon_vga_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
struct drm_connector_funcs radeon_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
static int radeon_dvi_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
ret = radeon_ddc_get_modes(radeon_connector);
/* reset scratch regs here since radeon_dvi_detect doesn't check digital bit */
radeon_connector_update_scratch_regs(connector, connector_status_connected);
return ret;
}
static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_mode_object *obj;
int i;
enum drm_connector_status ret = connector_status_disconnected;
bool dret;
radeon_i2c_do_lock(radeon_connector, 1);
dret = radeon_ddc_probe(radeon_connector);
radeon_i2c_do_lock(radeon_connector, 0);
if (dret)
ret = connector_status_connected;
else {
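/* no DDC response: fall back to each attached encoder's detect() hook */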
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
obj = drm_mode_object_find(connector->dev,
connector->encoder_ids[i],
DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
encoder = obj_to_encoder(obj);
encoder_funcs = encoder->helper_private;
if (encoder_funcs->detect) {
ret = encoder_funcs->detect(encoder, connector);
if (ret == connector_status_connected) {
radeon_connector->use_digital = 0;
break;
}
}
}
}
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
/* okay need to be smart in here about which encoder to pick */
struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
encoder = obj_to_encoder(obj);
if (radeon_connector->use_digital) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
} else {
if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
return encoder;
}
}
/* see if we have a default encoder TODO */
/* then check use_digital */
/* pick the first one */
if (enc_id) {
obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
if (!obj)
return NULL;
encoder = obj_to_encoder(obj);
return encoder;
}
return NULL;
}
struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
.get_modes = radeon_dvi_get_modes,
.mode_valid = radeon_vga_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
struct drm_connector_funcs radeon_dvi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
.destroy = radeon_connector_destroy,
};
void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb,
uint32_t igp_lane_info)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *radeon_dig_connector;
uint32_t subpixel_order = SubPixelNone;
/* fixme - tv/cv/din */
if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
(connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
(connector_type == DRM_MODE_CONNECTOR_Composite) ||
(connector_type == DRM_MODE_CONNECTOR_9PinDIN))
return;
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
if (radeon_connector->connector_id == connector_id) {
radeon_connector->devices |= supported_device;
return;
}
}
radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
if (!radeon_connector)
return;
connector = &radeon_connector->base;
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
goto failed;
}
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
goto failed;
}
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
break;
}
connector->display_info.subpixel_order = subpixel_order;
// drm_sysfs_connector_add(connector);
return;
failed:
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
}
void
radeon_add_legacy_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
/* fixme - tv/cv/din */
if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
(connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
(connector_type == DRM_MODE_CONNECTOR_Composite) ||
(connector_type == DRM_MODE_CONNECTOR_9PinDIN))
return;
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
if (radeon_connector->connector_id == connector_id) {
radeon_connector->devices |= supported_device;
return;
}
}
radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
if (!radeon_connector)
return;
connector = &radeon_connector->base;
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
goto failed;
}
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
goto failed;
}
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
break;
}
connector->display_info.subpixel_order = subpixel_order;
// drm_sysfs_connector_add(connector);
return;
failed:
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
}

View File

@@ -26,7 +26,8 @@
* Jerome Glisse
*/
//#include <linux/console.h>
//#include <drm/drmP.h>
#include <drmP.h>
//#include <drm/drm_crtc_helper.h>
#include "radeon_drm.h"
#include "radeon_reg.h"
@@ -36,9 +37,14 @@
#include <syscall.h>
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_agpmode = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
int radeon_gart_size = 512; /* default gart size */
int radeon_benchmarking = 0;
int radeon_connector_table = 0;
/*
@@ -896,3 +902,37 @@ resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resour
return pci_resource_len(dev->pdev, resource);
}
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
uint64_t rem = *n;
uint64_t b = base;
uint64_t res, d = 1;
uint32_t high = rem >> 32;
/* Reduce the thing a bit first */
res = 0;
if (high >= base) {
high /= base;
res = (uint64_t) high << 32;
rem -= (uint64_t) (high*base) << 32;
}
while ((int64_t)b > 0 && b < rem) {
b = b+b;
d = d+d;
}
do {
if (rem >= b) {
rem -= b;
res += d;
}
b >>= 1;
d >>= 1;
} while (d);
*n = res;
return rem;
}
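/* Usage sketch (illustrative, not part of the commit): __div64_32() follows
* the kernel do_div() convention - the 64-bit dividend is overwritten with
* the quotient and the 32-bit remainder is returned.
*/
static void div64_example(void)
{
uint64_t bytes = 10000000ULL;
uint32_t rem = __div64_32(&bytes, 4096);
/* now bytes == 2441 (quotient) and rem == 1664 (remainder) */
(void)rem;
}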

View File

@@ -0,0 +1,697 @@
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
//#include <asm/div64.h>
#include "drm_crtc_helper.h"
#include "drm_edid.h"
static int radeon_ddc_dump(struct drm_connector *connector);
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int i;
DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
WREG32(AVIVO_DC_LUT_RW_MODE, 0);
WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
for (i = 0; i < 256; i++) {
WREG32(AVIVO_DC_LUT_30_COLOR,
(radeon_crtc->lut_r[i] << 20) |
(radeon_crtc->lut_g[i] << 10) |
(radeon_crtc->lut_b[i] << 0));
}
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int i;
uint32_t dac2_cntl;
dac2_cntl = RREG32(RADEON_DAC_CNTL2);
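/* select which CRTC's palette the index/data registers below address */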
if (radeon_crtc->crtc_id == 0)
dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
else
dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
WREG32(RADEON_DAC_CNTL2, dac2_cntl);
WREG8(RADEON_PALETTE_INDEX, 0);
for (i = 0; i < 256; i++) {
WREG32(RADEON_PALETTE_30_DATA,
(radeon_crtc->lut_r[i] << 20) |
(radeon_crtc->lut_g[i] << 10) |
(radeon_crtc->lut_b[i] << 0));
}
}
void radeon_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
if (!crtc->enabled)
return;
if (ASIC_IS_AVIVO(rdev))
avivo_crtc_load_lut(crtc);
else
legacy_crtc_load_lut(crtc);
}
/** Sets the color ramps on behalf of RandR */
void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
if (regno == 0)
DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
radeon_crtc->lut_r[regno] = red >> 6;
radeon_crtc->lut_g[regno] = green >> 6;
radeon_crtc->lut_b[regno] = blue >> 6;
}
static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
int i, j;
if (size != 256) {
return;
}
if (crtc->fb == NULL) {
return;
}
if (crtc->fb->depth == 16) {
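/* RGB565: red/blue have only 32 input levels (each spread over 8 LUT slots),
* green has 64 (each spread over 4 slots) */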
for (i = 0; i < 64; i++) {
if (i <= 31) {
for (j = 0; j < 8; j++) {
radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6;
radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
}
}
for (j = 0; j < 4; j++)
radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
}
} else {
for (i = 0; i < 256; i++) {
radeon_crtc->lut_r[i] = red[i] >> 6;
radeon_crtc->lut_g[i] = green[i] >> 6;
radeon_crtc->lut_b[i] = blue[i] >> 6;
}
}
radeon_crtc_load_lut(crtc);
}
static void radeon_crtc_destroy(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
if (radeon_crtc->mode_set.mode) {
drm_mode_destroy(crtc->dev, radeon_crtc->mode_set.mode);
}
drm_crtc_cleanup(crtc);
kfree(radeon_crtc);
}
static const struct drm_crtc_funcs radeon_crtc_funcs = {
// .cursor_set = radeon_crtc_cursor_set,
// .cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set,
// .set_config = drm_crtc_helper_set_config,
.destroy = radeon_crtc_destroy,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc;
int i;
radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
if (radeon_crtc == NULL)
return;
drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
radeon_crtc->mode_set.crtc = &radeon_crtc->base;
radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
radeon_crtc->mode_set.num_connectors = 0;
for (i = 0; i < 256; i++) {
radeon_crtc->lut_r[i] = i << 2;
radeon_crtc->lut_g[i] = i << 2;
radeon_crtc->lut_b[i] = i << 2;
}
if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
radeon_atombios_init_crtc(dev, radeon_crtc);
else
radeon_legacy_init_crtc(dev, radeon_crtc);
}
static const char *encoder_names[34] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
"INTERNAL_TMDS2",
"INTERNAL_DAC1",
"INTERNAL_DAC2",
"INTERNAL_SDVOA",
"INTERNAL_SDVOB",
"SI170B",
"CH7303",
"CH7301",
"INTERNAL_DVO1",
"EXTERNAL_SDVOA",
"EXTERNAL_SDVOB",
"TITFP513",
"INTERNAL_LVTM1",
"VT1623",
"HDMI_SI1930",
"HDMI_INTERNAL",
"INTERNAL_KLDSCP_TMDS1",
"INTERNAL_KLDSCP_DVO1",
"INTERNAL_KLDSCP_DAC1",
"INTERNAL_KLDSCP_DAC2",
"SI178",
"MVPU_FPGA",
"INTERNAL_DDI",
"VT1625",
"HDMI_SI1932",
"DP_AN9801",
"DP_DP501",
"INTERNAL_UNIPHY",
"INTERNAL_KLDSCP_LVTMA",
"INTERNAL_UNIPHY1",
"INTERNAL_UNIPHY2",
};
static const char *connector_names[13] = {
"Unknown",
"VGA",
"DVI-I",
"DVI-D",
"DVI-A",
"Composite",
"S-video",
"LVDS",
"Component",
"DIN",
"DisplayPort",
"HDMI-A",
"HDMI-B",
};
static void radeon_print_display_setup(struct drm_device *dev)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
uint32_t devices;
int i = 0;
DRM_INFO("Radeon Display Connectors\n");
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
if (radeon_connector->ddc_bus)
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
radeon_connector->ddc_bus->rec.mask_clk_reg,
radeon_connector->ddc_bus->rec.mask_data_reg,
radeon_connector->ddc_bus->rec.a_clk_reg,
radeon_connector->ddc_bus->rec.a_data_reg,
radeon_connector->ddc_bus->rec.put_clk_reg,
radeon_connector->ddc_bus->rec.put_data_reg,
radeon_connector->ddc_bus->rec.get_clk_reg,
radeon_connector->ddc_bus->rec.get_data_reg);
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
devices = radeon_encoder->devices & radeon_connector->devices;
if (devices) {
if (devices & ATOM_DEVICE_CRT1_SUPPORT)
DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_CRT2_SUPPORT)
DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_LCD1_SUPPORT)
DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP1_SUPPORT)
DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP2_SUPPORT)
DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP3_SUPPORT)
DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP4_SUPPORT)
DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP5_SUPPORT)
DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_TV1_SUPPORT)
DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_CV_SUPPORT)
DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
}
}
i++;
}
}
bool radeon_setup_enc_conn(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *drm_connector;
bool ret = false;
if (rdev->bios) {
if (rdev->is_atom_bios) {
if (rdev->family >= CHIP_R600)
ret = radeon_get_atom_connector_info_from_object_table(dev);
else
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
} else
ret = radeon_get_legacy_connector_info_from_bios(dev);
} else {
if (!ASIC_IS_AVIVO(rdev))
ret = radeon_get_legacy_connector_info_from_table(dev);
}
if (ret) {
radeon_print_display_setup(dev);
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
radeon_ddc_dump(drm_connector);
}
return ret;
}
int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
struct edid *edid;
int ret = 0;
if (!radeon_connector->ddc_bus)
return -1;
radeon_i2c_do_lock(radeon_connector, 1);
edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
radeon_i2c_do_lock(radeon_connector, 0);
if (edid) {
/* update digital bits here */
if (edid->input & DRM_EDID_INPUT_DIGITAL)
radeon_connector->use_digital = 1;
else
radeon_connector->use_digital = 0;
drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
ret = drm_add_edid_modes(&radeon_connector->base, edid);
kfree(edid);
return ret;
}
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
return -1;
}
static int radeon_ddc_dump(struct drm_connector *connector)
{
struct edid *edid;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret = 0;
if (!radeon_connector->ddc_bus)
return -1;
radeon_i2c_do_lock(radeon_connector, 1);
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
radeon_i2c_do_lock(radeon_connector, 0);
if (edid) {
kfree(edid);
}
return ret;
}
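/* 64-by-32 division rounded to the nearest integer (adds d/2 before dividing) */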
static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
uint64_t mod;
n += d / 2;
mod = do_div(n, d);
return n;
}
void radeon_compute_pll(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p,
int flags)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
uint32_t min_fractional_feed_div = 0;
uint32_t max_fractional_feed_div = 0;
uint32_t best_vco = pll->best_vco;
uint32_t best_post_div = 1;
uint32_t best_ref_div = 1;
uint32_t best_feedback_div = 1;
uint32_t best_frac_feedback_div = 0;
uint32_t best_freq = -1;
uint32_t best_error = 0xffffffff;
uint32_t best_vco_diff = 1;
uint32_t post_div;
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
if (flags & RADEON_PLL_USE_REF_DIV)
min_ref_div = max_ref_div = pll->reference_div;
else {
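/* no fixed reference divider requested: binary-search one that keeps the
* PLL input clock within pll_in_min..pll_in_max */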
while (min_ref_div < max_ref_div-1) {
uint32_t mid = (min_ref_div + max_ref_div) / 2;
uint32_t pll_in = pll->reference_freq / mid;
if (pll_in < pll->pll_in_min)
max_ref_div = mid;
else if (pll_in > pll->pll_in_max)
min_ref_div = mid;
else
break;
}
}
if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
min_fractional_feed_div = pll->min_frac_feedback_div;
max_fractional_feed_div = pll->max_frac_feedback_div;
}
for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
uint32_t ref_div;
if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
continue;
/* legacy radeons only have a few post_divs */
if (flags & RADEON_PLL_LEGACY) {
if ((post_div == 5) ||
(post_div == 7) ||
(post_div == 9) ||
(post_div == 10) ||
(post_div == 11) ||
(post_div == 13) ||
(post_div == 14) ||
(post_div == 15))
continue;
}
for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
uint32_t feedback_div, current_freq = 0, error, vco_diff;
uint32_t pll_in = pll->reference_freq / ref_div;
uint32_t min_feed_div = pll->min_feedback_div;
uint32_t max_feed_div = pll->max_feedback_div + 1;
if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
continue;
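/* binary-search the feedback divider: first keep the VCO inside
* pll_out_min..pll_out_max, then home in on the requested frequency */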
while (min_feed_div < max_feed_div) {
uint32_t vco;
uint32_t min_frac_feed_div = min_fractional_feed_div;
uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
uint32_t frac_feedback_div;
uint64_t tmp;
feedback_div = (min_feed_div + max_feed_div) / 2;
tmp = (uint64_t)pll->reference_freq * feedback_div;
vco = radeon_div(tmp, ref_div);
if (vco < pll->pll_out_min) {
min_feed_div = feedback_div + 1;
continue;
} else if (vco > pll->pll_out_max) {
max_feed_div = feedback_div;
continue;
}
while (min_frac_feed_div < max_frac_feed_div) {
frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
current_freq = radeon_div(tmp, ref_div * post_div);
error = abs(current_freq - freq);
vco_diff = abs(vco - best_vco);
if ((best_vco == 0 && error < best_error) ||
(best_vco != 0 &&
(error < best_error - 100 ||
(abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
best_post_div = post_div;
best_ref_div = ref_div;
best_feedback_div = feedback_div;
best_frac_feedback_div = frac_feedback_div;
best_freq = current_freq;
best_error = error;
best_vco_diff = vco_diff;
} else if (current_freq == freq) {
if (best_freq == -1) {
best_post_div = post_div;
best_ref_div = ref_div;
best_feedback_div = feedback_div;
best_frac_feedback_div = frac_feedback_div;
best_freq = current_freq;
best_error = error;
best_vco_diff = vco_diff;
} else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
best_post_div = post_div;
best_ref_div = ref_div;
best_feedback_div = feedback_div;
best_frac_feedback_div = frac_feedback_div;
best_freq = current_freq;
best_error = error;
best_vco_diff = vco_diff;
}
}
if (current_freq < freq)
min_frac_feed_div = frac_feedback_div + 1;
else
max_frac_feed_div = frac_feedback_div;
}
if (current_freq < freq)
min_feed_div = feedback_div + 1;
else
max_feed_div = feedback_div;
}
}
}
*dot_clock_p = best_freq / 10000;
*fb_div_p = best_feedback_div;
*frac_fb_div_p = best_frac_feedback_div;
*ref_div_p = best_ref_div;
*post_div_p = best_post_div;
}
#if 0
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
struct drm_device *dev = fb->dev;
if (fb->fbdev)
radeonfb_remove(dev, fb);
// if (radeon_fb->obj) {
// radeon_gem_object_unpin(radeon_fb->obj);
// mutex_lock(&dev->struct_mutex);
// drm_gem_object_unreference(radeon_fb->obj);
// mutex_unlock(&dev->struct_mutex);
// }
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
return 0; /* handle creation is stubbed out, see the commented call below */
// return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
}
static const struct drm_framebuffer_funcs radeon_fb_funcs = {
.destroy = radeon_user_framebuffer_destroy,
.create_handle = radeon_user_framebuffer_create_handle,
};
struct drm_framebuffer *
radeon_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj)
{
struct radeon_framebuffer *radeon_fb;
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
return NULL;
}
drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
radeon_fb->obj = obj;
return &radeon_fb->base;
}
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
return radeon_framebuffer_create(dev, mode_cmd, obj);
}
static const struct drm_mode_config_funcs radeon_mode_funcs = {
.fb_create = radeon_user_framebuffer_create,
.fb_changed = radeonfb_probe,
};
#endif
int radeon_modeset_init(struct radeon_device *rdev)
{
int num_crtc = 2, i;
int ret;
drm_mode_config_init(rdev->ddev);
rdev->mode_info.mode_config_initialized = true;
// rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
if (ASIC_IS_AVIVO(rdev)) {
rdev->ddev->mode_config.max_width = 8192;
rdev->ddev->mode_config.max_height = 8192;
} else {
rdev->ddev->mode_config.max_width = 4096;
rdev->ddev->mode_config.max_height = 4096;
}
rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
/* allocate crtcs - TODO single crtc */
for (i = 0; i < num_crtc; i++) {
radeon_crtc_init(rdev->ddev, i);
}
/* okay we should have all the bios connectors */
ret = radeon_setup_enc_conn(rdev->ddev);
if (!ret) {
return ret;
}
drm_helper_initial_config(rdev->ddev);
return 0;
}
void radeon_modeset_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.mode_config_initialized) {
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
}
void radeon_init_disp_bandwidth(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_display_mode *modes[2];
int pixel_bytes[2];
struct drm_crtc *crtc;
pixel_bytes[0] = pixel_bytes[1] = 0;
modes[0] = modes[1] = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && crtc->fb) {
modes[radeon_crtc->crtc_id] = &crtc->mode;
pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8;
}
}
if (ASIC_IS_AVIVO(rdev)) {
radeon_init_disp_bw_avivo(dev,
modes[0],
pixel_bytes[0],
modes[1],
pixel_bytes[1]);
} else {
radeon_init_disp_bw_legacy(dev,
modes[0],
pixel_bytes[0],
modes[1],
pixel_bytes[1]);
}
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright 2009 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
*/
#ifndef RADEON_FIXED_H
#define RADEON_FIXED_H
typedef union rfixed {
u32 full;
} fixed20_12;
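/* unsigned 20.12 fixed point: upper 20 bits integer part, lower 12 bits fraction */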
#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
#define fixed_init(A) { .full = rfixed_const((A)) }
#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
#define rfixed_trunc(A) ((A).full >> 12)
static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
{
u64 tmp = ((u64)A.full << 13);
do_div(tmp, B.full);
tmp += 1;
tmp /= 2;
return lower_32_bits(tmp);
}
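/* Illustrative usage sketch, not part of the original commit: converting to
* 20.12 fixed point, dividing and truncating back to an integer. The helper
* name and values are made up.
*/
static inline u32 rfixed_scale_example(u32 src_w, u32 dst_w, u32 mode_w)
{
fixed20_12 a, b, ratio;
a.full = rfixed_const(src_w);
b.full = rfixed_const(dst_w);
ratio.full = rfixed_div(a, b);        /* src_w / dst_w in 20.12      */
a.full = rfixed_const(mode_w);
a.full = rfixed_mul(a, ratio);        /* scale mode_w by the ratio   */
return rfixed_trunc(a);               /* drop the fractional bits    */
}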
#endif

View File

@@ -0,0 +1,209 @@
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
/**
* radeon_ddc_probe
*
* Sanity-check the DDC bus by reading a single byte from the EDID
* address (0x50); returns true if a device answered.
*/
bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
{
u8 out_buf[] = { 0x0, 0x0};
u8 buf[2];
int ret;
struct i2c_msg msgs[] = {
{
.addr = 0x50,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = 0x50,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
}
};
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
if (ret == 2)
return true;
return false;
}
void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
{
struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
uint32_t temp;
struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
/* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before
* doing DDC - do this for all r200s/r300s/r400s for safety's sake
*/
if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
if (rec->a_clk_reg == RADEON_GPIO_MONID) {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
} else {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
}
}
if (lock_state) {
temp = RREG32(rec->a_clk_reg);
temp &= ~(rec->a_clk_mask);
WREG32(rec->a_clk_reg, temp);
temp = RREG32(rec->a_data_reg);
temp &= ~(rec->a_data_mask);
WREG32(rec->a_data_reg, temp);
}
temp = RREG32(rec->mask_clk_reg);
if (lock_state)
temp |= rec->mask_clk_mask;
else
temp &= ~rec->mask_clk_mask;
WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
temp = RREG32(rec->mask_data_reg);
if (lock_state)
temp |= rec->mask_data_mask;
else
temp &= ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
}
static int get_clock(void *i2c_priv)
{
struct radeon_i2c_chan *i2c = i2c_priv;
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
val = RREG32(rec->get_clk_reg);
val &= rec->get_clk_mask;
return (val != 0);
}
static int get_data(void *i2c_priv)
{
struct radeon_i2c_chan *i2c = i2c_priv;
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
val = RREG32(rec->get_data_reg);
val &= rec->get_data_mask;
return (val != 0);
}
static void set_clock(void *i2c_priv, int clock)
{
struct radeon_i2c_chan *i2c = i2c_priv;
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
val |= clock ? 0 : rec->put_clk_mask;
WREG32(rec->put_clk_reg, val);
}
static void set_data(void *i2c_priv, int data)
{
struct radeon_i2c_chan *i2c = i2c_priv;
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
val |= data ? 0 : rec->put_data_mask;
WREG32(rec->put_data_reg, val);
}
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
{
struct radeon_i2c_chan *i2c;
int ret;
i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
if (i2c == NULL)
return NULL;
// i2c->adapter.owner = THIS_MODULE;
i2c->adapter.algo_data = &i2c->algo;
i2c->dev = dev;
i2c->algo.setsda = set_data;
i2c->algo.setscl = set_clock;
i2c->algo.getsda = get_data;
i2c->algo.getscl = get_clock;
i2c->algo.udelay = 20;
/* VESA says 2.2 ms is enough; 1 jiffy doesn't always cover that,
* so 2 jiffies is a lot more reliable */
i2c->algo.timeout = 2;
i2c->algo.data = i2c;
i2c->rec = *rec;
i2c_set_adapdata(&i2c->adapter, i2c);
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
DRM_INFO("Failed to register i2c %s\n", name);
goto out_free;
}
return i2c;
out_free:
kfree(i2c);
return NULL;
}
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
return NULL;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -32,8 +32,8 @@
#include "drm_mode.h"
#include "drm_crtc.h"
#include <drm_edid.h>
//#include <drm_edid.h>
//#include <linux/i2c.h>
//#include <linux/i2c-id.h>
//#include <linux/i2c-algo-bit.h>
@@ -148,8 +148,8 @@ struct radeon_pll {
struct radeon_i2c_chan {
struct drm_device *dev;
// struct i2c_adapter adapter;
// struct i2c_algo_bit_data algo;
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
struct radeon_i2c_bus_rec rec;
};
@@ -174,14 +174,14 @@ struct radeon_mode_info {
};
struct radeon_crtc {
// struct drm_crtc base;
struct drm_crtc base;
int crtc_id;
u16_t lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
uint32_t crtc_offset;
struct radeon_framebuffer *fbdev_fb;
// struct drm_mode_set mode_set;
struct drm_mode_set mode_set;
// struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
int cursor_width;
@@ -272,8 +272,8 @@ struct radeon_connector {
};
struct radeon_framebuffer {
// struct drm_framebuffer base;
// struct drm_gem_object *obj;
struct drm_framebuffer base;
struct drm_gem_object *obj;
};
extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
@@ -283,7 +283,7 @@ extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
//extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
extern void radeon_compute_pll(struct radeon_pll *pll,
uint64_t freq,
@@ -302,7 +302,6 @@ struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, i
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
/*
extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
@@ -396,5 +395,5 @@ void radeon_init_disp_bw_avivo(struct drm_device *dev,
struct drm_display_mode *mode2,
uint32_t pixel_bytes2);
void radeon_init_disp_bandwidth(struct drm_device *dev);
*/
#endif