kms: rc9
git-svn-id: svn://kolibrios.org@1404 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
parent
371d66a59b
commit
28dbebf492
@ -158,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
|
||||
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
|
||||
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
|
||||
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
|
||||
{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
|
||||
};
|
||||
|
||||
static struct drm_prop_enum_list drm_encoder_enum_list[] =
|
||||
|
@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
|
||||
EXPORT_SYMBOL(drm_helper_crtc_in_use);
|
||||
|
||||
/**
|
||||
* drm_disable_unused_functions - disable unused objects
|
||||
* drm_helper_disable_unused_functions - disable unused objects
|
||||
* @dev: DRM device
|
||||
*
|
||||
* LOCKING:
|
||||
@ -572,7 +572,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
|
||||
struct drm_crtc *tmp;
|
||||
int crtc_mask = 1;
|
||||
|
||||
// WARN(!crtc, "checking null crtc?");
|
||||
WARN(!crtc, "checking null crtc?");
|
||||
|
||||
dev = crtc->dev;
|
||||
|
||||
@ -702,7 +702,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
|
||||
if (encoder->crtc != crtc)
|
||||
continue;
|
||||
|
||||
DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
|
||||
DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
|
||||
mode->name, mode->base.id);
|
||||
encoder_funcs = encoder->helper_private;
|
||||
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
|
||||
@ -1021,9 +1021,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
|
||||
int count = 0;
|
||||
|
||||
/* disable all the possible outputs/crtcs before entering KMS mode */
|
||||
drm_helper_disable_unused_functions(dev);
|
||||
// drm_helper_disable_unused_functions(dev);
|
||||
|
||||
drm_fb_helper_parse_command_line(dev);
|
||||
// drm_fb_helper_parse_command_line(dev);
|
||||
|
||||
count = drm_helper_probe_connector_modes(dev,
|
||||
dev->mode_config.max_width,
|
||||
@ -1032,7 +1032,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
|
||||
/*
|
||||
* we shouldn't end up with no modes here.
|
||||
*/
|
||||
// WARN(!count, "Connected connector with 0 modes\n");
|
||||
if (count == 0)
|
||||
printk(KERN_INFO "No connectors reported connected with modes\n");
|
||||
|
||||
drm_setup_crtcs(dev);
|
||||
|
||||
@ -1162,6 +1163,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
|
||||
int drm_helper_resume_force_mode(struct drm_device *dev)
|
||||
{
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_encoder_helper_funcs *encoder_funcs;
|
||||
struct drm_crtc_helper_funcs *crtc_funcs;
|
||||
int ret;
|
||||
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
@ -1174,6 +1178,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
|
||||
|
||||
if (ret == false)
|
||||
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
|
||||
|
||||
/* Turn off outputs that were already powered off */
|
||||
if (drm_helper_choose_crtc_dpms(crtc)) {
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
|
||||
if(encoder->crtc != crtc)
|
||||
continue;
|
||||
|
||||
encoder_funcs = encoder->helper_private;
|
||||
if (encoder_funcs->dpms)
|
||||
(*encoder_funcs->dpms) (encoder,
|
||||
drm_helper_choose_encoder_dpms(encoder));
|
||||
|
||||
crtc_funcs = crtc->helper_private;
|
||||
if (crtc_funcs->dpms)
|
||||
(*crtc_funcs->dpms) (crtc,
|
||||
drm_helper_choose_crtc_dpms(crtc));
|
||||
}
|
||||
}
|
||||
}
|
||||
/* disable the unused connectors while restoring the modesetting */
|
||||
drm_helper_disable_unused_functions(dev);
|
||||
|
209
drivers/video/drm/drm_dp_i2c_helper.c
Normal file
209
drivers/video/drm/drm_dp_i2c_helper.c
Normal file
@ -0,0 +1,209 @@
|
||||
/*
|
||||
* Copyright © 2009 Keith Packard
|
||||
*
|
||||
* Permission to use, copy, modify, distribute, and sell this software and its
|
||||
* documentation for any purpose is hereby granted without fee, provided that
|
||||
* the above copyright notice appear in all copies and that both that copyright
|
||||
* notice and this permission notice appear in supporting documentation, and
|
||||
* that the name of the copyright holders not be used in advertising or
|
||||
* publicity pertaining to distribution of the software without specific,
|
||||
* written prior permission. The copyright holders make no representations
|
||||
* about the suitability of this software for any purpose. It is provided "as
|
||||
* is" without express or implied warranty.
|
||||
*
|
||||
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
||||
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
|
||||
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
||||
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
|
||||
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
||||
* OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
//#include <linux/delay.h>
|
||||
//#include <linux/slab.h>
|
||||
//#include <linux/init.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/i2c.h>
|
||||
#include "drm_dp_helper.h"
|
||||
#include "drmP.h"
|
||||
|
||||
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
|
||||
static int
|
||||
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
|
||||
uint8_t write_byte, uint8_t *read_byte)
|
||||
{
|
||||
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
|
||||
int ret;
|
||||
|
||||
ret = (*algo_data->aux_ch)(adapter, mode,
|
||||
write_byte, read_byte);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* I2C over AUX CH
|
||||
*/
|
||||
|
||||
/*
|
||||
* Send the address. If the I2C link is running, this 'restarts'
|
||||
* the connection with the new address, this is used for doing
|
||||
* a write followed by a read (as needed for DDC)
|
||||
*/
|
||||
static int
|
||||
i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
|
||||
{
|
||||
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
|
||||
int mode = MODE_I2C_START;
|
||||
int ret;
|
||||
|
||||
if (reading)
|
||||
mode |= MODE_I2C_READ;
|
||||
else
|
||||
mode |= MODE_I2C_WRITE;
|
||||
algo_data->address = address;
|
||||
algo_data->running = true;
|
||||
ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Stop the I2C transaction. This closes out the link, sending
|
||||
* a bare address packet with the MOT bit turned off
|
||||
*/
|
||||
static void
|
||||
i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
|
||||
{
|
||||
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
|
||||
int mode = MODE_I2C_STOP;
|
||||
|
||||
if (reading)
|
||||
mode |= MODE_I2C_READ;
|
||||
else
|
||||
mode |= MODE_I2C_WRITE;
|
||||
if (algo_data->running) {
|
||||
(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
|
||||
algo_data->running = false;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Write a single byte to the current I2C address, the
|
||||
* the I2C link must be running or this returns -EIO
|
||||
*/
|
||||
static int
|
||||
i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
|
||||
{
|
||||
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
|
||||
int ret;
|
||||
|
||||
if (!algo_data->running)
|
||||
return -EIO;
|
||||
|
||||
ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read a single byte from the current I2C address, the
|
||||
* I2C link must be running or this returns -EIO
|
||||
*/
|
||||
static int
|
||||
i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
|
||||
{
|
||||
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
|
||||
int ret;
|
||||
|
||||
if (!algo_data->running)
|
||||
return -EIO;
|
||||
|
||||
ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
|
||||
struct i2c_msg *msgs,
|
||||
int num)
|
||||
{
|
||||
int ret = 0;
|
||||
bool reading = false;
|
||||
int m;
|
||||
int b;
|
||||
|
||||
for (m = 0; m < num; m++) {
|
||||
u16 len = msgs[m].len;
|
||||
u8 *buf = msgs[m].buf;
|
||||
reading = (msgs[m].flags & I2C_M_RD) != 0;
|
||||
ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
|
||||
if (ret < 0)
|
||||
break;
|
||||
if (reading) {
|
||||
for (b = 0; b < len; b++) {
|
||||
ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
for (b = 0; b < len; b++) {
|
||||
ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
if (ret >= 0)
|
||||
ret = num;
|
||||
i2c_algo_dp_aux_stop(adapter, reading);
|
||||
DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32
|
||||
i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
|
||||
{
|
||||
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
|
||||
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
|
||||
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
|
||||
I2C_FUNC_10BIT_ADDR;
|
||||
}
|
||||
|
||||
static const struct i2c_algorithm i2c_dp_aux_algo = {
|
||||
.master_xfer = i2c_algo_dp_aux_xfer,
|
||||
.functionality = i2c_algo_dp_aux_functionality,
|
||||
};
|
||||
|
||||
static void
|
||||
i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
|
||||
{
|
||||
(void) i2c_algo_dp_aux_address(adapter, 0, false);
|
||||
(void) i2c_algo_dp_aux_stop(adapter, false);
|
||||
|
||||
}
|
||||
|
||||
static int
|
||||
i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
|
||||
{
|
||||
adapter->algo = &i2c_dp_aux_algo;
|
||||
adapter->retries = 3;
|
||||
i2c_dp_aux_reset_bus(adapter);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = i2c_dp_aux_prepare_bus(adapter);
|
||||
if (error)
|
||||
return error;
|
||||
// error = i2c_add_adapter(adapter);
|
||||
return error;
|
||||
}
|
||||
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
|
@ -633,8 +633,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
|
||||
return NULL;
|
||||
}
|
||||
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
|
||||
printk(KERN_WARNING "integrated sync not supported\n");
|
||||
return NULL;
|
||||
printk(KERN_WARNING "composite sync not supported\n");
|
||||
}
|
||||
|
||||
/* it is incorrect if hsync/vsync width is zero */
|
||||
@ -911,23 +910,27 @@ static int drm_cvt_modes(struct drm_connector *connector,
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct cvt_timing *cvt;
|
||||
const int rates[] = { 60, 85, 75, 60, 50 };
|
||||
const u8 empty[3] = { 0, 0, 0 };
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
int width, height;
|
||||
int uninitialized_var(width), height;
|
||||
cvt = &(timing->data.other_data.data.cvt[i]);
|
||||
|
||||
height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
|
||||
switch (cvt->code[1] & 0xc0) {
|
||||
if (!memcmp(cvt->code, empty, 3))
|
||||
continue;
|
||||
|
||||
height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
|
||||
switch (cvt->code[1] & 0x0c) {
|
||||
case 0x00:
|
||||
width = height * 4 / 3;
|
||||
break;
|
||||
case 0x40:
|
||||
case 0x04:
|
||||
width = height * 16 / 9;
|
||||
break;
|
||||
case 0x80:
|
||||
case 0x08:
|
||||
width = height * 16 / 10;
|
||||
break;
|
||||
case 0xc0:
|
||||
case 0x0c:
|
||||
width = height * 15 / 9;
|
||||
break;
|
||||
}
|
||||
@ -1027,7 +1030,7 @@ static int add_detailed_info(struct drm_connector *connector,
|
||||
|
||||
modes += add_detailed_modes(connector, timing, edid, quirks,
|
||||
preferred);
|
||||
}
|
||||
}
|
||||
|
||||
return modes;
|
||||
}
|
||||
|
@ -175,7 +175,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
|
||||
break;
|
||||
/* Display: Off; HSync: On, VSync: On */
|
||||
case FB_BLANK_NORMAL:
|
||||
drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
|
||||
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
|
||||
break;
|
||||
/* Display: Off; HSync: Off, VSync: On */
|
||||
case FB_BLANK_HSYNC_SUSPEND:
|
||||
@ -392,11 +392,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
|
||||
return -EINVAL;
|
||||
|
||||
/* Need to resize the fb object !!! */
|
||||
if (var->xres > fb->width || var->yres > fb->height) {
|
||||
DRM_ERROR("Requested width/height is greater than current fb "
|
||||
"object %dx%d > %dx%d\n", var->xres, var->yres,
|
||||
fb->width, fb->height);
|
||||
DRM_ERROR("Need resizing code.\n");
|
||||
if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
|
||||
DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
|
||||
"object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
|
||||
fb->width, fb->height, fb->bits_per_pixel);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -355,7 +355,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
|
||||
if (entry->size >= size + wasted) {
|
||||
if (!best_match)
|
||||
return entry;
|
||||
if (size < best_size) {
|
||||
if (entry->size < best_size) {
|
||||
best = entry;
|
||||
best_size = entry->size;
|
||||
}
|
||||
@ -405,7 +405,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
|
||||
if (entry->size >= size + wasted) {
|
||||
if (!best_match)
|
||||
return entry;
|
||||
if (size < best_size) {
|
||||
if (entry->size < best_size) {
|
||||
best = entry;
|
||||
best_size = entry->size;
|
||||
}
|
||||
|
@ -1,9 +1,4 @@
|
||||
/*
|
||||
* The list_sort function is (presumably) licensed under the GPL (see the
|
||||
* top level "COPYING" file for details).
|
||||
*
|
||||
* The remainder of this file is:
|
||||
*
|
||||
* Copyright © 1997-2003 by The XFree86 Project, Inc.
|
||||
* Copyright © 2007 Dave Airlie
|
||||
* Copyright © 2007-2008 Intel Corporation
|
||||
@ -36,6 +31,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/list_sort.h>
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "drm_crtc.h"
|
||||
@ -855,6 +851,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
|
||||
|
||||
/**
|
||||
* drm_mode_compare - compare modes for favorability
|
||||
* @priv: unused
|
||||
* @lh_a: list_head for first mode
|
||||
* @lh_b: list_head for second mode
|
||||
*
|
||||
@ -868,7 +865,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
|
||||
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
|
||||
* positive if @lh_b is better than @lh_a.
|
||||
*/
|
||||
static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
|
||||
static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
|
||||
{
|
||||
struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
|
||||
struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
|
||||
@ -885,85 +882,6 @@ static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
|
||||
return diff;
|
||||
}
|
||||
|
||||
/* FIXME: what we don't have a list sort function? */
|
||||
/* list sort from Mark J Roberts (mjr@znex.org) */
|
||||
void list_sort(struct list_head *head,
|
||||
int (*cmp)(struct list_head *a, struct list_head *b))
|
||||
{
|
||||
struct list_head *p, *q, *e, *list, *tail, *oldhead;
|
||||
int insize, nmerges, psize, qsize, i;
|
||||
|
||||
list = head->next;
|
||||
list_del(head);
|
||||
insize = 1;
|
||||
for (;;) {
|
||||
p = oldhead = list;
|
||||
list = tail = NULL;
|
||||
nmerges = 0;
|
||||
|
||||
while (p) {
|
||||
nmerges++;
|
||||
q = p;
|
||||
psize = 0;
|
||||
for (i = 0; i < insize; i++) {
|
||||
psize++;
|
||||
q = q->next == oldhead ? NULL : q->next;
|
||||
if (!q)
|
||||
break;
|
||||
}
|
||||
|
||||
qsize = insize;
|
||||
while (psize > 0 || (qsize > 0 && q)) {
|
||||
if (!psize) {
|
||||
e = q;
|
||||
q = q->next;
|
||||
qsize--;
|
||||
if (q == oldhead)
|
||||
q = NULL;
|
||||
} else if (!qsize || !q) {
|
||||
e = p;
|
||||
p = p->next;
|
||||
psize--;
|
||||
if (p == oldhead)
|
||||
p = NULL;
|
||||
} else if (cmp(p, q) <= 0) {
|
||||
e = p;
|
||||
p = p->next;
|
||||
psize--;
|
||||
if (p == oldhead)
|
||||
p = NULL;
|
||||
} else {
|
||||
e = q;
|
||||
q = q->next;
|
||||
qsize--;
|
||||
if (q == oldhead)
|
||||
q = NULL;
|
||||
}
|
||||
if (tail)
|
||||
tail->next = e;
|
||||
else
|
||||
list = e;
|
||||
e->prev = tail;
|
||||
tail = e;
|
||||
}
|
||||
p = q;
|
||||
}
|
||||
|
||||
tail->next = list;
|
||||
list->prev = tail;
|
||||
|
||||
if (nmerges <= 1)
|
||||
break;
|
||||
|
||||
insize *= 2;
|
||||
}
|
||||
|
||||
head->next = list;
|
||||
head->prev = list->prev;
|
||||
list->prev->next = head;
|
||||
list->prev = head;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_mode_sort - sort mode list
|
||||
* @mode_list: list to sort
|
||||
@ -975,7 +893,7 @@ void list_sort(struct list_head *head,
|
||||
*/
|
||||
void drm_mode_sort(struct list_head *mode_list)
|
||||
{
|
||||
list_sort(mode_list, drm_mode_compare);
|
||||
list_sort(NULL, mode_list, drm_mode_compare);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mode_sort);
|
||||
|
||||
|
@ -34,6 +34,55 @@
|
||||
#include "drmP.h"
|
||||
#include "drm_crtc.h"
|
||||
|
||||
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
|
||||
{
|
||||
const unsigned long *p = addr;
|
||||
unsigned long result = 0;
|
||||
unsigned long tmp;
|
||||
|
||||
while (size & ~(BITS_PER_LONG-1)) {
|
||||
if ((tmp = *(p++)))
|
||||
goto found;
|
||||
result += BITS_PER_LONG;
|
||||
size -= BITS_PER_LONG;
|
||||
}
|
||||
if (!size)
|
||||
return result;
|
||||
|
||||
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
|
||||
if (tmp == 0UL) /* Are any bits set? */
|
||||
return result + size; /* Nope. */
|
||||
found:
|
||||
return result + __ffs(tmp);
|
||||
}
|
||||
|
||||
int find_next_bit(const unsigned long *addr, int size, int offset)
|
||||
{
|
||||
const unsigned long *p = addr + (offset >> 5);
|
||||
int set = 0, bit = offset & 31, res;
|
||||
|
||||
if (bit)
|
||||
{
|
||||
/*
|
||||
* Look for nonzero in the first 32 bits:
|
||||
*/
|
||||
__asm__("bsfl %1,%0\n\t"
|
||||
"jne 1f\n\t"
|
||||
"movl $32, %0\n"
|
||||
"1:"
|
||||
: "=r" (set)
|
||||
: "r" (*p >> bit));
|
||||
if (set < (32 - bit))
|
||||
return set + offset;
|
||||
set = 32 - bit;
|
||||
p++;
|
||||
}
|
||||
/*
|
||||
* No set bit yet, search remaining full words for a bit
|
||||
*/
|
||||
res = find_first_bit (p, size - 32 * (p - addr));
|
||||
return (offset + set + res);
|
||||
}
|
||||
|
||||
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
|
||||
|
||||
|
@ -1,111 +0,0 @@
|
||||
#ifndef _ASM_GENERIC_ERRNO_H
|
||||
#define _ASM_GENERIC_ERRNO_H
|
||||
|
||||
#include <errno-base.h>
|
||||
|
||||
#define EDEADLK 35 /* Resource deadlock would occur */
|
||||
#define ENAMETOOLONG 36 /* File name too long */
|
||||
#define ENOLCK 37 /* No record locks available */
|
||||
#define ENOSYS 38 /* Function not implemented */
|
||||
#define ENOTEMPTY 39 /* Directory not empty */
|
||||
#define ELOOP 40 /* Too many symbolic links encountered */
|
||||
#define EWOULDBLOCK EAGAIN /* Operation would block */
|
||||
#define ENOMSG 42 /* No message of desired type */
|
||||
#define EIDRM 43 /* Identifier removed */
|
||||
#define ECHRNG 44 /* Channel number out of range */
|
||||
#define EL2NSYNC 45 /* Level 2 not synchronized */
|
||||
#define EL3HLT 46 /* Level 3 halted */
|
||||
#define EL3RST 47 /* Level 3 reset */
|
||||
#define ELNRNG 48 /* Link number out of range */
|
||||
#define EUNATCH 49 /* Protocol driver not attached */
|
||||
#define ENOCSI 50 /* No CSI structure available */
|
||||
#define EL2HLT 51 /* Level 2 halted */
|
||||
#define EBADE 52 /* Invalid exchange */
|
||||
#define EBADR 53 /* Invalid request descriptor */
|
||||
#define EXFULL 54 /* Exchange full */
|
||||
#define ENOANO 55 /* No anode */
|
||||
#define EBADRQC 56 /* Invalid request code */
|
||||
#define EBADSLT 57 /* Invalid slot */
|
||||
|
||||
#define EDEADLOCK EDEADLK
|
||||
|
||||
#define EBFONT 59 /* Bad font file format */
|
||||
#define ENOSTR 60 /* Device not a stream */
|
||||
#define ENODATA 61 /* No data available */
|
||||
#define ETIME 62 /* Timer expired */
|
||||
#define ENOSR 63 /* Out of streams resources */
|
||||
#define ENONET 64 /* Machine is not on the network */
|
||||
#define ENOPKG 65 /* Package not installed */
|
||||
#define EREMOTE 66 /* Object is remote */
|
||||
#define ENOLINK 67 /* Link has been severed */
|
||||
#define EADV 68 /* Advertise error */
|
||||
#define ESRMNT 69 /* Srmount error */
|
||||
#define ECOMM 70 /* Communication error on send */
|
||||
#define EPROTO 71 /* Protocol error */
|
||||
#define EMULTIHOP 72 /* Multihop attempted */
|
||||
#define EDOTDOT 73 /* RFS specific error */
|
||||
#define EBADMSG 74 /* Not a data message */
|
||||
#define EOVERFLOW 75 /* Value too large for defined data type */
|
||||
#define ENOTUNIQ 76 /* Name not unique on network */
|
||||
#define EBADFD 77 /* File descriptor in bad state */
|
||||
#define EREMCHG 78 /* Remote address changed */
|
||||
#define ELIBACC 79 /* Can not access a needed shared library */
|
||||
#define ELIBBAD 80 /* Accessing a corrupted shared library */
|
||||
#define ELIBSCN 81 /* .lib section in a.out corrupted */
|
||||
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
|
||||
#define ELIBEXEC 83 /* Cannot exec a shared library directly */
|
||||
#define EILSEQ 84 /* Illegal byte sequence */
|
||||
#define ERESTART 85 /* Interrupted system call should be restarted */
|
||||
#define ESTRPIPE 86 /* Streams pipe error */
|
||||
#define EUSERS 87 /* Too many users */
|
||||
#define ENOTSOCK 88 /* Socket operation on non-socket */
|
||||
#define EDESTADDRREQ 89 /* Destination address required */
|
||||
#define EMSGSIZE 90 /* Message too long */
|
||||
#define EPROTOTYPE 91 /* Protocol wrong type for socket */
|
||||
#define ENOPROTOOPT 92 /* Protocol not available */
|
||||
#define EPROTONOSUPPORT 93 /* Protocol not supported */
|
||||
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
|
||||
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
|
||||
#define EPFNOSUPPORT 96 /* Protocol family not supported */
|
||||
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
|
||||
#define EADDRINUSE 98 /* Address already in use */
|
||||
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
|
||||
#define ENETDOWN 100 /* Network is down */
|
||||
#define ENETUNREACH 101 /* Network is unreachable */
|
||||
#define ENETRESET 102 /* Network dropped connection because of reset */
|
||||
#define ECONNABORTED 103 /* Software caused connection abort */
|
||||
#define ECONNRESET 104 /* Connection reset by peer */
|
||||
#define ENOBUFS 105 /* No buffer space available */
|
||||
#define EISCONN 106 /* Transport endpoint is already connected */
|
||||
#define ENOTCONN 107 /* Transport endpoint is not connected */
|
||||
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
|
||||
#define ETOOMANYREFS 109 /* Too many references: cannot splice */
|
||||
#define ETIMEDOUT 110 /* Connection timed out */
|
||||
#define ECONNREFUSED 111 /* Connection refused */
|
||||
#define EHOSTDOWN 112 /* Host is down */
|
||||
#define EHOSTUNREACH 113 /* No route to host */
|
||||
#define EALREADY 114 /* Operation already in progress */
|
||||
#define EINPROGRESS 115 /* Operation now in progress */
|
||||
#define ESTALE 116 /* Stale NFS file handle */
|
||||
#define EUCLEAN 117 /* Structure needs cleaning */
|
||||
#define ENOTNAM 118 /* Not a XENIX named type file */
|
||||
#define ENAVAIL 119 /* No XENIX semaphores available */
|
||||
#define EISNAM 120 /* Is a named type file */
|
||||
#define EREMOTEIO 121 /* Remote I/O error */
|
||||
#define EDQUOT 122 /* Quota exceeded */
|
||||
|
||||
#define ENOMEDIUM 123 /* No medium found */
|
||||
#define EMEDIUMTYPE 124 /* Wrong medium type */
|
||||
#define ECANCELED 125 /* Operation Canceled */
|
||||
#define ENOKEY 126 /* Required key not available */
|
||||
#define EKEYEXPIRED 127 /* Key has expired */
|
||||
#define EKEYREVOKED 128 /* Key has been revoked */
|
||||
#define EKEYREJECTED 129 /* Key was rejected by service */
|
||||
|
||||
/* for robust mutexes */
|
||||
#define EOWNERDEAD 130 /* Owner died */
|
||||
#define ENOTRECOVERABLE 131 /* State not recoverable */
|
||||
|
||||
#define ERFKILL 132 /* Operation not possible due to RF-kill */
|
||||
|
||||
#endif
|
@ -78,6 +78,22 @@
|
||||
(void) (&_max1 == &_max2); \
|
||||
_max1 > _max2 ? _max1 : _max2; })
|
||||
|
||||
/*
|
||||
* ..and if you can't take the strict
|
||||
* types, you can specify one yourself.
|
||||
*
|
||||
* Or not use min/max/clamp at all, of course.
|
||||
*/
|
||||
#define min_t(type, x, y) ({ \
|
||||
type __min1 = (x); \
|
||||
type __min2 = (y); \
|
||||
__min1 < __min2 ? __min1: __min2; })
|
||||
|
||||
#define max_t(type, x, y) ({ \
|
||||
type __max1 = (x); \
|
||||
type __max2 = (y); \
|
||||
__max1 > __max2 ? __max1: __max2; })
|
||||
|
||||
/**
|
||||
* container_of - cast a member of a structure out to the containing structure
|
||||
* @ptr: the pointer to the member.
|
||||
|
@ -1 +1,29 @@
|
||||
/* stub */
|
||||
|
||||
static inline void mdelay(unsigned long time)
|
||||
{
|
||||
time /= 10;
|
||||
if(!time) time = 1;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"call *__imp__Delay"
|
||||
::"b" (time));
|
||||
__asm__ __volatile__ (
|
||||
"":::"ebx");
|
||||
|
||||
};
|
||||
|
||||
static inline void udelay(unsigned long delay)
|
||||
{
|
||||
if(!delay) delay++;
|
||||
delay*= 500;
|
||||
|
||||
while(delay--)
|
||||
{
|
||||
__asm__ __volatile__(
|
||||
"xorl %%eax, %%eax \n\t"
|
||||
"cpuid"
|
||||
:::"eax","ebx","ecx","edx" );
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,566 +0,0 @@
|
||||
|
||||
#include <types.h>
|
||||
#include <list.h>
|
||||
|
||||
#ifndef __PCI_H__
|
||||
#define __PCI_H__
|
||||
|
||||
#define PCI_ANY_ID (~0)
|
||||
|
||||
|
||||
#define PCI_CLASS_NOT_DEFINED 0x0000
|
||||
#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
|
||||
|
||||
#define PCI_BASE_CLASS_STORAGE 0x01
|
||||
#define PCI_CLASS_STORAGE_SCSI 0x0100
|
||||
#define PCI_CLASS_STORAGE_IDE 0x0101
|
||||
#define PCI_CLASS_STORAGE_FLOPPY 0x0102
|
||||
#define PCI_CLASS_STORAGE_IPI 0x0103
|
||||
#define PCI_CLASS_STORAGE_RAID 0x0104
|
||||
#define PCI_CLASS_STORAGE_SATA 0x0106
|
||||
#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601
|
||||
#define PCI_CLASS_STORAGE_SAS 0x0107
|
||||
#define PCI_CLASS_STORAGE_OTHER 0x0180
|
||||
|
||||
#define PCI_BASE_CLASS_NETWORK 0x02
|
||||
#define PCI_CLASS_NETWORK_ETHERNET 0x0200
|
||||
#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
|
||||
#define PCI_CLASS_NETWORK_FDDI 0x0202
|
||||
#define PCI_CLASS_NETWORK_ATM 0x0203
|
||||
#define PCI_CLASS_NETWORK_OTHER 0x0280
|
||||
|
||||
#define PCI_BASE_CLASS_DISPLAY 0x03
|
||||
#define PCI_CLASS_DISPLAY_VGA 0x0300
|
||||
#define PCI_CLASS_DISPLAY_XGA 0x0301
|
||||
#define PCI_CLASS_DISPLAY_3D 0x0302
|
||||
#define PCI_CLASS_DISPLAY_OTHER 0x0380
|
||||
|
||||
#define PCI_BASE_CLASS_MULTIMEDIA 0x04
|
||||
#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
|
||||
#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
|
||||
#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
|
||||
#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
|
||||
|
||||
#define PCI_BASE_CLASS_MEMORY 0x05
|
||||
#define PCI_CLASS_MEMORY_RAM 0x0500
|
||||
#define PCI_CLASS_MEMORY_FLASH 0x0501
|
||||
#define PCI_CLASS_MEMORY_OTHER 0x0580
|
||||
|
||||
#define PCI_BASE_CLASS_BRIDGE 0x06
|
||||
#define PCI_CLASS_BRIDGE_HOST 0x0600
|
||||
#define PCI_CLASS_BRIDGE_ISA 0x0601
|
||||
#define PCI_CLASS_BRIDGE_EISA 0x0602
|
||||
#define PCI_CLASS_BRIDGE_MC 0x0603
|
||||
#define PCI_CLASS_BRIDGE_PCI 0x0604
|
||||
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
|
||||
#define PCI_CLASS_BRIDGE_NUBUS 0x0606
|
||||
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
|
||||
#define PCI_CLASS_BRIDGE_RACEWAY 0x0608
|
||||
#define PCI_CLASS_BRIDGE_OTHER 0x0680
|
||||
|
||||
#define PCI_BASE_CLASS_COMMUNICATION 0x07
|
||||
#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
|
||||
#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
|
||||
#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702
|
||||
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
|
||||
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
|
||||
|
||||
#define PCI_BASE_CLASS_SYSTEM 0x08
|
||||
#define PCI_CLASS_SYSTEM_PIC 0x0800
|
||||
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
|
||||
#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020
|
||||
#define PCI_CLASS_SYSTEM_DMA 0x0801
|
||||
#define PCI_CLASS_SYSTEM_TIMER 0x0802
|
||||
#define PCI_CLASS_SYSTEM_RTC 0x0803
|
||||
#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
|
||||
#define PCI_CLASS_SYSTEM_SDHCI 0x0805
|
||||
#define PCI_CLASS_SYSTEM_OTHER 0x0880
|
||||
|
||||
#define PCI_BASE_CLASS_INPUT 0x09
|
||||
#define PCI_CLASS_INPUT_KEYBOARD 0x0900
|
||||
#define PCI_CLASS_INPUT_PEN 0x0901
|
||||
#define PCI_CLASS_INPUT_MOUSE 0x0902
|
||||
#define PCI_CLASS_INPUT_SCANNER 0x0903
|
||||
#define PCI_CLASS_INPUT_GAMEPORT 0x0904
|
||||
#define PCI_CLASS_INPUT_OTHER 0x0980
|
||||
|
||||
#define PCI_BASE_CLASS_DOCKING 0x0a
|
||||
#define PCI_CLASS_DOCKING_GENERIC 0x0a00
|
||||
#define PCI_CLASS_DOCKING_OTHER 0x0a80
|
||||
|
||||
#define PCI_BASE_CLASS_PROCESSOR 0x0b
|
||||
#define PCI_CLASS_PROCESSOR_386 0x0b00
|
||||
#define PCI_CLASS_PROCESSOR_486 0x0b01
|
||||
#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
|
||||
#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
|
||||
#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
|
||||
#define PCI_CLASS_PROCESSOR_MIPS 0x0b30
|
||||
#define PCI_CLASS_PROCESSOR_CO 0x0b40
|
||||
|
||||
#define PCI_BASE_CLASS_SERIAL 0x0c
|
||||
#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
|
||||
#define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010
|
||||
#define PCI_CLASS_SERIAL_ACCESS 0x0c01
|
||||
#define PCI_CLASS_SERIAL_SSA 0x0c02
|
||||
#define PCI_CLASS_SERIAL_USB 0x0c03
|
||||
#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300
|
||||
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
|
||||
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
|
||||
#define PCI_CLASS_SERIAL_FIBER 0x0c04
|
||||
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
|
||||
|
||||
#define PCI_BASE_CLASS_WIRELESS 0x0d
|
||||
#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10
|
||||
#define PCI_CLASS_WIRELESS_WHCI 0x0d1010
|
||||
|
||||
#define PCI_BASE_CLASS_INTELLIGENT 0x0e
|
||||
#define PCI_CLASS_INTELLIGENT_I2O 0x0e00
|
||||
|
||||
#define PCI_BASE_CLASS_SATELLITE 0x0f
|
||||
#define PCI_CLASS_SATELLITE_TV 0x0f00
|
||||
#define PCI_CLASS_SATELLITE_AUDIO 0x0f01
|
||||
#define PCI_CLASS_SATELLITE_VOICE 0x0f03
|
||||
#define PCI_CLASS_SATELLITE_DATA 0x0f04
|
||||
|
||||
#define PCI_BASE_CLASS_CRYPT 0x10
|
||||
#define PCI_CLASS_CRYPT_NETWORK 0x1000
|
||||
#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001
|
||||
#define PCI_CLASS_CRYPT_OTHER 0x1080
|
||||
|
||||
#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11
|
||||
#define PCI_CLASS_SP_DPIO 0x1100
|
||||
#define PCI_CLASS_SP_OTHER 0x1180
|
||||
|
||||
#define PCI_CLASS_OTHERS 0xff
|
||||
|
||||
|
||||
/*
|
||||
* Under PCI, each device has 256 bytes of configuration address space,
|
||||
* of which the first 64 bytes are standardized as follows:
|
||||
*/
|
||||
#define PCI_VENDOR_ID 0x000 /* 16 bits */
|
||||
#define PCI_DEVICE_ID 0x002 /* 16 bits */
|
||||
#define PCI_COMMAND 0x004 /* 16 bits */
|
||||
#define PCI_COMMAND_IO 0x001 /* Enable response in I/O space */
|
||||
#define PCI_COMMAND_MEMORY 0x002 /* Enable response in Memory space */
|
||||
#define PCI_COMMAND_MASTER 0x004 /* Enable bus mastering */
|
||||
#define PCI_COMMAND_SPECIAL 0x008 /* Enable response to special cycles */
|
||||
#define PCI_COMMAND_INVALIDATE 0x010 /* Use memory write and invalidate */
|
||||
#define PCI_COMMAND_VGA_PALETTE 0x020 /* Enable palette snooping */
|
||||
#define PCI_COMMAND_PARITY 0x040 /* Enable parity checking */
|
||||
#define PCI_COMMAND_WAIT 0x080 /* Enable address/data stepping */
|
||||
#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
|
||||
#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
|
||||
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
|
||||
|
||||
#define PCI_STATUS 0x006 /* 16 bits */
|
||||
#define PCI_STATUS_CAP_LIST 0x010 /* Support Capability List */
|
||||
#define PCI_STATUS_66MHZ 0x020 /* Support 66 Mhz PCI 2.1 bus */
|
||||
#define PCI_STATUS_UDF 0x040 /* Support User Definable Features [obsolete] */
|
||||
#define PCI_STATUS_FAST_BACK 0x080 /* Accept fast-back to back */
|
||||
#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
|
||||
#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
|
||||
#define PCI_STATUS_DEVSEL_FAST 0x000
|
||||
#define PCI_STATUS_DEVSEL_MEDIUM 0x200
|
||||
#define PCI_STATUS_DEVSEL_SLOW 0x400
|
||||
#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
|
||||
#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */
|
||||
#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
|
||||
#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
|
||||
#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
|
||||
|
||||
#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */
|
||||
#define PCI_REVISION_ID 0x08 /* Revision ID */
|
||||
#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
|
||||
#define PCI_CLASS_DEVICE 0x0a /* Device class */
|
||||
|
||||
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
|
||||
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
|
||||
#define PCI_HEADER_TYPE 0x0e /* 8 bits */
|
||||
#define PCI_HEADER_TYPE_NORMAL 0
|
||||
#define PCI_HEADER_TYPE_BRIDGE 1
|
||||
#define PCI_HEADER_TYPE_CARDBUS 2
|
||||
|
||||
#define PCI_BIST 0x0f /* 8 bits */
|
||||
#define PCI_BIST_CODE_MASK 0x0f /* Return result */
|
||||
#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
|
||||
#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
|
||||
|
||||
/*
|
||||
* Base addresses specify locations in memory or I/O space.
|
||||
* Decoded size can be determined by writing a value of
|
||||
* 0xffffffff to the register, and reading it back. Only
|
||||
* 1 bits are decoded.
|
||||
*/
|
||||
#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
|
||||
#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */
|
||||
#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */
|
||||
#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
|
||||
#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
|
||||
#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
|
||||
#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
|
||||
#define PCI_BASE_ADDRESS_SPACE_IO 0x01
|
||||
#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
|
||||
#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
|
||||
#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
|
||||
#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */
|
||||
#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
|
||||
#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
|
||||
#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL)
|
||||
#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL)
|
||||
/* bit 1 is reserved if address_space = 1 */
|
||||
|
||||
#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */
|
||||
|
||||
/* Header type 0 (normal devices) */
|
||||
#define PCI_CARDBUS_CIS 0x28
|
||||
#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
|
||||
#define PCI_SUBSYSTEM_ID 0x2e
|
||||
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
|
||||
#define PCI_ROM_ADDRESS_ENABLE 0x01
|
||||
#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
|
||||
|
||||
#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
|
||||
#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
|
||||
|
||||
|
||||
#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40
|
||||
#define PCI_CB_SUBSYSTEM_ID 0x42
|
||||
|
||||
#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
|
||||
#define PCI_CB_CAPABILITY_LIST 0x14
|
||||
/* Capability lists */
|
||||
|
||||
#define PCI_CAP_LIST_ID 0 /* Capability ID */
|
||||
#define PCI_CAP_ID_PM 0x01 /* Power Management */
|
||||
#define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */
|
||||
#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
|
||||
#define PCI_CAP_ID_SLOTID 0x04 /* Slot Identification */
|
||||
#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */
|
||||
#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */
|
||||
#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */
|
||||
#define PCI_CAP_ID_HT 0x08 /* HyperTransport */
|
||||
#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific capability */
|
||||
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
|
||||
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
|
||||
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
|
||||
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
|
||||
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
|
||||
#define PCI_CAP_SIZEOF 4
|
||||
|
||||
|
||||
/* AGP registers */
|
||||
|
||||
#define PCI_AGP_VERSION 2 /* BCD version number */
|
||||
#define PCI_AGP_RFU 3 /* Rest of capability flags */
|
||||
#define PCI_AGP_STATUS 4 /* Status register */
|
||||
#define PCI_AGP_STATUS_RQ_MASK 0xff000000 /* Maximum number of requests - 1 */
|
||||
#define PCI_AGP_STATUS_SBA 0x0200 /* Sideband addressing supported */
|
||||
#define PCI_AGP_STATUS_64BIT 0x0020 /* 64-bit addressing supported */
|
||||
#define PCI_AGP_STATUS_FW 0x0010 /* FW transfers supported */
|
||||
#define PCI_AGP_STATUS_RATE4 0x0004 /* 4x transfer rate supported */
|
||||
#define PCI_AGP_STATUS_RATE2 0x0002 /* 2x transfer rate supported */
|
||||
#define PCI_AGP_STATUS_RATE1 0x0001 /* 1x transfer rate supported */
|
||||
#define PCI_AGP_COMMAND 8 /* Control register */
|
||||
#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */
|
||||
#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */
|
||||
#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */
|
||||
#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */
|
||||
#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */
|
||||
#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */
|
||||
#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */
|
||||
#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */
|
||||
#define PCI_AGP_SIZEOF 12
|
||||
|
||||
|
||||
#define PCI_MAP_REG_START 0x10
|
||||
#define PCI_MAP_REG_END 0x28
|
||||
#define PCI_MAP_ROM_REG 0x30
|
||||
|
||||
#define PCI_MAP_MEMORY 0x00000000
|
||||
#define PCI_MAP_IO 0x00000001
|
||||
|
||||
#define PCI_MAP_MEMORY_TYPE 0x00000007
|
||||
#define PCI_MAP_IO_TYPE 0x00000003
|
||||
|
||||
#define PCI_MAP_MEMORY_TYPE_32BIT 0x00000000
|
||||
#define PCI_MAP_MEMORY_TYPE_32BIT_1M 0x00000002
|
||||
#define PCI_MAP_MEMORY_TYPE_64BIT 0x00000004
|
||||
#define PCI_MAP_MEMORY_TYPE_MASK 0x00000006
|
||||
#define PCI_MAP_MEMORY_CACHABLE 0x00000008
|
||||
#define PCI_MAP_MEMORY_ATTR_MASK 0x0000000e
|
||||
#define PCI_MAP_MEMORY_ADDRESS_MASK 0xfffffff0
|
||||
|
||||
#define PCI_MAP_IO_ATTR_MASK 0x00000003
|
||||
|
||||
|
||||
|
||||
#define PCI_MAP_IS_IO(b) ((b) & PCI_MAP_IO)
|
||||
#define PCI_MAP_IS_MEM(b) (!PCI_MAP_IS_IO(b))
|
||||
|
||||
#define PCI_MAP_IS64BITMEM(b) \
|
||||
(((b) & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_64BIT)
|
||||
|
||||
#define PCIGETMEMORY(b) ((b) & PCI_MAP_MEMORY_ADDRESS_MASK)
|
||||
#define PCIGETMEMORY64HIGH(b) (*((CARD32*)&b + 1))
|
||||
#define PCIGETMEMORY64(b) \
|
||||
(PCIGETMEMORY(b) | ((CARD64)PCIGETMEMORY64HIGH(b) << 32))
|
||||
|
||||
#define PCI_MAP_IO_ADDRESS_MASK 0xfffffffc
|
||||
|
||||
#define PCIGETIO(b) ((b) & PCI_MAP_IO_ADDRESS_MASK)
|
||||
|
||||
#define PCI_MAP_ROM_DECODE_ENABLE 0x00000001
|
||||
#define PCI_MAP_ROM_ADDRESS_MASK 0xfffff800
|
||||
|
||||
#define PCIGETROM(b) ((b) & PCI_MAP_ROM_ADDRESS_MASK)
|
||||
|
||||
|
||||
#ifndef PCI_DOM_MASK
|
||||
# define PCI_DOM_MASK 0x0ffu
|
||||
#endif
|
||||
#define PCI_DOMBUS_MASK (((PCI_DOM_MASK) << 8) | 0x0ffu)
|
||||
|
||||
#define PCI_MAKE_TAG(b,d,f) ((((b) & (PCI_DOMBUS_MASK)) << 16) | \
|
||||
(((d) & 0x00001fu) << 11) | \
|
||||
(((f) & 0x000007u) << 8))
|
||||
|
||||
#define PCI_BUS_FROM_TAG(tag) (((tag) >> 16) & (PCI_DOMBUS_MASK))
|
||||
#define PCI_DEV_FROM_TAG(tag) (((tag) & 0x0000f800u) >> 11)
|
||||
#define PCI_FUNC_FROM_TAG(tag) (((tag) & 0x00000700u) >> 8)
|
||||
#define PCI_DFN_FROM_TAG(tag) (((tag) & 0x0000ff00u) >> 8)
|
||||
|
||||
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
|
||||
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
|
||||
#define PCI_FUNC(devfn) ((devfn) & 0x07)
|
||||
|
||||
|
||||
|
||||
typedef unsigned int PCITAG;
|
||||
|
||||
extern inline PCITAG
|
||||
pciTag(int busnum, int devnum, int funcnum)
|
||||
{
|
||||
return(PCI_MAKE_TAG(busnum,devnum,funcnum));
|
||||
}
|
||||
|
||||
|
||||
struct resource
|
||||
{
|
||||
resource_size_t start;
|
||||
resource_size_t end;
|
||||
// const char *name;
|
||||
unsigned long flags;
|
||||
// struct resource *parent, *sibling, *child;
|
||||
};
|
||||
|
||||
/*
|
||||
* IO resources have these defined flags.
|
||||
*/
|
||||
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
|
||||
|
||||
#define IORESOURCE_IO 0x00000100 /* Resource type */
|
||||
#define IORESOURCE_MEM 0x00000200
|
||||
#define IORESOURCE_IRQ 0x00000400
|
||||
#define IORESOURCE_DMA 0x00000800
|
||||
|
||||
#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */
|
||||
#define IORESOURCE_READONLY 0x00002000
|
||||
#define IORESOURCE_CACHEABLE 0x00004000
|
||||
#define IORESOURCE_RANGELENGTH 0x00008000
|
||||
#define IORESOURCE_SHADOWABLE 0x00010000
|
||||
#define IORESOURCE_BUS_HAS_VGA 0x00080000
|
||||
|
||||
#define IORESOURCE_DISABLED 0x10000000
|
||||
#define IORESOURCE_UNSET 0x20000000
|
||||
#define IORESOURCE_AUTO 0x40000000
|
||||
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
|
||||
|
||||
/* ISA PnP IRQ specific bits (IORESOURCE_BITS) */
|
||||
#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
|
||||
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
|
||||
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
|
||||
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
|
||||
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
|
||||
|
||||
/* ISA PnP DMA specific bits (IORESOURCE_BITS) */
|
||||
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
|
||||
#define IORESOURCE_DMA_8BIT (0<<0)
|
||||
#define IORESOURCE_DMA_8AND16BIT (1<<0)
|
||||
#define IORESOURCE_DMA_16BIT (2<<0)
|
||||
|
||||
#define IORESOURCE_DMA_MASTER (1<<2)
|
||||
#define IORESOURCE_DMA_BYTE (1<<3)
|
||||
#define IORESOURCE_DMA_WORD (1<<4)
|
||||
|
||||
#define IORESOURCE_DMA_SPEED_MASK (3<<6)
|
||||
#define IORESOURCE_DMA_COMPATIBLE (0<<6)
|
||||
#define IORESOURCE_DMA_TYPEA (1<<6)
|
||||
#define IORESOURCE_DMA_TYPEB (2<<6)
|
||||
#define IORESOURCE_DMA_TYPEF (3<<6)
|
||||
|
||||
/* ISA PnP memory I/O specific bits (IORESOURCE_BITS) */
|
||||
#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */
|
||||
#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */
|
||||
#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */
|
||||
#define IORESOURCE_MEM_TYPE_MASK (3<<3)
|
||||
#define IORESOURCE_MEM_8BIT (0<<3)
|
||||
#define IORESOURCE_MEM_16BIT (1<<3)
|
||||
#define IORESOURCE_MEM_8AND16BIT (2<<3)
|
||||
#define IORESOURCE_MEM_32BIT (3<<3)
|
||||
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
|
||||
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
|
||||
|
||||
/* PCI ROM control bits (IORESOURCE_BITS) */
|
||||
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
|
||||
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
|
||||
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
|
||||
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
|
||||
|
||||
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
|
||||
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
|
||||
|
||||
|
||||
/*
|
||||
* For PCI devices, the region numbers are assigned this way:
|
||||
*
|
||||
* 0-5 standard PCI regions
|
||||
* 6 expansion ROM
|
||||
* 7-10 bridges: address space assigned to buses behind the bridge
|
||||
*/
|
||||
|
||||
#define PCI_ROM_RESOURCE 6
|
||||
#define PCI_BRIDGE_RESOURCES 7
|
||||
#define PCI_NUM_RESOURCES 11
|
||||
|
||||
#ifndef PCI_BUS_NUM_RESOURCES
|
||||
#define PCI_BUS_NUM_RESOURCES 8
|
||||
#endif
|
||||
|
||||
#define DEVICE_COUNT_RESOURCE 12
|
||||
|
||||
/*
|
||||
* The pci_dev structure is used to describe PCI devices.
|
||||
*/
|
||||
struct pci_dev {
|
||||
// struct list_head bus_list; /* node in per-bus list */
|
||||
// struct pci_bus *bus; /* bus this device is on */
|
||||
// struct pci_bus *subordinate; /* bus this device bridges to */
|
||||
|
||||
// void *sysdata; /* hook for sys-specific extension */
|
||||
// struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
|
||||
// struct pci_slot *slot; /* Physical slot this device is in */
|
||||
u32_t bus;
|
||||
u32_t devfn; /* encoded device & function index */
|
||||
u16_t vendor;
|
||||
u16_t device;
|
||||
u16_t subsystem_vendor;
|
||||
u16_t subsystem_device;
|
||||
u32_t class; /* 3 bytes: (base,sub,prog-if) */
|
||||
uint8_t revision; /* PCI revision, low byte of class word */
|
||||
uint8_t hdr_type; /* PCI header type (`multi' flag masked out) */
|
||||
uint8_t pcie_type; /* PCI-E device/port type */
|
||||
uint8_t rom_base_reg; /* which config register controls the ROM */
|
||||
uint8_t pin; /* which interrupt pin this device uses */
|
||||
|
||||
// struct pci_driver *driver; /* which driver has allocated this device */
|
||||
uint64_t dma_mask; /* Mask of the bits of bus address this
|
||||
device implements. Normally this is
|
||||
0xffffffff. You only need to change
|
||||
this if your device has broken DMA
|
||||
or supports 64-bit transfers. */
|
||||
|
||||
// struct device_dma_parameters dma_parms;
|
||||
|
||||
// pci_power_t current_state; /* Current operating state. In ACPI-speak,
|
||||
// this is D0-D3, D0 being fully functional,
|
||||
// and D3 being off. */
|
||||
// int pm_cap; /* PM capability offset in the
|
||||
// configuration space */
|
||||
unsigned int pme_support:5; /* Bitmask of states from which PME#
|
||||
can be generated */
|
||||
unsigned int d1_support:1; /* Low power state D1 is supported */
|
||||
unsigned int d2_support:1; /* Low power state D2 is supported */
|
||||
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
|
||||
|
||||
// pci_channel_state_t error_state; /* current connectivity state */
|
||||
// struct device dev; /* Generic device interface */
|
||||
|
||||
// int cfg_size; /* Size of configuration space */
|
||||
|
||||
/*
|
||||
* Instead of touching interrupt line and base address registers
|
||||
* directly, use the values stored here. They might be different!
|
||||
*/
|
||||
unsigned int irq;
|
||||
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
|
||||
|
||||
/* These fields are used by common fixups */
|
||||
unsigned int transparent:1; /* Transparent PCI bridge */
|
||||
unsigned int multifunction:1;/* Part of multi-function device */
|
||||
/* keep track of device state */
|
||||
unsigned int is_added:1;
|
||||
unsigned int is_busmaster:1; /* device is busmaster */
|
||||
unsigned int no_msi:1; /* device may not use msi */
|
||||
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
|
||||
unsigned int broken_parity_status:1; /* Device generates false positive parity */
|
||||
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
|
||||
unsigned int msi_enabled:1;
|
||||
unsigned int msix_enabled:1;
|
||||
unsigned int ari_enabled:1; /* ARI forwarding */
|
||||
unsigned int is_managed:1;
|
||||
unsigned int is_pcie:1;
|
||||
unsigned int state_saved:1;
|
||||
unsigned int is_physfn:1;
|
||||
unsigned int is_virtfn:1;
|
||||
// pci_dev_flags_t dev_flags;
|
||||
// atomic_t enable_cnt; /* pci_enable_device has been called */
|
||||
|
||||
// u32 saved_config_space[16]; /* config space saved at suspend time */
|
||||
// struct hlist_head saved_cap_space;
|
||||
// struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
|
||||
// int rom_attr_enabled; /* has display of the rom attribute been enabled? */
|
||||
// struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
|
||||
// struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
|
||||
};
|
||||
|
||||
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
|
||||
#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
|
||||
#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
|
||||
#define pci_resource_len(dev,bar) \
|
||||
((pci_resource_start((dev), (bar)) == 0 && \
|
||||
pci_resource_end((dev), (bar)) == \
|
||||
pci_resource_start((dev), (bar))) ? 0 : \
|
||||
\
|
||||
(pci_resource_end((dev), (bar)) - \
|
||||
pci_resource_start((dev), (bar)) + 1))
|
||||
|
||||
struct pci_device_id
|
||||
{
|
||||
u16_t vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
|
||||
u16_t subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
|
||||
u32_t class, class_mask; /* (class,subclass,prog-if) triplet */
|
||||
u32_t driver_data; /* Data private to the driver */
|
||||
};
|
||||
|
||||
typedef struct
|
||||
{
|
||||
struct list_head link;
|
||||
struct pci_dev pci_dev;
|
||||
}dev_t;
|
||||
|
||||
int enum_pci_devices(void);
|
||||
|
||||
struct pci_device_id*
|
||||
find_pci_device(dev_t* pdev, struct pci_device_id *idlist);
|
||||
|
||||
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
|
||||
|
||||
int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
|
||||
|
||||
|
||||
#define pci_name(x) "radeon"
|
||||
|
||||
#endif //__PCI__H__
|
||||
|
||||
|
101
drivers/video/drm/list_sort.c
Normal file
101
drivers/video/drm/list_sort.c
Normal file
@ -0,0 +1,101 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/list_sort.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
/**
|
||||
* list_sort - sort a list.
|
||||
* @priv: private data, passed to @cmp
|
||||
* @head: the list to sort
|
||||
* @cmp: the elements comparison function
|
||||
*
|
||||
* This function has been implemented by Mark J Roberts <mjr@znex.org>. It
|
||||
* implements "merge sort" which has O(nlog(n)) complexity. The list is sorted
|
||||
* in ascending order.
|
||||
*
|
||||
* The comparison function @cmp is supposed to return a negative value if @a is
|
||||
* less than @b, and a positive value if @a is greater than @b. If @a and @b
|
||||
* are equivalent, then it does not matter what this function returns.
|
||||
*/
|
||||
void list_sort(void *priv, struct list_head *head,
|
||||
int (*cmp)(void *priv, struct list_head *a,
|
||||
struct list_head *b))
|
||||
{
|
||||
struct list_head *p, *q, *e, *list, *tail, *oldhead;
|
||||
int insize, nmerges, psize, qsize, i;
|
||||
|
||||
if (list_empty(head))
|
||||
return;
|
||||
|
||||
list = head->next;
|
||||
list_del(head);
|
||||
insize = 1;
|
||||
for (;;) {
|
||||
p = oldhead = list;
|
||||
list = tail = NULL;
|
||||
nmerges = 0;
|
||||
|
||||
while (p) {
|
||||
nmerges++;
|
||||
q = p;
|
||||
psize = 0;
|
||||
for (i = 0; i < insize; i++) {
|
||||
psize++;
|
||||
q = q->next == oldhead ? NULL : q->next;
|
||||
if (!q)
|
||||
break;
|
||||
}
|
||||
|
||||
qsize = insize;
|
||||
while (psize > 0 || (qsize > 0 && q)) {
|
||||
if (!psize) {
|
||||
e = q;
|
||||
q = q->next;
|
||||
qsize--;
|
||||
if (q == oldhead)
|
||||
q = NULL;
|
||||
} else if (!qsize || !q) {
|
||||
e = p;
|
||||
p = p->next;
|
||||
psize--;
|
||||
if (p == oldhead)
|
||||
p = NULL;
|
||||
} else if (cmp(priv, p, q) <= 0) {
|
||||
e = p;
|
||||
p = p->next;
|
||||
psize--;
|
||||
if (p == oldhead)
|
||||
p = NULL;
|
||||
} else {
|
||||
e = q;
|
||||
q = q->next;
|
||||
qsize--;
|
||||
if (q == oldhead)
|
||||
q = NULL;
|
||||
}
|
||||
if (tail)
|
||||
tail->next = e;
|
||||
else
|
||||
list = e;
|
||||
e->prev = tail;
|
||||
tail = e;
|
||||
}
|
||||
p = q;
|
||||
}
|
||||
|
||||
tail->next = list;
|
||||
list->prev = tail;
|
||||
|
||||
if (nmerges <= 1)
|
||||
break;
|
||||
|
||||
insize *= 2;
|
||||
}
|
||||
|
||||
head->next = list;
|
||||
head->prev = list->prev;
|
||||
list->prev->next = head;
|
||||
list->prev = head;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(list_sort);
|
@ -305,7 +305,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
|
||||
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
|
||||
args.ucCRTC = radeon_crtc->crtc_id;
|
||||
|
||||
printk("executing set crtc dtd timing\n");
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
@ -345,7 +344,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
|
||||
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
|
||||
args.ucCRTC = radeon_crtc->crtc_id;
|
||||
|
||||
printk("executing set crtc timing\n");
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
@ -407,59 +405,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
|
||||
}
|
||||
}
|
||||
|
||||
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
union adjust_pixel_clock {
|
||||
ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
|
||||
};
|
||||
|
||||
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
|
||||
struct drm_display_mode *mode,
|
||||
struct radeon_pll *pll)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct drm_encoder *encoder = NULL;
|
||||
struct radeon_encoder *radeon_encoder = NULL;
|
||||
uint8_t frev, crev;
|
||||
int index;
|
||||
SET_PIXEL_CLOCK_PS_ALLOCATION args;
|
||||
PIXEL_CLOCK_PARAMETERS *spc1_ptr;
|
||||
PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
|
||||
PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
|
||||
uint32_t pll_clock = mode->clock;
|
||||
uint32_t adjusted_clock;
|
||||
uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
|
||||
struct radeon_pll *pll;
|
||||
int pll_flags = 0;
|
||||
u32 adjusted_clock = mode->clock;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
/* reset the pll flags */
|
||||
pll->flags = 0;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if ((rdev->family == CHIP_RS600) ||
|
||||
(rdev->family == CHIP_RS690) ||
|
||||
(rdev->family == CHIP_RS740))
|
||||
pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
|
||||
pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
|
||||
RADEON_PLL_PREFER_CLOSEST_LOWER);
|
||||
|
||||
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
|
||||
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
|
||||
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
|
||||
else
|
||||
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
|
||||
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
|
||||
} else {
|
||||
pll_flags |= RADEON_PLL_LEGACY;
|
||||
pll->flags |= RADEON_PLL_LEGACY;
|
||||
|
||||
if (mode->clock > 200000) /* range limits??? */
|
||||
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
|
||||
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
|
||||
else
|
||||
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
|
||||
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
|
||||
|
||||
}
|
||||
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
if (encoder->crtc == crtc) {
|
||||
if (!ASIC_IS_AVIVO(rdev)) {
|
||||
if (encoder->encoder_type !=
|
||||
DRM_MODE_ENCODER_DAC)
|
||||
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
|
||||
if (encoder->encoder_type ==
|
||||
DRM_MODE_ENCODER_LVDS)
|
||||
pll_flags |= RADEON_PLL_USE_REF_DIV;
|
||||
}
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
|
||||
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
|
||||
adjusted_clock = mode->clock * 2;
|
||||
} else {
|
||||
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
|
||||
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
|
||||
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
|
||||
pll->flags |= RADEON_PLL_USE_REF_DIV;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -469,46 +465,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
* special hw requirements.
|
||||
*/
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
|
||||
union adjust_pixel_clock args;
|
||||
struct radeon_encoder_atom_dig *dig;
|
||||
u8 frev, crev;
|
||||
int index;
|
||||
|
||||
if (!encoder)
|
||||
return;
|
||||
|
||||
memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
|
||||
adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
|
||||
adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
|
||||
if (!radeon_encoder->enc_priv)
|
||||
return adjusted_clock;
|
||||
dig = radeon_encoder->enc_priv;
|
||||
|
||||
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
&crev);
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
switch (frev) {
|
||||
case 1:
|
||||
switch (crev) {
|
||||
case 1:
|
||||
case 2:
|
||||
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
|
||||
args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context,
|
||||
index, (uint32_t *)&adjust_pll_args);
|
||||
adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
|
||||
} else {
|
||||
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
|
||||
if (ASIC_IS_AVIVO(rdev) &&
|
||||
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
|
||||
adjusted_clock = mode->clock * 2;
|
||||
else
|
||||
adjusted_clock = mode->clock;
|
||||
index, (uint32_t *)&args);
|
||||
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
|
||||
return adjusted_clock;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
|
||||
return adjusted_clock;
|
||||
}
|
||||
}
|
||||
return adjusted_clock;
|
||||
}
|
||||
|
||||
union set_pixel_clock {
|
||||
SET_PIXEL_CLOCK_PS_ALLOCATION base;
|
||||
PIXEL_CLOCK_PARAMETERS v1;
|
||||
PIXEL_CLOCK_PARAMETERS_V2 v2;
|
||||
PIXEL_CLOCK_PARAMETERS_V3 v3;
|
||||
};
|
||||
|
||||
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct drm_encoder *encoder = NULL;
|
||||
struct radeon_encoder *radeon_encoder = NULL;
|
||||
u8 frev, crev;
|
||||
int index;
|
||||
union set_pixel_clock args;
|
||||
u32 pll_clock = mode->clock;
|
||||
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
|
||||
struct radeon_pll *pll;
|
||||
u32 adjusted_clock;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
if (encoder->crtc == crtc) {
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!radeon_encoder)
|
||||
return;
|
||||
|
||||
if (radeon_crtc->crtc_id == 0)
|
||||
pll = &rdev->clock.p1pll;
|
||||
else
|
||||
pll = &rdev->clock.p2pll;
|
||||
|
||||
/* adjust pixel clock as needed */
|
||||
adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (radeon_new_pll)
|
||||
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
|
||||
&fb_div, &frac_fb_div,
|
||||
&ref_div, &post_div, pll_flags);
|
||||
&ref_div, &post_div);
|
||||
else
|
||||
radeon_compute_pll(pll, adjusted_clock, &pll_clock,
|
||||
&fb_div, &frac_fb_div,
|
||||
&ref_div, &post_div, pll_flags);
|
||||
&ref_div, &post_div);
|
||||
} else
|
||||
radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
|
||||
&ref_div, &post_div, pll_flags);
|
||||
&ref_div, &post_div);
|
||||
|
||||
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
@ -518,45 +569,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
case 1:
|
||||
switch (crev) {
|
||||
case 1:
|
||||
spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
|
||||
spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
|
||||
spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
|
||||
spc1_ptr->ucFracFbDiv = frac_fb_div;
|
||||
spc1_ptr->ucPostDiv = post_div;
|
||||
spc1_ptr->ucPpll =
|
||||
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
args.v1.usRefDiv = cpu_to_le16(ref_div);
|
||||
args.v1.usFbDiv = cpu_to_le16(fb_div);
|
||||
args.v1.ucFracFbDiv = frac_fb_div;
|
||||
args.v1.ucPostDiv = post_div;
|
||||
args.v1.ucPpll =
|
||||
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
|
||||
spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
|
||||
spc1_ptr->ucRefDivSrc = 1;
|
||||
args.v1.ucCRTC = radeon_crtc->crtc_id;
|
||||
args.v1.ucRefDivSrc = 1;
|
||||
break;
|
||||
case 2:
|
||||
spc2_ptr =
|
||||
(PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
|
||||
spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
|
||||
spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
|
||||
spc2_ptr->ucFracFbDiv = frac_fb_div;
|
||||
spc2_ptr->ucPostDiv = post_div;
|
||||
spc2_ptr->ucPpll =
|
||||
args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
args.v2.usRefDiv = cpu_to_le16(ref_div);
|
||||
args.v2.usFbDiv = cpu_to_le16(fb_div);
|
||||
args.v2.ucFracFbDiv = frac_fb_div;
|
||||
args.v2.ucPostDiv = post_div;
|
||||
args.v2.ucPpll =
|
||||
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
|
||||
spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
|
||||
spc2_ptr->ucRefDivSrc = 1;
|
||||
args.v2.ucCRTC = radeon_crtc->crtc_id;
|
||||
args.v2.ucRefDivSrc = 1;
|
||||
break;
|
||||
case 3:
|
||||
if (!encoder)
|
||||
return;
|
||||
spc3_ptr =
|
||||
(PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
|
||||
spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
|
||||
spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
|
||||
spc3_ptr->ucFracFbDiv = frac_fb_div;
|
||||
spc3_ptr->ucPostDiv = post_div;
|
||||
spc3_ptr->ucPpll =
|
||||
args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
args.v3.usRefDiv = cpu_to_le16(ref_div);
|
||||
args.v3.usFbDiv = cpu_to_le16(fb_div);
|
||||
args.v3.ucFracFbDiv = frac_fb_div;
|
||||
args.v3.ucPostDiv = post_div;
|
||||
args.v3.ucPpll =
|
||||
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
|
||||
spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
|
||||
spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
|
||||
spc3_ptr->ucEncoderMode =
|
||||
args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
|
||||
args.v3.ucTransmitterId = radeon_encoder->encoder_id;
|
||||
args.v3.ucEncoderMode =
|
||||
atombios_get_encoder_mode(encoder);
|
||||
break;
|
||||
default:
|
||||
@ -569,11 +613,10 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
return;
|
||||
}
|
||||
|
||||
printk("executing set pll\n");
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
struct drm_framebuffer *old_fb)
|
||||
{
|
||||
ENTER();
|
||||
@ -598,14 +641,17 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
|
||||
/* Pin framebuffer & get tilling informations */
|
||||
obj = radeon_fb->obj;
|
||||
obj_priv = obj->driver_private;
|
||||
|
||||
// if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
|
||||
// return -EINVAL;
|
||||
// }
|
||||
|
||||
fb_location = rdev->mc.vram_location;
|
||||
tiling_flags = 0;
|
||||
rbo = obj->driver_private;
|
||||
r = radeon_bo_reserve(rbo, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
|
||||
if (unlikely(r != 0)) {
|
||||
radeon_bo_unreserve(rbo);
|
||||
return -EINVAL;
|
||||
}
|
||||
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
|
||||
radeon_bo_unreserve(rbo);
|
||||
|
||||
switch (crtc->fb->bits_per_pixel) {
|
||||
case 8:
|
||||
@ -687,10 +733,15 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
else
|
||||
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
|
||||
|
||||
// if (old_fb && old_fb != crtc->fb) {
|
||||
// radeon_fb = to_radeon_framebuffer(old_fb);
|
||||
// radeon_gem_object_unpin(radeon_fb->obj);
|
||||
// }
|
||||
if (old_fb && old_fb != crtc->fb) {
|
||||
radeon_fb = to_radeon_framebuffer(old_fb);
|
||||
rbo = radeon_fb->obj->driver_private;
|
||||
r = radeon_bo_reserve(rbo, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
radeon_bo_unpin(rbo);
|
||||
radeon_bo_unreserve(rbo);
|
||||
}
|
||||
|
||||
/* Bytes per pixel may have changed */
|
||||
radeon_bandwidth_update(rdev);
|
||||
@ -700,6 +751,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
struct drm_framebuffer *old_fb)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
return avivo_crtc_set_base(crtc, x, y, old_fb);
|
||||
else
|
||||
return radeon_crtc_set_base(crtc, x, y, old_fb);
|
||||
}
|
||||
|
||||
/* properly set additional regs when using atombios */
|
||||
static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
u32 disp_merge_cntl;
|
||||
|
||||
switch (radeon_crtc->crtc_id) {
|
||||
case 0:
|
||||
disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
|
||||
disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
|
||||
WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
|
||||
break;
|
||||
case 1:
|
||||
disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
|
||||
disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
|
||||
WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
|
||||
WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
|
||||
WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int atombios_crtc_mode_set(struct drm_crtc *crtc,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode,
|
||||
@ -721,8 +808,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
|
||||
else {
|
||||
if (radeon_crtc->crtc_id == 0)
|
||||
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
|
||||
radeon_crtc_set_base(crtc, x, y, old_fb);
|
||||
radeon_legacy_atom_set_surface(crtc);
|
||||
atombios_crtc_set_base(crtc, x, y, old_fb);
|
||||
radeon_legacy_atom_fixup(crtc);
|
||||
}
|
||||
atombios_overscan_setup(crtc, mode, adjusted_mode);
|
||||
atombios_scaler_setup(crtc);
|
||||
@ -740,8 +827,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
|
||||
|
||||
static void atombios_crtc_prepare(struct drm_crtc *crtc)
|
||||
{
|
||||
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
|
||||
atombios_lock_crtc(crtc, 1);
|
||||
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
|
||||
}
|
||||
|
||||
static void atombios_crtc_commit(struct drm_crtc *crtc)
|
||||
|
@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
|
||||
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
|
||||
unsigned char *base;
|
||||
int retry_count = 0;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
|
||||
|
||||
retry:
|
||||
memcpy(base, req_bytes, num_bytes);
|
||||
|
||||
args.lpAuxRequest = 0;
|
||||
@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
|
||||
if (args.ucReplyStatus) {
|
||||
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
|
||||
if (args.ucReplyStatus && !args.ucDataOutLen) {
|
||||
if (args.ucReplyStatus == 0x20 && retry_count < 10)
|
||||
goto retry;
|
||||
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
|
||||
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
|
||||
chan->rec.i2c_id, args.ucReplyStatus);
|
||||
chan->rec.i2c_id, args.ucReplyStatus, retry_count);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,4 @@
|
||||
|
||||
#include <stdint.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm.h>
|
||||
#include <drm_mm.h>
|
||||
@ -24,7 +23,7 @@ static int my_atoi(char **cmd)
|
||||
}
|
||||
}
|
||||
|
||||
char* parse_mode(char *p, mode_t *mode)
|
||||
char* parse_mode(char *p, videomode_t *mode)
|
||||
{
|
||||
char c;
|
||||
|
||||
@ -63,7 +62,7 @@ char* parse_path(char *p, char *log)
|
||||
return p;
|
||||
};
|
||||
|
||||
void parse_cmdline(char *cmdline, mode_t *mode, char *log, int *kms)
|
||||
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms)
|
||||
{
|
||||
char *p = cmdline;
|
||||
|
||||
|
@ -19,8 +19,8 @@ typedef struct
|
||||
uint32_t hot_x;
|
||||
uint32_t hot_y;
|
||||
|
||||
struct list_head list;
|
||||
struct radeon_object *robj;
|
||||
struct list_head list;
|
||||
struct radeon_bo *robj;
|
||||
}cursor_t;
|
||||
|
||||
#define CURSOR_WIDTH 64
|
||||
|
@ -1,23 +1,30 @@
|
||||
|
||||
CC = gcc
|
||||
FASM = e:/fasm/fasm.exe
|
||||
CFLAGS = -c -O2 -fomit-frame-pointer -fno-builtin-printf
|
||||
LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0 --file-alignment 512 --section-alignment 4096
|
||||
|
||||
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
|
||||
|
||||
DRM_TOPDIR = $(CURDIR)/..
|
||||
DRM_INCLUDES = $(DRM_TOPDIR)/includes
|
||||
|
||||
INCLUDES = -I$(DRM_INCLUDES) -I$(DRM_INCLUDES)/drm \
|
||||
-I$(DRM_INCLUDES)/linux -I$(DRM_INCLUDES)/asm
|
||||
|
||||
CFLAGS = -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf
|
||||
|
||||
LIBPATH:= .
|
||||
|
||||
LIBS:= -ldrv -lcore
|
||||
|
||||
NAME:= atikms
|
||||
LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0\
|
||||
--file-alignment 512 --section-alignment 4096
|
||||
|
||||
INCLUDES = -I$(DRM_INCLUDES) -I$(DRM_INCLUDES)/linux -I$(DRM_INCLUDES)/drm
|
||||
|
||||
NAME:= atikms
|
||||
|
||||
HFILES:= $(DRM_INCLUDES)/linux/types.h \
|
||||
$(DRM_INCLUDES)/linux/list.h \
|
||||
$(DRM_INCLUDES)/pci.h \
|
||||
$(DRM_INCLUDES)/linux/pci.h \
|
||||
$(DRM_INCLUDES)/drm/drm.h \
|
||||
$(DRM_INCLUDES)/drm/drmP.h \
|
||||
$(DRM_INCLUDES)/drm/drm_edid.h \
|
||||
@ -36,10 +43,11 @@ NAME_SRC= \
|
||||
$(DRM_TOPDIR)/drm_crtc.c \
|
||||
$(DRM_TOPDIR)/drm_crtc_helper.c \
|
||||
$(DRM_TOPDIR)/drm_fb_helper.c \
|
||||
$(DRM_TOPDIR)/drm_dp_i2c_helper.c \
|
||||
$(DRM_TOPDIR)/i2c/i2c-core.c \
|
||||
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
|
||||
$(DRM_TOPDIR)/idr.c \
|
||||
radeon_gem.c \
|
||||
$(DRM_TOPDIR)/list_sort.c \
|
||||
radeon_device.c \
|
||||
radeon_clocks.c \
|
||||
radeon_i2c.c \
|
||||
@ -47,6 +55,7 @@ NAME_SRC= \
|
||||
radeon_atombios.c \
|
||||
radeon_agp.c \
|
||||
atombios_crtc.c \
|
||||
atombios_dp.c \
|
||||
radeon_encoders.c \
|
||||
radeon_connectors.c \
|
||||
radeon_bios.c \
|
||||
@ -55,9 +64,10 @@ NAME_SRC= \
|
||||
radeon_legacy_encoders.c \
|
||||
radeon_legacy_tv.c \
|
||||
radeon_display.c \
|
||||
radeon_object.c \
|
||||
radeon_gart.c \
|
||||
radeon_ring.c \
|
||||
radeon_object_kos.c \
|
||||
radeon_gem.c \
|
||||
r100.c \
|
||||
r200.c \
|
||||
r300.c \
|
||||
@ -65,6 +75,8 @@ NAME_SRC= \
|
||||
rv515.c \
|
||||
r520.c \
|
||||
r600.c \
|
||||
r600_audio.c \
|
||||
r600_hdmi.c \
|
||||
rs400.c \
|
||||
rs600.c \
|
||||
rs690.c \
|
||||
@ -92,7 +104,7 @@ $(NAME).dll: $(NAME_OBJS) $(SRC_DEP) $(HFILES) atikms.lds Makefile
|
||||
|
||||
|
||||
%.o : %.c $(HFILES) Makefile
|
||||
$(CC) $(CFLAGS) $(DEFINES) $(INCLUDES) -o $@ -c $<
|
||||
$(CC) $(CFLAGS) $(DEFINES) -o $@ $<
|
||||
|
||||
%.o : %.S $(HFILES) Makefile
|
||||
as -o $@ $<
|
||||
|
@ -345,7 +345,7 @@ static pci_dev_t* pci_scan_device(u32_t bus, int devfn)
|
||||
|
||||
hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE);
|
||||
|
||||
dev = (pci_dev_t*)kzalloc(sizeof(dev_t), 0);
|
||||
dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0);
|
||||
|
||||
INIT_LIST_HEAD(&dev->link);
|
||||
|
||||
@ -407,7 +407,7 @@ int pci_scan_slot(u32_t bus, int devfn)
|
||||
void pci_scan_bus(u32_t bus)
|
||||
{
|
||||
u32_t devfn;
|
||||
dev_t *dev;
|
||||
pci_dev_t *dev;
|
||||
|
||||
|
||||
for (devfn = 0; devfn < 0x100; devfn += 8)
|
||||
|
@ -272,11 +272,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
|
||||
return RREG32(RADEON_CRTC2_CRNT_FRAME);
|
||||
}
|
||||
|
||||
/* Who ever call radeon_fence_emit should call ring_lock and ask
|
||||
* for enough space (today caller are ib schedule and buffer move) */
|
||||
void r100_fence_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_fence *fence)
|
||||
{
|
||||
/* Who ever call radeon_fence_emit should call ring_lock and ask
|
||||
* for enough space (today caller are ib schedule and buffer move) */
|
||||
/* We have to make sure that caches are flushed before
|
||||
* CPU might read something from VRAM. */
|
||||
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
|
||||
radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
|
||||
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
|
||||
radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
|
||||
/* Wait until IDLE & CLEAN */
|
||||
radeon_ring_write(rdev, PACKET0(0x1720, 0));
|
||||
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
|
||||
@ -343,9 +349,15 @@ void r100_wb_fini(struct radeon_device *rdev)
|
||||
|
||||
r100_wb_disable(rdev);
|
||||
if (rdev->wb.wb_obj) {
|
||||
// radeon_object_kunmap(rdev->wb.wb_obj);
|
||||
// radeon_object_unpin(rdev->wb.wb_obj);
|
||||
// radeon_object_unref(&rdev->wb.wb_obj);
|
||||
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
dev_err(rdev->dev, "(%d) can't finish WB\n", r);
|
||||
return;
|
||||
}
|
||||
radeon_bo_kunmap(rdev->wb.wb_obj);
|
||||
radeon_bo_unpin(rdev->wb.wb_obj);
|
||||
radeon_bo_unreserve(rdev->wb.wb_obj);
|
||||
radeon_bo_unref(&rdev->wb.wb_obj);
|
||||
rdev->wb.wb = NULL;
|
||||
rdev->wb.wb_obj = NULL;
|
||||
}
|
||||
@ -532,7 +544,6 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
static void r100_cp_load_microcode(struct radeon_device *rdev)
|
||||
{
|
||||
const __be32 *fw_data;
|
||||
@ -2814,6 +2825,7 @@ static int r100_startup(struct radeon_device *rdev)
|
||||
}
|
||||
/* Enable IRQ */
|
||||
// r100_irq_set(rdev);
|
||||
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
|
||||
/* 1M ring buffer */
|
||||
// r = r100_cp_init(rdev, 1024 * 1024);
|
||||
// if (r) {
|
||||
|
@ -152,8 +152,12 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
|
||||
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
|
||||
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
|
||||
if (rdev->gart.table.vram.robj) {
|
||||
// radeon_object_kunmap(rdev->gart.table.vram.robj);
|
||||
// radeon_object_unpin(rdev->gart.table.vram.robj);
|
||||
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
|
||||
if (likely(r == 0)) {
|
||||
radeon_bo_kunmap(rdev->gart.table.vram.robj);
|
||||
radeon_bo_unpin(rdev->gart.table.vram.robj);
|
||||
radeon_bo_unreserve(rdev->gart.table.vram.robj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -508,11 +512,14 @@ void r300_vram_info(struct radeon_device *rdev)
|
||||
|
||||
/* DDR for all card after R300 & IGP */
|
||||
rdev->mc.vram_is_ddr = true;
|
||||
|
||||
tmp = RREG32(RADEON_MEM_CNTL);
|
||||
if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
|
||||
rdev->mc.vram_width = 128;
|
||||
} else {
|
||||
rdev->mc.vram_width = 64;
|
||||
tmp &= R300_MEM_NUM_CHANNELS_MASK;
|
||||
switch (tmp) {
|
||||
case 0: rdev->mc.vram_width = 64; break;
|
||||
case 1: rdev->mc.vram_width = 128; break;
|
||||
case 2: rdev->mc.vram_width = 256; break;
|
||||
default: rdev->mc.vram_width = 128; break;
|
||||
}
|
||||
|
||||
r100_vram_init_sizes(rdev);
|
||||
@ -1355,7 +1362,7 @@ int r300_init(struct radeon_device *rdev)
|
||||
// if (r)
|
||||
// return r;
|
||||
/* Memory manager */
|
||||
r = radeon_object_init(rdev);
|
||||
r = radeon_bo_init(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
if (rdev->flags & RADEON_IS_PCIE) {
|
||||
@ -1382,7 +1389,7 @@ int r300_init(struct radeon_device *rdev)
|
||||
rv370_pcie_gart_fini(rdev);
|
||||
if (rdev->flags & RADEON_IS_PCI)
|
||||
r100_pci_gart_fini(rdev);
|
||||
// radeon_irq_kms_fini(rdev);
|
||||
// radeon_agp_fini(rdev);
|
||||
rdev->accel_working = false;
|
||||
}
|
||||
return 0;
|
||||
|
@ -348,7 +348,7 @@ int r420_init(struct radeon_device *rdev)
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
r300_set_reg_safe(rdev);
|
||||
r420_set_reg_safe(rdev);
|
||||
rdev->accel_working = true;
|
||||
r = r420_startup(rdev);
|
||||
if (r) {
|
||||
@ -363,7 +363,6 @@ int r420_init(struct radeon_device *rdev)
|
||||
if (rdev->flags & RADEON_IS_PCI)
|
||||
r100_pci_gart_fini(rdev);
|
||||
// radeon_agp_fini(rdev);
|
||||
// radeon_irq_kms_fini(rdev);
|
||||
rdev->accel_working = false;
|
||||
}
|
||||
return 0;
|
||||
|
@ -1711,3 +1711,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
|
||||
* rdev: radeon device structure
|
||||
* bo: buffer object struct which userspace is waiting for idle
|
||||
*
|
||||
* Some R6XX/R7XX doesn't seems to take into account HDP flush performed
|
||||
* through ring buffer, this leads to corruption in rendering, see
|
||||
* http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
|
||||
* directly perform HDP flush by writing register through MMIO.
|
||||
*/
|
||||
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
|
||||
{
|
||||
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
|
||||
}
|
||||
|
@ -279,14 +279,16 @@ struct radeon_bo {
|
||||
struct ttm_placement placement;
|
||||
struct ttm_buffer_object tbo;
|
||||
struct ttm_bo_kmap_obj kmap;
|
||||
unsigned pin_count;
|
||||
void *kptr;
|
||||
u32 tiling_flags;
|
||||
u32 pitch;
|
||||
int surface_reg;
|
||||
unsigned pin_count;
|
||||
void *kptr;
|
||||
u32 cpu_addr;
|
||||
u32 tiling_flags;
|
||||
u32 pitch;
|
||||
int surface_reg;
|
||||
/* Constant after initialization */
|
||||
struct radeon_device *rdev;
|
||||
struct drm_gem_object *gobj;
|
||||
u32 domain;
|
||||
};
|
||||
|
||||
struct radeon_bo_list {
|
||||
@ -697,6 +699,13 @@ struct radeon_asic {
|
||||
void (*hpd_fini)(struct radeon_device *rdev);
|
||||
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
|
||||
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
|
||||
/* ioctl hw specific callback. Some hw might want to perform special
|
||||
* operation on specific ioctl. For instance on wait idle some hw
|
||||
* might want to perform and HDP flush through MMIO as it seems that
|
||||
* some R6XX/R7XX hw doesn't take HDP flush into account if programmed
|
||||
* through ring.
|
||||
*/
|
||||
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
|
||||
};
|
||||
|
||||
/*
|
||||
@ -1149,6 +1158,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
|
||||
extern void r600_cp_stop(struct radeon_device *rdev);
|
||||
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
|
||||
extern int r600_cp_resume(struct radeon_device *rdev);
|
||||
extern void r600_cp_fini(struct radeon_device *rdev);
|
||||
extern int r600_count_pipe_bits(uint32_t val);
|
||||
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
|
||||
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
|
||||
|
@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
|
||||
.hpd_fini = &r100_hpd_fini,
|
||||
.hpd_sense = &r100_hpd_sense,
|
||||
.hpd_set_polarity = &r100_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
|
||||
@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
|
||||
.hpd_fini = &r100_hpd_fini,
|
||||
.hpd_sense = &r100_hpd_sense,
|
||||
.hpd_set_polarity = &r100_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
/*
|
||||
@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
|
||||
.hpd_fini = &r100_hpd_fini,
|
||||
.hpd_sense = &r100_hpd_sense,
|
||||
.hpd_set_polarity = &r100_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
|
||||
@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
|
||||
.hpd_fini = &r100_hpd_fini,
|
||||
.hpd_sense = &r100_hpd_sense,
|
||||
.hpd_set_polarity = &r100_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
|
||||
@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
|
||||
.hpd_fini = &rs600_hpd_fini,
|
||||
.hpd_sense = &rs600_hpd_sense,
|
||||
.hpd_set_polarity = &rs600_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
|
||||
@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
|
||||
.hpd_fini = &rs600_hpd_fini,
|
||||
.hpd_sense = &rs600_hpd_sense,
|
||||
.hpd_set_polarity = &rs600_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
|
||||
@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
|
||||
.hpd_fini = &rs600_hpd_fini,
|
||||
.hpd_sense = &rs600_hpd_sense,
|
||||
.hpd_set_polarity = &rs600_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
|
||||
@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
|
||||
.hpd_fini = &rs600_hpd_fini,
|
||||
.hpd_sense = &rs600_hpd_sense,
|
||||
.hpd_set_polarity = &rs600_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
/*
|
||||
@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
|
||||
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
|
||||
void r600_hpd_set_polarity(struct radeon_device *rdev,
|
||||
enum radeon_hpd_id hpd);
|
||||
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
|
||||
|
||||
static struct radeon_asic r600_asic = {
|
||||
.init = &r600_init,
|
||||
@ -537,6 +546,7 @@ static struct radeon_asic r600_asic = {
|
||||
.hpd_fini = &r600_hpd_fini,
|
||||
.hpd_sense = &r600_hpd_sense,
|
||||
.hpd_set_polarity = &r600_hpd_set_polarity,
|
||||
// .ioctl_wait_idle = r600_ioctl_wait_idle,
|
||||
};
|
||||
|
||||
/*
|
||||
@ -580,6 +590,7 @@ static struct radeon_asic rv770_asic = {
|
||||
.hpd_fini = &r600_hpd_fini,
|
||||
.hpd_sense = &r600_hpd_sense,
|
||||
.hpd_set_polarity = &r600_hpd_set_polarity,
|
||||
// .ioctl_wait_idle = r600_ioctl_wait_idle,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
|
||||
i2c.i2c_id = gpio->sucI2cId.ucAccess;
|
||||
|
||||
i2c.valid = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@ -345,7 +346,9 @@ const int object_connector_convert[] = {
|
||||
DRM_MODE_CONNECTOR_Unknown,
|
||||
DRM_MODE_CONNECTOR_Unknown,
|
||||
DRM_MODE_CONNECTOR_Unknown,
|
||||
DRM_MODE_CONNECTOR_DisplayPort
|
||||
DRM_MODE_CONNECTOR_DisplayPort,
|
||||
DRM_MODE_CONNECTOR_eDP,
|
||||
DRM_MODE_CONNECTOR_Unknown
|
||||
};
|
||||
|
||||
bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
|
||||
@ -745,8 +748,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
|
||||
else
|
||||
radeon_add_legacy_encoder(dev,
|
||||
radeon_get_encoder_id(dev,
|
||||
(1 <<
|
||||
i),
|
||||
(1 << i),
|
||||
dac),
|
||||
(1 << i));
|
||||
}
|
||||
@ -758,32 +760,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
|
||||
if (bios_connectors[j].valid && (i != j)) {
|
||||
if (bios_connectors[i].line_mux ==
|
||||
bios_connectors[j].line_mux) {
|
||||
if (((bios_connectors[i].
|
||||
devices &
|
||||
(ATOM_DEVICE_DFP_SUPPORT))
|
||||
&& (bios_connectors[j].
|
||||
devices &
|
||||
(ATOM_DEVICE_CRT_SUPPORT)))
|
||||
||
|
||||
((bios_connectors[j].
|
||||
devices &
|
||||
(ATOM_DEVICE_DFP_SUPPORT))
|
||||
&& (bios_connectors[i].
|
||||
devices &
|
||||
(ATOM_DEVICE_CRT_SUPPORT)))) {
|
||||
bios_connectors[i].
|
||||
devices |=
|
||||
bios_connectors[j].
|
||||
devices;
|
||||
bios_connectors[i].
|
||||
connector_type =
|
||||
/* make sure not to combine LVDS */
|
||||
if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
bios_connectors[i].line_mux = 53;
|
||||
bios_connectors[i].ddc_bus.valid = false;
|
||||
continue;
|
||||
}
|
||||
if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
bios_connectors[j].line_mux = 53;
|
||||
bios_connectors[j].ddc_bus.valid = false;
|
||||
continue;
|
||||
}
|
||||
/* combine analog and digital for DVI-I */
|
||||
if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
|
||||
(bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
|
||||
((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
|
||||
(bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
|
||||
bios_connectors[i].devices |=
|
||||
bios_connectors[j].devices;
|
||||
bios_connectors[i].connector_type =
|
||||
DRM_MODE_CONNECTOR_DVII;
|
||||
if (bios_connectors[j].devices &
|
||||
(ATOM_DEVICE_DFP_SUPPORT))
|
||||
if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
|
||||
bios_connectors[i].hpd =
|
||||
bios_connectors[j].hpd;
|
||||
bios_connectors[j].
|
||||
valid = false;
|
||||
bios_connectors[j].valid = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -938,6 +938,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
|
||||
return false;
|
||||
}
|
||||
|
||||
union igp_info {
|
||||
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
|
||||
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
|
||||
};
|
||||
|
||||
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_mode_info *mode_info = &rdev->mode_info;
|
||||
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
|
||||
union igp_info *igp_info;
|
||||
u8 frev, crev;
|
||||
u16 data_offset;
|
||||
|
||||
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
|
||||
&crev, &data_offset);
|
||||
|
||||
igp_info = (union igp_info *)(mode_info->atom_context->bios +
|
||||
data_offset);
|
||||
|
||||
if (igp_info) {
|
||||
switch (crev) {
|
||||
case 1:
|
||||
if (igp_info->info.ucMemoryType & 0xf0)
|
||||
return true;
|
||||
break;
|
||||
case 2:
|
||||
if (igp_info->info_2.ucMemoryType & 0x0f)
|
||||
return true;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
|
||||
struct radeon_encoder_int_tmds *tmds)
|
||||
{
|
||||
@ -1029,6 +1066,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
|
||||
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
|
||||
ss->range = ss_info->asSS_Info[i].ucSS_Range;
|
||||
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1234,6 +1272,61 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
|
||||
return true;
|
||||
}
|
||||
|
||||
enum radeon_tv_std
|
||||
radeon_atombios_get_tv_info(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_mode_info *mode_info = &rdev->mode_info;
|
||||
int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
|
||||
uint16_t data_offset;
|
||||
uint8_t frev, crev;
|
||||
struct _ATOM_ANALOG_TV_INFO *tv_info;
|
||||
enum radeon_tv_std tv_std = TV_STD_NTSC;
|
||||
|
||||
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
|
||||
|
||||
tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
|
||||
|
||||
switch (tv_info->ucTV_BootUpDefaultStandard) {
|
||||
case ATOM_TV_NTSC:
|
||||
tv_std = TV_STD_NTSC;
|
||||
DRM_INFO("Default TV standard: NTSC\n");
|
||||
break;
|
||||
case ATOM_TV_NTSCJ:
|
||||
tv_std = TV_STD_NTSC_J;
|
||||
DRM_INFO("Default TV standard: NTSC-J\n");
|
||||
break;
|
||||
case ATOM_TV_PAL:
|
||||
tv_std = TV_STD_PAL;
|
||||
DRM_INFO("Default TV standard: PAL\n");
|
||||
break;
|
||||
case ATOM_TV_PALM:
|
||||
tv_std = TV_STD_PAL_M;
|
||||
DRM_INFO("Default TV standard: PAL-M\n");
|
||||
break;
|
||||
case ATOM_TV_PALN:
|
||||
tv_std = TV_STD_PAL_N;
|
||||
DRM_INFO("Default TV standard: PAL-N\n");
|
||||
break;
|
||||
case ATOM_TV_PALCN:
|
||||
tv_std = TV_STD_PAL_CN;
|
||||
DRM_INFO("Default TV standard: PAL-CN\n");
|
||||
break;
|
||||
case ATOM_TV_PAL60:
|
||||
tv_std = TV_STD_PAL_60;
|
||||
DRM_INFO("Default TV standard: PAL-60\n");
|
||||
break;
|
||||
case ATOM_TV_SECAM:
|
||||
tv_std = TV_STD_SECAM;
|
||||
DRM_INFO("Default TV standard: SECAM\n");
|
||||
break;
|
||||
default:
|
||||
tv_std = TV_STD_NTSC;
|
||||
DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
|
||||
break;
|
||||
}
|
||||
return tv_std;
|
||||
}
|
||||
|
||||
struct radeon_encoder_tv_dac *
|
||||
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
|
||||
{
|
||||
@ -1269,6 +1362,7 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
|
||||
dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
|
||||
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
|
||||
|
||||
tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
|
||||
}
|
||||
return tv_dac;
|
||||
}
|
||||
|
@ -595,6 +595,48 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
|
||||
return false;
|
||||
}
|
||||
|
||||
bool radeon_combios_sideport_present(struct radeon_device *rdev)
|
||||
{
|
||||
struct drm_device *dev = rdev->ddev;
|
||||
u16 igp_info;
|
||||
|
||||
igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
|
||||
|
||||
if (igp_info) {
|
||||
if (RBIOS16(igp_info + 0x4))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static const uint32_t default_primarydac_adj[CHIP_LAST] = {
|
||||
0x00000808, /* r100 */
|
||||
0x00000808, /* rv100 */
|
||||
0x00000808, /* rs100 */
|
||||
0x00000808, /* rv200 */
|
||||
0x00000808, /* rs200 */
|
||||
0x00000808, /* r200 */
|
||||
0x00000808, /* rv250 */
|
||||
0x00000000, /* rs300 */
|
||||
0x00000808, /* rv280 */
|
||||
0x00000808, /* r300 */
|
||||
0x00000808, /* r350 */
|
||||
0x00000808, /* rv350 */
|
||||
0x00000808, /* rv380 */
|
||||
0x00000808, /* r420 */
|
||||
0x00000808, /* r423 */
|
||||
0x00000808, /* rv410 */
|
||||
0x00000000, /* rs400 */
|
||||
0x00000000, /* rs480 */
|
||||
};
|
||||
|
||||
static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
|
||||
struct radeon_encoder_primary_dac *p_dac)
|
||||
{
|
||||
p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
|
||||
return;
|
||||
}
|
||||
|
||||
struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
|
||||
radeon_encoder
|
||||
*encoder)
|
||||
@ -604,20 +646,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
|
||||
uint16_t dac_info;
|
||||
uint8_t rev, bg, dac;
|
||||
struct radeon_encoder_primary_dac *p_dac = NULL;
|
||||
int found = 0;
|
||||
|
||||
p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!p_dac)
|
||||
return NULL;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
return NULL;
|
||||
goto out;
|
||||
|
||||
/* check CRT table */
|
||||
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
|
||||
if (dac_info) {
|
||||
p_dac =
|
||||
kzalloc(sizeof(struct radeon_encoder_primary_dac),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!p_dac)
|
||||
return NULL;
|
||||
|
||||
rev = RBIOS8(dac_info) & 0x3;
|
||||
if (rev < 2) {
|
||||
bg = RBIOS8(dac_info + 0x2) & 0xf;
|
||||
@ -628,20 +670,26 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
|
||||
dac = RBIOS8(dac_info + 0x3) & 0xf;
|
||||
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
|
||||
}
|
||||
|
||||
found = 1;
|
||||
}
|
||||
|
||||
out:
|
||||
if (!found) /* fallback to defaults */
|
||||
radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
|
||||
|
||||
return p_dac;
|
||||
}
|
||||
|
||||
static enum radeon_tv_std
|
||||
radeon_combios_get_tv_info(struct radeon_encoder *encoder)
|
||||
enum radeon_tv_std
|
||||
radeon_combios_get_tv_info(struct radeon_device *rdev)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct drm_device *dev = rdev->ddev;
|
||||
uint16_t tv_info;
|
||||
enum radeon_tv_std tv_std = TV_STD_NTSC;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
return tv_std;
|
||||
|
||||
tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
|
||||
if (tv_info) {
|
||||
if (RBIOS8(tv_info + 6) == 'T') {
|
||||
@ -779,7 +827,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
|
||||
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
|
||||
found = 1;
|
||||
}
|
||||
tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
|
||||
tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
|
||||
}
|
||||
if (!found) {
|
||||
/* then check CRT table */
|
||||
@ -923,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
|
||||
lvds->native_mode.vdisplay);
|
||||
|
||||
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
|
||||
if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
|
||||
lvds->panel_vcc_delay = 2000;
|
||||
lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
|
||||
|
||||
lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
|
||||
lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
|
||||
@ -1211,7 +1258,7 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
|
||||
default:
|
||||
DRM_ERROR("Unsupported gpio %d\n", gpio);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
|
||||
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_encoder_helper_funcs *encoder_funcs;
|
||||
bool dret;
|
||||
bool dret = false;
|
||||
enum drm_connector_status ret = connector_status_disconnected;
|
||||
|
||||
encoder = radeon_best_single_encoder(connector);
|
||||
if (!encoder)
|
||||
ret = connector_status_disconnected;
|
||||
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
dret = radeon_ddc_probe(radeon_connector);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
if (radeon_connector->ddc_bus) {
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
dret = radeon_ddc_probe(radeon_connector);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
}
|
||||
if (dret) {
|
||||
if (radeon_connector->edid) {
|
||||
kfree(radeon_connector->edid);
|
||||
@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
|
||||
struct drm_mode_object *obj;
|
||||
int i;
|
||||
enum drm_connector_status ret = connector_status_disconnected;
|
||||
bool dret;
|
||||
bool dret = false;
|
||||
|
||||
if (radeon_connector->ddc_bus) {
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
dret = radeon_ddc_probe(radeon_connector);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
}
|
||||
if (dret) {
|
||||
if (radeon_connector->edid) {
|
||||
kfree(radeon_connector->edid);
|
||||
@ -1343,7 +1347,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
|
||||
radeon_connector->dac_load_detect = false;
|
||||
drm_connector_attach_property(&radeon_connector->base,
|
||||
rdev->mode_info.load_detect_property,
|
||||
1);
|
||||
radeon_connector->dac_load_detect);
|
||||
drm_connector_attach_property(&radeon_connector->base,
|
||||
rdev->mode_info.tv_std_property,
|
||||
radeon_combios_get_tv_info(rdev));
|
||||
|
@ -1,260 +0,0 @@
|
||||
/*
|
||||
* Copyright 2007-8 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
*/
|
||||
#include "drmP.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon.h"
|
||||
|
||||
#define CURSOR_WIDTH 64
|
||||
#define CURSOR_HEIGHT 64
|
||||
|
||||
static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
|
||||
{
|
||||
struct radeon_device *rdev = crtc->dev->dev_private;
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
uint32_t cur_lock;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
|
||||
if (lock)
|
||||
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
|
||||
else
|
||||
cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
|
||||
WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
|
||||
} else {
|
||||
cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
|
||||
if (lock)
|
||||
cur_lock |= RADEON_CUR_LOCK;
|
||||
else
|
||||
cur_lock &= ~RADEON_CUR_LOCK;
|
||||
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
|
||||
}
|
||||
}
|
||||
|
||||
static void radeon_hide_cursor(struct drm_crtc *crtc)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct radeon_device *rdev = crtc->dev->dev_private;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
|
||||
WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
|
||||
} else {
|
||||
switch (radeon_crtc->crtc_id) {
|
||||
case 0:
|
||||
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
|
||||
break;
|
||||
case 1:
|
||||
WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
|
||||
}
|
||||
}
|
||||
|
||||
static void radeon_show_cursor(struct drm_crtc *crtc)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct radeon_device *rdev = crtc->dev->dev_private;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
|
||||
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
|
||||
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
|
||||
} else {
|
||||
switch (radeon_crtc->crtc_id) {
|
||||
case 0:
|
||||
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
|
||||
break;
|
||||
case 1:
|
||||
WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
|
||||
(RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
|
||||
~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
|
||||
}
|
||||
}
|
||||
|
||||
static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
|
||||
uint32_t gpu_addr)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct radeon_device *rdev = crtc->dev->dev_private;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
|
||||
else {
|
||||
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
|
||||
/* offset is from DISP(2)_BASE_ADDRESS */
|
||||
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
|
||||
}
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
||||
int radeon_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
struct drm_file *file_priv,
|
||||
uint32_t handle,
|
||||
uint32_t width,
|
||||
uint32_t height)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_gem_object *obj;
|
||||
uint64_t gpu_addr;
|
||||
int ret;
|
||||
|
||||
if (!handle) {
|
||||
/* turn off cursor */
|
||||
radeon_hide_cursor(crtc);
|
||||
obj = NULL;
|
||||
goto unpin;
|
||||
}
|
||||
|
||||
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
|
||||
DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
radeon_crtc->cursor_width = width;
|
||||
radeon_crtc->cursor_height = height;
|
||||
|
||||
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
|
||||
if (!obj) {
|
||||
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
radeon_lock_cursor(crtc, true);
|
||||
/* XXX only 27 bit offset for legacy cursor */
|
||||
radeon_set_cursor(crtc, obj, gpu_addr);
|
||||
radeon_show_cursor(crtc);
|
||||
radeon_lock_cursor(crtc, false);
|
||||
|
||||
unpin:
|
||||
if (radeon_crtc->cursor_bo) {
|
||||
radeon_gem_object_unpin(radeon_crtc->cursor_bo);
|
||||
mutex_lock(&crtc->dev->struct_mutex);
|
||||
drm_gem_object_unreference(radeon_crtc->cursor_bo);
|
||||
mutex_unlock(&crtc->dev->struct_mutex);
|
||||
}
|
||||
|
||||
radeon_crtc->cursor_bo = obj;
|
||||
return 0;
|
||||
fail:
|
||||
mutex_lock(&crtc->dev->struct_mutex);
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&crtc->dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int radeon_crtc_cursor_move(struct drm_crtc *crtc,
|
||||
int x, int y)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct radeon_device *rdev = crtc->dev->dev_private;
|
||||
int xorigin = 0, yorigin = 0;
|
||||
|
||||
if (x < 0)
|
||||
xorigin = -x + 1;
|
||||
if (y < 0)
|
||||
yorigin = -y + 1;
|
||||
if (xorigin >= CURSOR_WIDTH)
|
||||
xorigin = CURSOR_WIDTH - 1;
|
||||
if (yorigin >= CURSOR_HEIGHT)
|
||||
yorigin = CURSOR_HEIGHT - 1;
|
||||
|
||||
radeon_lock_cursor(crtc, true);
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
int w = radeon_crtc->cursor_width;
|
||||
int i = 0;
|
||||
struct drm_crtc *crtc_p;
|
||||
|
||||
/* avivo cursor are offset into the total surface */
|
||||
x += crtc->x;
|
||||
y += crtc->y;
|
||||
DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
|
||||
|
||||
/* avivo cursor image can't end on 128 pixel boundry or
|
||||
* go past the end of the frame if both crtcs are enabled
|
||||
*/
|
||||
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
|
||||
if (crtc_p->enabled)
|
||||
i++;
|
||||
}
|
||||
if (i > 1) {
|
||||
int cursor_end, frame_end;
|
||||
|
||||
cursor_end = x - xorigin + w;
|
||||
frame_end = crtc->x + crtc->mode.crtc_hdisplay;
|
||||
if (cursor_end >= frame_end) {
|
||||
w = w - (cursor_end - frame_end);
|
||||
if (!(frame_end & 0x7f))
|
||||
w--;
|
||||
} else {
|
||||
if (!(cursor_end & 0x7f))
|
||||
w--;
|
||||
}
|
||||
if (w <= 0)
|
||||
w = 1;
|
||||
}
|
||||
|
||||
WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
|
||||
((xorigin ? 0 : x) << 16) |
|
||||
(yorigin ? 0 : y));
|
||||
WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
|
||||
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
|
||||
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
|
||||
} else {
|
||||
if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
y *= 2;
|
||||
|
||||
WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
|
||||
(RADEON_CUR_LOCK
|
||||
| (xorigin << 16)
|
||||
| yorigin));
|
||||
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
|
||||
(RADEON_CUR_LOCK
|
||||
| ((xorigin ? 0 : x) << 16)
|
||||
| (yorigin ? 0 : y)));
|
||||
/* offset is from DISP(2)_BASE_ADDRESS */
|
||||
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
|
||||
(yorigin * 256)));
|
||||
}
|
||||
radeon_lock_cursor(crtc, false);
|
||||
|
||||
return 0;
|
||||
}
|
@ -46,13 +46,17 @@ int radeon_benchmarking = 0;
|
||||
int radeon_connector_table = 0;
|
||||
int radeon_tv = 0;
|
||||
int radeon_modeset = 1;
|
||||
int radeon_new_pll = 1;
|
||||
int radeon_vram_limit = 0;
|
||||
int radeon_audio = 0;
|
||||
|
||||
void parse_cmdline(char *cmdline, mode_t *mode, char *log, int *kms);
|
||||
int init_display(struct radeon_device *rdev, mode_t *mode);
|
||||
int init_display_kms(struct radeon_device *rdev, mode_t *mode);
|
||||
|
||||
int get_modes(mode_t *mode, int *count);
|
||||
int set_user_mode(mode_t *mode);
|
||||
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
|
||||
int init_display(struct radeon_device *rdev, videomode_t *mode);
|
||||
int init_display_kms(struct radeon_device *rdev, videomode_t *mode);
|
||||
|
||||
int get_modes(videomode_t *mode, int *count);
|
||||
int set_user_mode(videomode_t *mode);
|
||||
|
||||
|
||||
/* Legacy VGA regions */
|
||||
@ -71,17 +75,12 @@ int set_user_mode(mode_t *mode);
|
||||
*/
|
||||
void radeon_surface_init(struct radeon_device *rdev)
|
||||
{
|
||||
ENTER();
|
||||
|
||||
/* FIXME: check this out */
|
||||
if (rdev->family < CHIP_R600) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
|
||||
if (rdev->surface_regs[i].bo)
|
||||
radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
|
||||
else
|
||||
radeon_clear_surface_reg(rdev, i);
|
||||
radeon_clear_surface_reg(rdev, i);
|
||||
}
|
||||
/* enable surfaces */
|
||||
WREG32(RADEON_SURFACE_CNTL, 0);
|
||||
@ -421,6 +420,12 @@ int radeon_asic_init(struct radeon_device *rdev)
|
||||
/* FIXME: not supported yet */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
rdev->asic->get_memory_clock = NULL;
|
||||
rdev->asic->set_memory_clock = NULL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -567,11 +572,75 @@ void radeon_agp_disable(struct radeon_device *rdev)
|
||||
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
|
||||
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
|
||||
}
|
||||
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
|
||||
}
|
||||
|
||||
void radeon_check_arguments(struct radeon_device *rdev)
|
||||
{
|
||||
/* vramlimit must be a power of two */
|
||||
switch (radeon_vram_limit) {
|
||||
case 0:
|
||||
case 4:
|
||||
case 8:
|
||||
case 16:
|
||||
case 32:
|
||||
case 64:
|
||||
case 128:
|
||||
case 256:
|
||||
case 512:
|
||||
case 1024:
|
||||
case 2048:
|
||||
case 4096:
|
||||
break;
|
||||
default:
|
||||
dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
|
||||
radeon_vram_limit);
|
||||
radeon_vram_limit = 0;
|
||||
break;
|
||||
}
|
||||
radeon_vram_limit = radeon_vram_limit << 20;
|
||||
/* gtt size must be power of two and greater or equal to 32M */
|
||||
switch (radeon_gart_size) {
|
||||
case 4:
|
||||
case 8:
|
||||
case 16:
|
||||
dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
|
||||
radeon_gart_size);
|
||||
radeon_gart_size = 512;
|
||||
break;
|
||||
case 32:
|
||||
case 64:
|
||||
case 128:
|
||||
case 256:
|
||||
case 512:
|
||||
case 1024:
|
||||
case 2048:
|
||||
case 4096:
|
||||
break;
|
||||
default:
|
||||
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
|
||||
radeon_gart_size);
|
||||
radeon_gart_size = 512;
|
||||
break;
|
||||
}
|
||||
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
|
||||
/* AGP mode can only be -1, 1, 2, 4, 8 */
|
||||
switch (radeon_agpmode) {
|
||||
case -1:
|
||||
case 0:
|
||||
case 1:
|
||||
case 2:
|
||||
case 4:
|
||||
case 8:
|
||||
break;
|
||||
default:
|
||||
dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
|
||||
"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
|
||||
radeon_agpmode = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Radeon device.
|
||||
*/
|
||||
int radeon_device_init(struct radeon_device *rdev,
|
||||
struct drm_device *ddev,
|
||||
struct pci_dev *pdev,
|
||||
@ -600,9 +669,9 @@ int radeon_device_init(struct radeon_device *rdev,
|
||||
|
||||
/* Set asic functions */
|
||||
r = radeon_asic_init(rdev);
|
||||
if (r) {
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
radeon_check_arguments(rdev);
|
||||
|
||||
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
|
||||
radeon_agp_disable(rdev);
|
||||
@ -723,7 +792,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
|
||||
return 0;
|
||||
}
|
||||
|
||||
mode_t usermode;
|
||||
videomode_t usermode;
|
||||
|
||||
|
||||
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
@ -867,9 +936,9 @@ int _stdcall display_handler(ioctl_t *io)
|
||||
|
||||
if( radeon_modeset &&
|
||||
(outp != NULL) && (io->out_size == 4) &&
|
||||
(io->inp_size == *outp * sizeof(mode_t)) )
|
||||
(io->inp_size == *outp * sizeof(videomode_t)) )
|
||||
{
|
||||
retval = get_modes((mode_t*)inp, outp);
|
||||
retval = get_modes((videomode_t*)inp, outp);
|
||||
};
|
||||
break;
|
||||
|
||||
@ -879,9 +948,9 @@ int _stdcall display_handler(ioctl_t *io)
|
||||
|
||||
if( radeon_modeset &&
|
||||
(inp != NULL) &&
|
||||
(io->inp_size == sizeof(mode_t)) )
|
||||
(io->inp_size == sizeof(videomode_t)) )
|
||||
{
|
||||
retval = set_user_mode((mode_t*)inp);
|
||||
retval = set_user_mode((videomode_t*)inp);
|
||||
};
|
||||
break;
|
||||
};
|
||||
@ -890,7 +959,7 @@ int _stdcall display_handler(ioctl_t *io)
|
||||
}
|
||||
|
||||
static char log[256];
|
||||
static dev_t device;
|
||||
static pci_dev_t device;
|
||||
|
||||
u32_t drvEntry(int action, char *cmdline)
|
||||
{
|
||||
@ -918,7 +987,7 @@ u32_t drvEntry(int action, char *cmdline)
|
||||
return 0;
|
||||
};
|
||||
}
|
||||
dbgprintf("Radeon RC09 cmdline %s\n", cmdline);
|
||||
dbgprintf("Radeon RC9 cmdline %s\n", cmdline);
|
||||
|
||||
enum_pci_devices();
|
||||
|
||||
|
@ -234,7 +234,7 @@ static const char *encoder_names[34] = {
|
||||
"INTERNAL_UNIPHY2",
|
||||
};
|
||||
|
||||
static const char *connector_names[13] = {
|
||||
static const char *connector_names[15] = {
|
||||
"Unknown",
|
||||
"VGA",
|
||||
"DVI-I",
|
||||
@ -248,6 +248,18 @@ static const char *connector_names[13] = {
|
||||
"DisplayPort",
|
||||
"HDMI-A",
|
||||
"HDMI-B",
|
||||
"TV",
|
||||
"eDP",
|
||||
};
|
||||
|
||||
static const char *hpd_names[7] = {
|
||||
"NONE",
|
||||
"HPD1",
|
||||
"HPD2",
|
||||
"HPD3",
|
||||
"HPD4",
|
||||
"HPD5",
|
||||
"HPD6",
|
||||
};
|
||||
|
||||
static void radeon_print_display_setup(struct drm_device *dev)
|
||||
@ -264,16 +276,27 @@ static void radeon_print_display_setup(struct drm_device *dev)
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
DRM_INFO("Connector %d:\n", i);
|
||||
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
|
||||
if (radeon_connector->ddc_bus)
|
||||
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
|
||||
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
|
||||
if (radeon_connector->ddc_bus) {
|
||||
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
|
||||
radeon_connector->ddc_bus->rec.mask_clk_reg,
|
||||
radeon_connector->ddc_bus->rec.mask_data_reg,
|
||||
radeon_connector->ddc_bus->rec.a_clk_reg,
|
||||
radeon_connector->ddc_bus->rec.a_data_reg,
|
||||
radeon_connector->ddc_bus->rec.put_clk_reg,
|
||||
radeon_connector->ddc_bus->rec.put_data_reg,
|
||||
radeon_connector->ddc_bus->rec.get_clk_reg,
|
||||
radeon_connector->ddc_bus->rec.get_data_reg);
|
||||
radeon_connector->ddc_bus->rec.en_clk_reg,
|
||||
radeon_connector->ddc_bus->rec.en_data_reg,
|
||||
radeon_connector->ddc_bus->rec.y_clk_reg,
|
||||
radeon_connector->ddc_bus->rec.y_data_reg);
|
||||
} else {
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
|
||||
DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
|
||||
}
|
||||
DRM_INFO(" Encoders:\n");
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
@ -317,13 +340,17 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
|
||||
ret = radeon_get_atom_connector_info_from_object_table(dev);
|
||||
else
|
||||
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
|
||||
} else
|
||||
} else {
|
||||
ret = radeon_get_legacy_connector_info_from_bios(dev);
|
||||
if (ret == false)
|
||||
ret = radeon_get_legacy_connector_info_from_table(dev);
|
||||
}
|
||||
} else {
|
||||
if (!ASIC_IS_AVIVO(rdev))
|
||||
ret = radeon_get_legacy_connector_info_from_table(dev);
|
||||
}
|
||||
if (ret) {
|
||||
radeon_setup_encoder_clones(dev);
|
||||
radeon_print_display_setup(dev);
|
||||
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
|
||||
radeon_ddc_dump(drm_connector);
|
||||
@ -336,12 +363,19 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
|
||||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
|
||||
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
|
||||
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
|
||||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
|
||||
}
|
||||
if (!radeon_connector->ddc_bus)
|
||||
return -1;
|
||||
if (!radeon_connector->edid) {
|
||||
radeon_i2c_do_lock(radeon_connector, 1);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
|
||||
radeon_i2c_do_lock(radeon_connector, 0);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
}
|
||||
|
||||
if (radeon_connector->edid) {
|
||||
@ -361,9 +395,9 @@ static int radeon_ddc_dump(struct drm_connector *connector)
|
||||
|
||||
if (!radeon_connector->ddc_bus)
|
||||
return -1;
|
||||
radeon_i2c_do_lock(radeon_connector, 1);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
|
||||
radeon_i2c_do_lock(radeon_connector, 0);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
if (edid) {
|
||||
kfree(edid);
|
||||
}
|
||||
@ -386,11 +420,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
|
||||
uint32_t *fb_div_p,
|
||||
uint32_t *frac_fb_div_p,
|
||||
uint32_t *ref_div_p,
|
||||
uint32_t *post_div_p,
|
||||
int flags)
|
||||
uint32_t *post_div_p)
|
||||
{
|
||||
uint32_t min_ref_div = pll->min_ref_div;
|
||||
uint32_t max_ref_div = pll->max_ref_div;
|
||||
uint32_t min_post_div = pll->min_post_div;
|
||||
uint32_t max_post_div = pll->max_post_div;
|
||||
uint32_t min_fractional_feed_div = 0;
|
||||
uint32_t max_fractional_feed_div = 0;
|
||||
uint32_t best_vco = pll->best_vco;
|
||||
@ -406,7 +441,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
|
||||
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
|
||||
freq = freq * 1000;
|
||||
|
||||
if (flags & RADEON_PLL_USE_REF_DIV)
|
||||
if (pll->flags & RADEON_PLL_USE_REF_DIV)
|
||||
min_ref_div = max_ref_div = pll->reference_div;
|
||||
else {
|
||||
while (min_ref_div < max_ref_div-1) {
|
||||
@ -421,19 +456,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
|
||||
}
|
||||
}
|
||||
|
||||
if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
|
||||
if (pll->flags & RADEON_PLL_USE_POST_DIV)
|
||||
min_post_div = max_post_div = pll->post_div;
|
||||
|
||||
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
|
||||
min_fractional_feed_div = pll->min_frac_feedback_div;
|
||||
max_fractional_feed_div = pll->max_frac_feedback_div;
|
||||
}
|
||||
|
||||
for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
|
||||
for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
|
||||
uint32_t ref_div;
|
||||
|
||||
if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
|
||||
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
|
||||
continue;
|
||||
|
||||
/* legacy radeons only have a few post_divs */
|
||||
if (flags & RADEON_PLL_LEGACY) {
|
||||
if (pll->flags & RADEON_PLL_LEGACY) {
|
||||
if ((post_div == 5) ||
|
||||
(post_div == 7) ||
|
||||
(post_div == 9) ||
|
||||
@ -480,7 +518,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
|
||||
tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
|
||||
current_freq = radeon_div(tmp, ref_div * post_div);
|
||||
|
||||
if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
|
||||
if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
|
||||
error = freq - current_freq;
|
||||
error = error < 0 ? 0xffffffff : error;
|
||||
} else
|
||||
@ -507,12 +545,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
|
||||
best_freq = current_freq;
|
||||
best_error = error;
|
||||
best_vco_diff = vco_diff;
|
||||
} else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
|
||||
((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
|
||||
((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
|
||||
((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
|
||||
((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
|
||||
((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
|
||||
} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
|
||||
((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
|
||||
((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
|
||||
((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
|
||||
((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
|
||||
((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
|
||||
best_post_div = post_div;
|
||||
best_ref_div = ref_div;
|
||||
best_feedback_div = feedback_div;
|
||||
@ -542,6 +580,97 @@ void radeon_compute_pll(struct radeon_pll *pll,
|
||||
*post_div_p = best_post_div;
|
||||
}
|
||||
|
||||
void radeon_compute_pll_avivo(struct radeon_pll *pll,
|
||||
uint64_t freq,
|
||||
uint32_t *dot_clock_p,
|
||||
uint32_t *fb_div_p,
|
||||
uint32_t *frac_fb_div_p,
|
||||
uint32_t *ref_div_p,
|
||||
uint32_t *post_div_p)
|
||||
{
|
||||
fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
|
||||
fixed20_12 pll_out_max, pll_out_min;
|
||||
fixed20_12 pll_in_max, pll_in_min;
|
||||
fixed20_12 reference_freq;
|
||||
fixed20_12 error, ffreq, a, b;
|
||||
|
||||
pll_out_max.full = rfixed_const(pll->pll_out_max);
|
||||
pll_out_min.full = rfixed_const(pll->pll_out_min);
|
||||
pll_in_max.full = rfixed_const(pll->pll_in_max);
|
||||
pll_in_min.full = rfixed_const(pll->pll_in_min);
|
||||
reference_freq.full = rfixed_const(pll->reference_freq);
|
||||
do_div(freq, 10);
|
||||
ffreq.full = rfixed_const(freq);
|
||||
error.full = rfixed_const(100 * 100);
|
||||
|
||||
/* max p */
|
||||
p.full = rfixed_div(pll_out_max, ffreq);
|
||||
p.full = rfixed_floor(p);
|
||||
|
||||
/* min m */
|
||||
m.full = rfixed_div(reference_freq, pll_in_max);
|
||||
m.full = rfixed_ceil(m);
|
||||
|
||||
while (1) {
|
||||
n.full = rfixed_div(ffreq, reference_freq);
|
||||
n.full = rfixed_mul(n, m);
|
||||
n.full = rfixed_mul(n, p);
|
||||
|
||||
f_vco.full = rfixed_div(n, m);
|
||||
f_vco.full = rfixed_mul(f_vco, reference_freq);
|
||||
|
||||
f_pclk.full = rfixed_div(f_vco, p);
|
||||
|
||||
if (f_pclk.full > ffreq.full)
|
||||
error.full = f_pclk.full - ffreq.full;
|
||||
else
|
||||
error.full = ffreq.full - f_pclk.full;
|
||||
error.full = rfixed_div(error, f_pclk);
|
||||
a.full = rfixed_const(100 * 100);
|
||||
error.full = rfixed_mul(error, a);
|
||||
|
||||
a.full = rfixed_mul(m, p);
|
||||
a.full = rfixed_div(n, a);
|
||||
best_freq.full = rfixed_mul(reference_freq, a);
|
||||
|
||||
if (rfixed_trunc(error) < 25)
|
||||
break;
|
||||
|
||||
a.full = rfixed_const(1);
|
||||
m.full = m.full + a.full;
|
||||
a.full = rfixed_div(reference_freq, m);
|
||||
if (a.full >= pll_in_min.full)
|
||||
continue;
|
||||
|
||||
m.full = rfixed_div(reference_freq, pll_in_max);
|
||||
m.full = rfixed_ceil(m);
|
||||
a.full= rfixed_const(1);
|
||||
p.full = p.full - a.full;
|
||||
a.full = rfixed_mul(p, ffreq);
|
||||
if (a.full >= pll_out_min.full)
|
||||
continue;
|
||||
else {
|
||||
DRM_ERROR("Unable to find pll dividers\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
a.full = rfixed_const(10);
|
||||
b.full = rfixed_mul(n, a);
|
||||
|
||||
frac_n.full = rfixed_floor(n);
|
||||
frac_n.full = rfixed_mul(frac_n, a);
|
||||
frac_n.full = b.full - frac_n.full;
|
||||
|
||||
*dot_clock_p = rfixed_trunc(best_freq);
|
||||
*fb_div_p = rfixed_trunc(n);
|
||||
*frac_fb_div_p = rfixed_trunc(frac_n);
|
||||
*ref_div_p = rfixed_trunc(m);
|
||||
*post_div_p = rfixed_trunc(p);
|
||||
|
||||
DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
|
||||
}
|
||||
|
||||
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
|
||||
{
|
||||
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
|
||||
@ -632,7 +761,7 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
|
||||
{ TV_STD_SECAM, "secam" },
|
||||
};
|
||||
|
||||
int radeon_modeset_create_props(struct radeon_device *rdev)
|
||||
static int radeon_modeset_create_props(struct radeon_device *rdev)
|
||||
{
|
||||
int i, sz;
|
||||
|
||||
@ -645,7 +774,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
|
||||
return -ENOMEM;
|
||||
|
||||
rdev->mode_info.coherent_mode_property->values[0] = 0;
|
||||
rdev->mode_info.coherent_mode_property->values[0] = 1;
|
||||
rdev->mode_info.coherent_mode_property->values[1] = 1;
|
||||
}
|
||||
|
||||
if (!ASIC_IS_AVIVO(rdev)) {
|
||||
@ -669,7 +798,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
|
||||
if (!rdev->mode_info.load_detect_property)
|
||||
return -ENOMEM;
|
||||
rdev->mode_info.load_detect_property->values[0] = 0;
|
||||
rdev->mode_info.load_detect_property->values[0] = 1;
|
||||
rdev->mode_info.load_detect_property->values[1] = 1;
|
||||
|
||||
drm_mode_create_scaling_mode_property(rdev->ddev);
|
||||
|
||||
@ -726,6 +855,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
|
||||
if (!ret) {
|
||||
return ret;
|
||||
}
|
||||
/* initialize hpd */
|
||||
radeon_hpd_init(rdev);
|
||||
drm_helper_initial_config(rdev->ddev);
|
||||
return 0;
|
||||
}
|
||||
@ -733,6 +864,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
|
||||
void radeon_modeset_fini(struct radeon_device *rdev)
|
||||
{
|
||||
if (rdev->mode_info.mode_config_initialized) {
|
||||
radeon_hpd_fini(rdev);
|
||||
drm_mode_config_cleanup(rdev->ddev);
|
||||
rdev->mode_info.mode_config_initialized = false;
|
||||
}
|
||||
@ -753,7 +885,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
|
||||
if (encoder->crtc != crtc)
|
||||
continue;
|
||||
if (first) {
|
||||
/* set scaling */
|
||||
if (radeon_encoder->rmx_type == RMX_OFF)
|
||||
radeon_crtc->rmx_type = RMX_OFF;
|
||||
else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
|
||||
mode->vdisplay < radeon_encoder->native_mode.vdisplay)
|
||||
radeon_crtc->rmx_type = radeon_encoder->rmx_type;
|
||||
else
|
||||
radeon_crtc->rmx_type = RMX_OFF;
|
||||
/* copy native mode */
|
||||
memcpy(&radeon_crtc->native_mode,
|
||||
&radeon_encoder->native_mode,
|
||||
sizeof(struct drm_display_mode));
|
||||
|
@ -146,8 +146,8 @@ int radeonfb_create(struct drm_device *dev,
|
||||
struct radeon_framebuffer *rfb;
|
||||
struct drm_mode_fb_cmd mode_cmd;
|
||||
struct drm_gem_object *gobj = NULL;
|
||||
struct radeon_object *robj = NULL;
|
||||
void *device = NULL; //&rdev->pdev->dev;
|
||||
struct radeon_bo *rbo = NULL;
|
||||
// struct device *device = &rdev->pdev->dev;
|
||||
int size, aligned_size, ret;
|
||||
u64 fb_gpuaddr;
|
||||
void *fbptr = NULL;
|
||||
@ -163,7 +163,7 @@ int radeonfb_create(struct drm_device *dev,
|
||||
if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
|
||||
surface_bpp = 32;
|
||||
|
||||
mode_cmd.bpp = 32;
|
||||
mode_cmd.bpp = surface_bpp;
|
||||
/* need to align pitch with crtc limits */
|
||||
mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
|
||||
mode_cmd.depth = surface_depth;
|
||||
@ -171,18 +171,40 @@ int radeonfb_create(struct drm_device *dev,
|
||||
size = mode_cmd.pitch * mode_cmd.height;
|
||||
aligned_size = ALIGN(size, PAGE_SIZE);
|
||||
|
||||
ret = radeon_gem_fb_object_create(rdev, aligned_size, 0,
|
||||
ret = radeon_gem_object_create(rdev, aligned_size, 0,
|
||||
RADEON_GEM_DOMAIN_VRAM,
|
||||
false, 0,
|
||||
false, &gobj);
|
||||
false, ttm_bo_type_kernel,
|
||||
&gobj);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
|
||||
surface_width, surface_height);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
robj = gobj->driver_private;
|
||||
rbo = gobj->driver_private;
|
||||
|
||||
if (fb_tiled)
|
||||
tiling_flags = RADEON_TILING_MACRO;
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
switch (mode_cmd.bpp) {
|
||||
case 32:
|
||||
tiling_flags |= RADEON_TILING_SWAP_32BIT;
|
||||
break;
|
||||
case 16:
|
||||
tiling_flags |= RADEON_TILING_SWAP_16BIT;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (tiling_flags) {
|
||||
ret = radeon_bo_set_tiling_flags(rbo,
|
||||
tiling_flags | RADEON_TILING_SURFACE,
|
||||
mode_cmd.pitch);
|
||||
if (ret)
|
||||
dev_err(rdev->dev, "FB failed to set tiling flags\n");
|
||||
}
|
||||
mutex_lock(&rdev->ddev->struct_mutex);
|
||||
fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
|
||||
if (fb == NULL) {
|
||||
@ -190,10 +212,19 @@ int radeonfb_create(struct drm_device *dev,
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
}
|
||||
ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
|
||||
ret = radeon_bo_reserve(rbo, false);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_unref;
|
||||
ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
|
||||
if (ret) {
|
||||
radeon_bo_unreserve(rbo);
|
||||
goto out_unref;
|
||||
}
|
||||
if (fb_tiled)
|
||||
radeon_bo_check_tiling(rbo, 0, 0);
|
||||
ret = radeon_bo_kmap(rbo, &fbptr);
|
||||
radeon_bo_unreserve(rbo);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "failed to pin framebuffer\n");
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
@ -202,9 +233,9 @@ int radeonfb_create(struct drm_device *dev,
|
||||
*fb_p = fb;
|
||||
rfb = to_radeon_framebuffer(fb);
|
||||
rdev->fbdev_rfb = rfb;
|
||||
rdev->fbdev_robj = robj;
|
||||
rdev->fbdev_rbo = rbo;
|
||||
|
||||
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
|
||||
info = framebuffer_alloc(sizeof(struct radeon_fb_device), NULL);
|
||||
if (info == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
@ -223,14 +254,7 @@ int radeonfb_create(struct drm_device *dev,
|
||||
if (ret)
|
||||
goto out_unref;
|
||||
|
||||
// ret = radeon_object_kmap(robj, &fbptr);
|
||||
// if (ret) {
|
||||
// goto out_unref;
|
||||
// }
|
||||
|
||||
|
||||
fbptr = (void*)0xFE000000; // LFB_BASE
|
||||
|
||||
|
||||
strcpy(info->fix.id, "radeondrmfb");
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
|
||||
@ -277,12 +301,16 @@ int radeonfb_create(struct drm_device *dev,
|
||||
return 0;
|
||||
|
||||
out_unref:
|
||||
if (robj) {
|
||||
// radeon_object_kunmap(robj);
|
||||
if (rbo) {
|
||||
ret = radeon_bo_reserve(rbo, false);
|
||||
if (likely(ret == 0)) {
|
||||
radeon_bo_kunmap(rbo);
|
||||
radeon_bo_unreserve(rbo);
|
||||
}
|
||||
}
|
||||
if (fb && ret) {
|
||||
list_del(&fb->filp_head);
|
||||
// drm_gem_object_unreference(gobj);
|
||||
// drm_gem_object_unreference(gobj);
|
||||
// drm_framebuffer_cleanup(fb);
|
||||
kfree(fb);
|
||||
}
|
||||
@ -294,6 +322,13 @@ out:
|
||||
|
||||
int radeonfb_probe(struct drm_device *dev)
|
||||
{
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
int bpp_sel = 32;
|
||||
|
||||
/* select 8 bpp console on RN50 or 16MB cards */
|
||||
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
|
||||
bpp_sel = 8;
|
||||
|
||||
return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
|
||||
}
|
||||
|
||||
@ -301,7 +336,8 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
|
||||
{
|
||||
struct fb_info *info;
|
||||
struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
|
||||
struct radeon_object *robj;
|
||||
struct radeon_bo *rbo;
|
||||
int r;
|
||||
|
||||
if (!fb) {
|
||||
return -EINVAL;
|
||||
@ -309,11 +345,16 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
|
||||
info = fb->fbdev;
|
||||
if (info) {
|
||||
struct radeon_fb_device *rfbdev = info->par;
|
||||
robj = rfb->obj->driver_private;
|
||||
rbo = rfb->obj->driver_private;
|
||||
// unregister_framebuffer(info);
|
||||
// radeon_object_kunmap(robj);
|
||||
// radeon_object_unpin(robj);
|
||||
// framebuffer_release(info);
|
||||
r = radeon_bo_reserve(rbo, false);
|
||||
if (likely(r == 0)) {
|
||||
radeon_bo_kunmap(rbo);
|
||||
radeon_bo_unpin(rbo);
|
||||
radeon_bo_unreserve(rbo);
|
||||
}
|
||||
drm_fb_helper_free(&rfbdev->helper);
|
||||
framebuffer_release(info);
|
||||
}
|
||||
|
||||
printk(KERN_INFO "unregistered panic notifier\n");
|
||||
@ -323,120 +364,4 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
|
||||
EXPORT_SYMBOL(radeonfb_remove);
|
||||
|
||||
|
||||
/**
|
||||
* Allocate a GEM object of the specified size with shmfs backing store
|
||||
*/
|
||||
struct drm_gem_object *
|
||||
drm_gem_object_alloc(struct drm_device *dev, size_t size)
|
||||
{
|
||||
struct drm_gem_object *obj;
|
||||
|
||||
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
|
||||
|
||||
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
|
||||
|
||||
obj->dev = dev;
|
||||
// obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
|
||||
// if (IS_ERR(obj->filp)) {
|
||||
// kfree(obj);
|
||||
// return NULL;
|
||||
// }
|
||||
|
||||
// kref_init(&obj->refcount);
|
||||
// kref_init(&obj->handlecount);
|
||||
obj->size = size;
|
||||
|
||||
// if (dev->driver->gem_init_object != NULL &&
|
||||
// dev->driver->gem_init_object(obj) != 0) {
|
||||
// fput(obj->filp);
|
||||
// kfree(obj);
|
||||
// return NULL;
|
||||
// }
|
||||
// atomic_inc(&dev->object_count);
|
||||
// atomic_add(obj->size, &dev->object_memory);
|
||||
return obj;
|
||||
}
|
||||
|
||||
|
||||
int radeon_gem_fb_object_create(struct radeon_device *rdev, int size,
|
||||
int alignment, int initial_domain,
|
||||
bool discardable, bool kernel,
|
||||
bool interruptible,
|
||||
struct drm_gem_object **obj)
|
||||
{
|
||||
struct drm_gem_object *gobj;
|
||||
struct radeon_object *robj;
|
||||
|
||||
*obj = NULL;
|
||||
gobj = drm_gem_object_alloc(rdev->ddev, size);
|
||||
if (!gobj) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* At least align on page size */
|
||||
if (alignment < PAGE_SIZE) {
|
||||
alignment = PAGE_SIZE;
|
||||
}
|
||||
|
||||
robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
|
||||
if (!robj) {
|
||||
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
|
||||
size, initial_domain, alignment);
|
||||
// mutex_lock(&rdev->ddev->struct_mutex);
|
||||
// drm_gem_object_unreference(gobj);
|
||||
// mutex_unlock(&rdev->ddev->struct_mutex);
|
||||
return -ENOMEM;;
|
||||
}
|
||||
robj->rdev = rdev;
|
||||
robj->gobj = gobj;
|
||||
INIT_LIST_HEAD(&robj->list);
|
||||
|
||||
robj->flags = TTM_PL_FLAG_VRAM;
|
||||
|
||||
struct drm_mm_node *vm_node;
|
||||
|
||||
vm_node = kzalloc(sizeof(*vm_node),0);
|
||||
|
||||
vm_node->free = 0;
|
||||
vm_node->size = 0xC00000 >> 12;
|
||||
vm_node->start = 0;
|
||||
vm_node->mm = NULL;
|
||||
|
||||
robj->mm_node = vm_node;
|
||||
|
||||
robj->vm_addr = ((uint32_t)robj->mm_node->start);
|
||||
|
||||
gobj->driver_private = robj;
|
||||
*obj = gobj;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
struct fb_info *framebuffer_alloc(size_t size, void *dev)
|
||||
{
|
||||
#define BYTES_PER_LONG (BITS_PER_LONG/8)
|
||||
#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
|
||||
int fb_info_size = sizeof(struct fb_info);
|
||||
struct fb_info *info;
|
||||
char *p;
|
||||
|
||||
if (size)
|
||||
fb_info_size += PADDING;
|
||||
|
||||
p = kzalloc(fb_info_size + size, GFP_KERNEL);
|
||||
|
||||
if (!p)
|
||||
return NULL;
|
||||
|
||||
info = (struct fb_info *) p;
|
||||
|
||||
if (size)
|
||||
info->par = p + fb_info_size;
|
||||
|
||||
return info;
|
||||
#undef PADDING
|
||||
#undef BYTES_PER_LONG
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
|
||||
|
||||
bool radeon_fence_signaled(struct radeon_fence *fence)
|
||||
{
|
||||
struct radeon_device *rdev = fence->rdev;
|
||||
unsigned long irq_flags;
|
||||
bool signaled = false;
|
||||
|
||||
if (rdev->gpu_lockup) {
|
||||
if (!fence)
|
||||
return true;
|
||||
}
|
||||
if (fence == NULL) {
|
||||
|
||||
if (fence->rdev->gpu_lockup)
|
||||
return true;
|
||||
}
|
||||
|
||||
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
|
||||
signaled = fence->signaled;
|
||||
/* if we are shuting down report all fence as signaled */
|
||||
@ -324,7 +323,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
|
||||
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
|
||||
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
|
||||
if (r) {
|
||||
DRM_ERROR("Fence failed to get a scratch register.");
|
||||
dev_err(rdev->dev, "fence failed to get scratch register\n");
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
return r;
|
||||
}
|
||||
@ -335,9 +334,10 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
|
||||
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
|
||||
rdev->fence_drv.count_timeout = 0;
|
||||
init_waitqueue_head(&rdev->fence_drv.queue);
|
||||
rdev->fence_drv.initialized = true;
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
if (radeon_debugfs_fence_init(rdev)) {
|
||||
DRM_ERROR("Failed to register debugfs file for fence !\n");
|
||||
dev_err(rdev->dev, "fence debugfs file creation failed\n");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -346,11 +346,13 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
|
||||
if (!rdev->fence_drv.initialized)
|
||||
return;
|
||||
wake_up_all(&rdev->fence_drv.queue);
|
||||
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
|
||||
radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
DRM_INFO("radeon: fence finalized\n");
|
||||
rdev->fence_drv.initialized = false;
|
||||
}
|
||||
|
||||
|
||||
|
@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
|
||||
int r;
|
||||
|
||||
if (rdev->gart.table.vram.robj == NULL) {
|
||||
r = radeon_object_create(rdev, NULL,
|
||||
rdev->gart.table_size,
|
||||
true,
|
||||
RADEON_GEM_DOMAIN_VRAM,
|
||||
false, &rdev->gart.table.vram.robj);
|
||||
r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
|
||||
true, RADEON_GEM_DOMAIN_VRAM,
|
||||
&rdev->gart.table.vram.robj);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
@ -95,47 +93,41 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
|
||||
uint64_t gpu_addr;
|
||||
int r;
|
||||
|
||||
r = radeon_object_pin(rdev->gart.table.vram.robj,
|
||||
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = radeon_bo_pin(rdev->gart.table.vram.robj,
|
||||
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
|
||||
if (r) {
|
||||
// radeon_object_unref(&rdev->gart.table.vram.robj);
|
||||
radeon_bo_unreserve(rdev->gart.table.vram.robj);
|
||||
return r;
|
||||
}
|
||||
r = radeon_object_kmap(rdev->gart.table.vram.robj,
|
||||
r = radeon_bo_kmap(rdev->gart.table.vram.robj,
|
||||
(void **)&rdev->gart.table.vram.ptr);
|
||||
if (r) {
|
||||
// radeon_object_unpin(rdev->gart.table.vram.robj);
|
||||
// radeon_object_unref(&rdev->gart.table.vram.robj);
|
||||
DRM_ERROR("radeon: failed to map gart vram table.\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
rdev->gart.table_addr = gpu_addr;
|
||||
if (r)
|
||||
radeon_bo_unpin(rdev->gart.table.vram.robj);
|
||||
radeon_bo_unreserve(rdev->gart.table.vram.robj);
|
||||
rdev->gart.table_addr = gpu_addr;
|
||||
return r;
|
||||
|
||||
dbgprintf("alloc gart vram: gpu_base %x lin_addr %x\n",
|
||||
rdev->gart.table_addr, rdev->gart.table.vram.ptr);
|
||||
|
||||
// gpu_addr = 0x800000;
|
||||
|
||||
// u32_t pci_addr = rdev->mc.aper_base + gpu_addr;
|
||||
|
||||
// rdev->gart.table.vram.ptr = (void*)MapIoMem(pci_addr, rdev->gart.table_size, PG_SW);
|
||||
|
||||
|
||||
// dbgprintf("alloc gart vram:\n gpu_base %x pci_base %x lin_addr %x",
|
||||
// gpu_addr, pci_addr, rdev->gart.table.vram.ptr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_gart_table_vram_free(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (rdev->gart.table.vram.robj == NULL) {
|
||||
return;
|
||||
}
|
||||
// radeon_object_kunmap(rdev->gart.table.vram.robj);
|
||||
// radeon_object_unpin(rdev->gart.table.vram.robj);
|
||||
// radeon_object_unref(&rdev->gart.table.vram.robj);
|
||||
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
|
||||
if (likely(r == 0)) {
|
||||
radeon_bo_kunmap(rdev->gart.table.vram.robj);
|
||||
radeon_bo_unpin(rdev->gart.table.vram.robj);
|
||||
radeon_bo_unreserve(rdev->gart.table.vram.robj);
|
||||
}
|
||||
radeon_bo_unref(&rdev->gart.table.vram.robj);
|
||||
}
|
||||
|
||||
|
||||
@ -152,7 +144,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
|
||||
int i, j;
|
||||
|
||||
if (!rdev->gart.ready) {
|
||||
// WARN(1, "trying to unbind memory to unitialized GART !\n");
|
||||
WARN(1, "trying to unbind memory to unitialized GART !\n");
|
||||
return;
|
||||
}
|
||||
t = offset / RADEON_GPU_PAGE_SIZE;
|
||||
@ -234,13 +226,13 @@ int radeon_gart_init(struct radeon_device *rdev)
|
||||
rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
|
||||
GFP_KERNEL);
|
||||
if (rdev->gart.pages == NULL) {
|
||||
// radeon_gart_fini(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rdev->gart.pages_addr = kzalloc(sizeof(u32_t) *
|
||||
rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
|
||||
rdev->gart.num_cpu_pages, GFP_KERNEL);
|
||||
if (rdev->gart.pages_addr == NULL) {
|
||||
// radeon_gart_fini(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
|
@ -30,31 +30,6 @@
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon.h"
|
||||
|
||||
|
||||
#define TTM_PL_SYSTEM 0
|
||||
#define TTM_PL_TT 1
|
||||
#define TTM_PL_VRAM 2
|
||||
#define TTM_PL_PRIV0 3
|
||||
#define TTM_PL_PRIV1 4
|
||||
#define TTM_PL_PRIV2 5
|
||||
#define TTM_PL_PRIV3 6
|
||||
#define TTM_PL_PRIV4 7
|
||||
#define TTM_PL_PRIV5 8
|
||||
#define TTM_PL_SWAPPED 15
|
||||
|
||||
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
|
||||
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
|
||||
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
|
||||
#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
|
||||
#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
|
||||
#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
|
||||
#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
|
||||
#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
|
||||
#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
|
||||
#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
|
||||
#define TTM_PL_MASK_MEM 0x0000FFFF
|
||||
|
||||
|
||||
int radeon_gem_object_init(struct drm_gem_object *obj)
|
||||
{
|
||||
/* we do nothings here */
|
||||
@ -63,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
|
||||
|
||||
void radeon_gem_object_free(struct drm_gem_object *gobj)
|
||||
{
|
||||
struct radeon_object *robj = gobj->driver_private;
|
||||
struct radeon_bo *robj = gobj->driver_private;
|
||||
|
||||
gobj->driver_private = NULL;
|
||||
if (robj) {
|
||||
// radeon_object_unref(&robj);
|
||||
radeon_bo_unref(&robj);
|
||||
}
|
||||
}
|
||||
|
||||
int radeon_gem_object_create(struct radeon_device *rdev, int size,
|
||||
int alignment, int initial_domain,
|
||||
bool discardable, bool kernel,
|
||||
bool interruptible,
|
||||
struct drm_gem_object **obj)
|
||||
int alignment, int initial_domain,
|
||||
bool discardable, bool kernel,
|
||||
struct drm_gem_object **obj)
|
||||
{
|
||||
struct drm_gem_object *gobj;
|
||||
struct radeon_object *robj;
|
||||
struct radeon_bo *robj;
|
||||
int r;
|
||||
|
||||
*obj = NULL;
|
||||
@ -90,15 +64,11 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
|
||||
if (alignment < PAGE_SIZE) {
|
||||
alignment = PAGE_SIZE;
|
||||
}
|
||||
r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
|
||||
interruptible, &robj);
|
||||
r = radeon_fb_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
|
||||
size, initial_domain, alignment);
|
||||
// mutex_lock(&rdev->ddev->struct_mutex);
|
||||
// drm_gem_object_unreference(gobj);
|
||||
// mutex_unlock(&rdev->ddev->struct_mutex);
|
||||
return r;
|
||||
return r;
|
||||
}
|
||||
gobj->driver_private = robj;
|
||||
*obj = gobj;
|
||||
@ -108,33 +78,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
|
||||
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
|
||||
uint64_t *gpu_addr)
|
||||
{
|
||||
struct radeon_object *robj = obj->driver_private;
|
||||
uint32_t flags;
|
||||
struct radeon_bo *robj = obj->driver_private;
|
||||
int r;
|
||||
|
||||
switch (pin_domain) {
|
||||
case RADEON_GEM_DOMAIN_VRAM:
|
||||
flags = TTM_PL_FLAG_VRAM;
|
||||
break;
|
||||
case RADEON_GEM_DOMAIN_GTT:
|
||||
flags = TTM_PL_FLAG_TT;
|
||||
break;
|
||||
default:
|
||||
flags = TTM_PL_FLAG_SYSTEM;
|
||||
break;
|
||||
}
|
||||
return radeon_object_pin(robj, flags, gpu_addr);
|
||||
r = radeon_bo_reserve(robj, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = radeon_bo_pin(robj, pin_domain, gpu_addr);
|
||||
radeon_bo_unreserve(robj);
|
||||
return r;
|
||||
}
|
||||
|
||||
void radeon_gem_object_unpin(struct drm_gem_object *obj)
|
||||
{
|
||||
struct radeon_object *robj = obj->driver_private;
|
||||
// radeon_object_unpin(robj);
|
||||
struct radeon_bo *robj = obj->driver_private;
|
||||
int r;
|
||||
|
||||
r = radeon_bo_reserve(robj, false);
|
||||
if (likely(r == 0)) {
|
||||
radeon_bo_unpin(robj);
|
||||
radeon_bo_unreserve(robj);
|
||||
}
|
||||
}
|
||||
|
||||
int radeon_gem_set_domain(struct drm_gem_object *gobj,
|
||||
uint32_t rdomain, uint32_t wdomain)
|
||||
{
|
||||
struct radeon_object *robj;
|
||||
struct radeon_bo *robj;
|
||||
uint32_t domain;
|
||||
int r;
|
||||
|
||||
@ -152,11 +122,11 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
|
||||
}
|
||||
if (domain == RADEON_GEM_DOMAIN_CPU) {
|
||||
/* Asking for cpu access wait for object idle */
|
||||
// r = radeon_object_wait(robj);
|
||||
if (r) {
|
||||
printk(KERN_ERR "Failed to wait for object !\n");
|
||||
return r;
|
||||
}
|
||||
// r = radeon_bo_wait(robj, NULL, false);
|
||||
// if (r) {
|
||||
// printk(KERN_ERR "Failed to wait for object !\n");
|
||||
// return r;
|
||||
// }
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -218,7 +188,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
|
||||
args->size = roundup(args->size, PAGE_SIZE);
|
||||
r = radeon_gem_object_create(rdev, args->size, args->alignment,
|
||||
args->initial_domain, false,
|
||||
false, true, &gobj);
|
||||
false, &gobj);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
@ -243,7 +213,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
||||
* just validate the BO into a certain domain */
|
||||
struct drm_radeon_gem_set_domain *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct radeon_object *robj;
|
||||
struct radeon_bo *robj;
|
||||
int r;
|
||||
|
||||
/* for now if someone requests domain CPU -
|
||||
@ -269,26 +239,51 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
||||
{
|
||||
struct drm_radeon_gem_mmap *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct radeon_object *robj;
|
||||
int r;
|
||||
struct radeon_bo *robj;
|
||||
|
||||
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
||||
if (gobj == NULL) {
|
||||
return -EINVAL;
|
||||
}
|
||||
robj = gobj->driver_private;
|
||||
r = radeon_object_mmap(robj, &args->addr_ptr);
|
||||
args->addr_ptr = radeon_bo_mmap_offset(robj);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_gem_object_unreference(gobj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return r;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp)
|
||||
{
|
||||
/* FIXME: implement */
|
||||
return 0;
|
||||
struct drm_radeon_gem_busy *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct radeon_bo *robj;
|
||||
int r;
|
||||
uint32_t cur_placement = 0;
|
||||
|
||||
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
||||
if (gobj == NULL) {
|
||||
return -EINVAL;
|
||||
}
|
||||
robj = gobj->driver_private;
|
||||
r = radeon_bo_wait(robj, &cur_placement, true);
|
||||
switch (cur_placement) {
|
||||
case TTM_PL_VRAM:
|
||||
args->domain = RADEON_GEM_DOMAIN_VRAM;
|
||||
break;
|
||||
case TTM_PL_TT:
|
||||
args->domain = RADEON_GEM_DOMAIN_GTT;
|
||||
break;
|
||||
case TTM_PL_SYSTEM:
|
||||
args->domain = RADEON_GEM_DOMAIN_CPU;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_gem_object_unreference(gobj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return r;
|
||||
}
|
||||
|
||||
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
|
||||
@ -296,7 +291,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
|
||||
{
|
||||
struct drm_radeon_gem_wait_idle *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct radeon_object *robj;
|
||||
struct radeon_bo *robj;
|
||||
int r;
|
||||
|
||||
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
||||
@ -304,7 +299,30 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
|
||||
return -EINVAL;
|
||||
}
|
||||
robj = gobj->driver_private;
|
||||
r = radeon_object_wait(robj);
|
||||
r = radeon_bo_wait(robj, NULL, false);
|
||||
/* callback hw specific functions if any */
|
||||
if (robj->rdev->asic->ioctl_wait_idle)
|
||||
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_gem_object_unreference(gobj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return r;
|
||||
}
|
||||
|
||||
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp)
|
||||
{
|
||||
struct drm_radeon_gem_set_tiling *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct radeon_bo *robj;
|
||||
int r = 0;
|
||||
|
||||
DRM_DEBUG("%d \n", args->handle);
|
||||
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
||||
if (gobj == NULL)
|
||||
return -EINVAL;
|
||||
robj = gobj->driver_private;
|
||||
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_gem_object_unreference(gobj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
@ -216,7 +216,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
|
||||
return NULL;
|
||||
|
||||
i2c->rec = *rec;
|
||||
i2c->adapter.owner = THIS_MODULE;
|
||||
// i2c->adapter.owner = THIS_MODULE;
|
||||
i2c->dev = dev;
|
||||
i2c->adapter.algo_data = &i2c->algo.dp;
|
||||
i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
|
||||
|
@ -43,8 +43,7 @@ static void radeon_overscan_setup(struct drm_crtc *crtc,
|
||||
}
|
||||
|
||||
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
@ -322,13 +321,11 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
RADEON_CRTC_DISP_REQ_EN_B));
|
||||
WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
|
||||
}
|
||||
// drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
|
||||
radeon_crtc_load_lut(crtc);
|
||||
break;
|
||||
case DRM_MODE_DPMS_STANDBY:
|
||||
case DRM_MODE_DPMS_SUSPEND:
|
||||
case DRM_MODE_DPMS_OFF:
|
||||
// drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
|
||||
if (radeon_crtc->crtc_id)
|
||||
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
|
||||
else {
|
||||
@ -340,69 +337,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
}
|
||||
}
|
||||
|
||||
/* properly set crtc bpp when using atombios */
|
||||
void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
int format;
|
||||
uint32_t crtc_gen_cntl;
|
||||
uint32_t disp_merge_cntl;
|
||||
uint32_t crtc_pitch;
|
||||
|
||||
switch (crtc->fb->bits_per_pixel) {
|
||||
case 8:
|
||||
format = 2;
|
||||
break;
|
||||
case 15: /* 555 */
|
||||
format = 3;
|
||||
break;
|
||||
case 16: /* 565 */
|
||||
format = 4;
|
||||
break;
|
||||
case 24: /* RGB */
|
||||
format = 5;
|
||||
break;
|
||||
case 32: /* xRGB */
|
||||
format = 6;
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
|
||||
((crtc->fb->bits_per_pixel * 8) - 1)) /
|
||||
(crtc->fb->bits_per_pixel * 8));
|
||||
crtc_pitch |= crtc_pitch << 16;
|
||||
|
||||
WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
|
||||
|
||||
switch (radeon_crtc->crtc_id) {
|
||||
case 0:
|
||||
disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
|
||||
disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
|
||||
WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
|
||||
|
||||
crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
|
||||
crtc_gen_cntl |= (format << 8);
|
||||
crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
|
||||
WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
|
||||
break;
|
||||
case 1:
|
||||
disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
|
||||
disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
|
||||
WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
|
||||
|
||||
crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
|
||||
crtc_gen_cntl |= (format << 8);
|
||||
WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
|
||||
WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
|
||||
WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
struct drm_framebuffer *old_fb)
|
||||
{
|
||||
@ -756,7 +690,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
uint32_t post_divider = 0;
|
||||
uint32_t freq = 0;
|
||||
uint8_t pll_gain;
|
||||
int pll_flags = RADEON_PLL_LEGACY;
|
||||
bool use_bios_divs = false;
|
||||
/* PLL registers */
|
||||
uint32_t pll_ref_div = 0;
|
||||
@ -790,10 +723,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
else
|
||||
pll = &rdev->clock.p1pll;
|
||||
|
||||
pll->flags = RADEON_PLL_LEGACY;
|
||||
|
||||
if (mode->clock > 200000) /* range limits??? */
|
||||
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
|
||||
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
|
||||
else
|
||||
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
|
||||
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
|
||||
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
if (encoder->crtc == crtc) {
|
||||
@ -805,7 +740,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
}
|
||||
|
||||
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
|
||||
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
|
||||
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
|
||||
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
|
||||
if (!rdev->is_atom_bios) {
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
@ -820,7 +755,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
}
|
||||
}
|
||||
}
|
||||
pll_flags |= RADEON_PLL_USE_REF_DIV;
|
||||
pll->flags |= RADEON_PLL_USE_REF_DIV;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -830,8 +765,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
if (!use_bios_divs) {
|
||||
radeon_compute_pll(pll, mode->clock,
|
||||
&freq, &feedback_div, &frac_fb_div,
|
||||
&reference_div, &post_divider,
|
||||
pll_flags);
|
||||
&reference_div, &post_divider);
|
||||
|
||||
for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
|
||||
if (post_div->divider == post_divider)
|
||||
@ -1059,7 +993,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
|
||||
radeon_set_pll(crtc, adjusted_mode);
|
||||
radeon_overscan_setup(crtc, adjusted_mode);
|
||||
if (radeon_crtc->crtc_id == 0) {
|
||||
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
|
||||
radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
|
||||
} else {
|
||||
if (radeon_crtc->rmx_type != RMX_OFF) {
|
||||
/* FIXME: only first crtc has rmx what should we
|
||||
|
@ -77,7 +77,7 @@ struct radeon_tv_mode_constants {
|
||||
unsigned pix_to_tv;
|
||||
};
|
||||
|
||||
static const uint16_t hor_timing_NTSC[] = {
|
||||
static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
|
||||
0x0007,
|
||||
0x003f,
|
||||
0x0263,
|
||||
@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = {
|
||||
0
|
||||
};
|
||||
|
||||
static const uint16_t vert_timing_NTSC[] = {
|
||||
static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
|
||||
0x2001,
|
||||
0x200d,
|
||||
0x1006,
|
||||
@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = {
|
||||
0
|
||||
};
|
||||
|
||||
static const uint16_t hor_timing_PAL[] = {
|
||||
static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
|
||||
0x0007,
|
||||
0x0058,
|
||||
0x027c,
|
||||
@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = {
|
||||
0
|
||||
};
|
||||
|
||||
static const uint16_t vert_timing_PAL[] = {
|
||||
static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
|
||||
0x2001,
|
||||
0x200c,
|
||||
0x1005,
|
||||
@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
|
||||
}
|
||||
flicker_removal = (tmp + 500) / 1000;
|
||||
|
||||
if (flicker_removal < 3)
|
||||
flicker_removal = 3;
|
||||
for (i = 0; i < 6; ++i) {
|
||||
if (flicker_removal < 2)
|
||||
flicker_removal = 2;
|
||||
for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
|
||||
if (flicker_removal == SLOPE_limit[i])
|
||||
break;
|
||||
}
|
||||
|
@ -46,32 +46,6 @@ struct radeon_device;
|
||||
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
|
||||
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
|
||||
|
||||
enum radeon_connector_type {
|
||||
CONNECTOR_NONE,
|
||||
CONNECTOR_VGA,
|
||||
CONNECTOR_DVI_I,
|
||||
CONNECTOR_DVI_D,
|
||||
CONNECTOR_DVI_A,
|
||||
CONNECTOR_STV,
|
||||
CONNECTOR_CTV,
|
||||
CONNECTOR_LVDS,
|
||||
CONNECTOR_DIGITAL,
|
||||
CONNECTOR_SCART,
|
||||
CONNECTOR_HDMI_TYPE_A,
|
||||
CONNECTOR_HDMI_TYPE_B,
|
||||
CONNECTOR_0XC,
|
||||
CONNECTOR_0XD,
|
||||
CONNECTOR_DIN,
|
||||
CONNECTOR_DISPLAY_PORT,
|
||||
CONNECTOR_UNSUPPORTED
|
||||
};
|
||||
|
||||
enum radeon_dvi_type {
|
||||
DVI_AUTO,
|
||||
DVI_DIGITAL,
|
||||
DVI_ANALOG
|
||||
};
|
||||
|
||||
enum radeon_rmx_type {
|
||||
RMX_OFF,
|
||||
RMX_FULL,
|
||||
@ -88,6 +62,7 @@ enum radeon_tv_std {
|
||||
TV_STD_SCART_PAL,
|
||||
TV_STD_SECAM,
|
||||
TV_STD_PAL_CN,
|
||||
TV_STD_PAL_N,
|
||||
};
|
||||
|
||||
/* radeon gpio-based i2c
|
||||
@ -150,16 +125,24 @@ struct radeon_tmds_pll {
|
||||
#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
|
||||
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
|
||||
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
|
||||
#define RADEON_PLL_USE_POST_DIV (1 << 12)
|
||||
|
||||
struct radeon_pll {
|
||||
uint16_t reference_freq;
|
||||
uint16_t reference_div;
|
||||
/* reference frequency */
|
||||
uint32_t reference_freq;
|
||||
|
||||
/* fixed dividers */
|
||||
uint32_t reference_div;
|
||||
uint32_t post_div;
|
||||
|
||||
/* pll in/out limits */
|
||||
uint32_t pll_in_min;
|
||||
uint32_t pll_in_max;
|
||||
uint32_t pll_out_min;
|
||||
uint32_t pll_out_max;
|
||||
uint16_t xclk;
|
||||
uint32_t best_vco;
|
||||
|
||||
/* divider limits */
|
||||
uint32_t min_ref_div;
|
||||
uint32_t max_ref_div;
|
||||
uint32_t min_post_div;
|
||||
@ -168,7 +151,12 @@ struct radeon_pll {
|
||||
uint32_t max_feedback_div;
|
||||
uint32_t min_frac_feedback_div;
|
||||
uint32_t max_frac_feedback_div;
|
||||
uint32_t best_vco;
|
||||
|
||||
/* flags for the current clock */
|
||||
uint32_t flags;
|
||||
|
||||
/* pll id */
|
||||
uint32_t id;
|
||||
};
|
||||
|
||||
struct radeon_i2c_chan {
|
||||
@ -311,7 +299,7 @@ struct radeon_atom_ss {
|
||||
struct radeon_encoder_atom_dig {
|
||||
/* atom dig */
|
||||
bool coherent_mode;
|
||||
int dig_block;
|
||||
int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
|
||||
/* atom lvds */
|
||||
uint32_t lvds_misc;
|
||||
uint16_t panel_pwr_delay;
|
||||
@ -334,6 +322,9 @@ struct radeon_encoder {
|
||||
enum radeon_rmx_type rmx_type;
|
||||
struct drm_display_mode native_mode;
|
||||
void *enc_priv;
|
||||
int hdmi_offset;
|
||||
int hdmi_audio_workaround;
|
||||
int hdmi_buffer_status;
|
||||
};
|
||||
|
||||
struct radeon_connector_atom_dig {
|
||||
@ -392,6 +383,11 @@ struct radeon_framebuffer {
|
||||
struct drm_gem_object *obj;
|
||||
};
|
||||
|
||||
extern enum radeon_tv_std
|
||||
radeon_combios_get_tv_info(struct radeon_device *rdev);
|
||||
extern enum radeon_tv_std
|
||||
radeon_atombios_get_tv_info(struct radeon_device *rdev);
|
||||
|
||||
extern void radeon_connector_hotplug(struct drm_connector *connector);
|
||||
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
|
||||
extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
|
||||
@ -434,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
|
||||
uint32_t *fb_div_p,
|
||||
uint32_t *frac_fb_div_p,
|
||||
uint32_t *ref_div_p,
|
||||
uint32_t *post_div_p,
|
||||
int flags);
|
||||
uint32_t *post_div_p);
|
||||
|
||||
extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
|
||||
uint64_t freq,
|
||||
@ -443,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
|
||||
uint32_t *fb_div_p,
|
||||
uint32_t *frac_fb_div_p,
|
||||
uint32_t *ref_div_p,
|
||||
uint32_t *post_div_p,
|
||||
int flags);
|
||||
uint32_t *post_div_p);
|
||||
|
||||
extern void radeon_setup_encoder_clones(struct drm_device *dev);
|
||||
|
||||
@ -470,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
|
||||
|
||||
extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
struct drm_framebuffer *old_fb);
|
||||
extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
|
||||
|
||||
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
struct drm_file *file_priv,
|
||||
|
@ -59,23 +59,9 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
|
||||
*
|
||||
* Returns:
|
||||
* -EBUSY: buffer is busy and @no_wait is true
|
||||
* -ERESTART: A wait for the buffer to become unreserved was interrupted by
|
||||
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
|
||||
* a signal. Release all buffer reservations and return to user-space.
|
||||
*/
|
||||
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
|
||||
{
|
||||
int r;
|
||||
|
||||
retry:
|
||||
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
|
||||
if (unlikely(r != 0)) {
|
||||
if (r == -ERESTART)
|
||||
goto retry;
|
||||
dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
|
||||
return r;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
|
||||
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
|
||||
{
|
||||
@ -85,12 +71,12 @@ static inline void radeon_bo_unreserve(struct radeon_bo *bo)
|
||||
/**
|
||||
* radeon_bo_gpu_offset - return GPU offset of bo
|
||||
* @bo: radeon object for which we query the offset
|
||||
*
|
||||
*
|
||||
* Returns current GPU offset of the object.
|
||||
*
|
||||
*
|
||||
* Note: object should either be pinned or reserved when calling this
|
||||
* function, it might be usefull to add check for this for debugging.
|
||||
*/
|
||||
*/
|
||||
static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
|
||||
{
|
||||
return bo->tbo.offset;
|
||||
@ -109,12 +95,12 @@ static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
|
||||
/**
|
||||
* radeon_bo_mmap_offset - return mmap offset of bo
|
||||
* @bo: radeon object for which we query the offset
|
||||
*
|
||||
*
|
||||
* Returns mmap offset of the object.
|
||||
*
|
||||
*
|
||||
* Note: addr_space_offset is constant after ttm bo init thus isn't protected
|
||||
* by any lock.
|
||||
*/
|
||||
*/
|
||||
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
|
||||
{
|
||||
return bo->tbo.addr_space_offset;
|
||||
@ -125,11 +111,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
|
||||
{
|
||||
int r;
|
||||
|
||||
retry:
|
||||
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
|
||||
if (unlikely(r != 0)) {
|
||||
if (r == -ERESTART)
|
||||
goto retry;
|
||||
if (r != -ERESTARTSYS)
|
||||
dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
|
||||
return r;
|
||||
}
|
||||
@ -140,8 +124,6 @@ retry:
|
||||
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
|
||||
spin_unlock(&bo->tbo.lock);
|
||||
ttm_bo_unreserve(&bo->tbo);
|
||||
if (unlikely(r == -ERESTART))
|
||||
goto retry;
|
||||
return r;
|
||||
}
|
||||
|
||||
|
388
drivers/video/drm/radeon/radeon_object_kos.c
Normal file
388
drivers/video/drm/radeon/radeon_object_kos.c
Normal file
@ -0,0 +1,388 @@
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon.h"
|
||||
|
||||
|
||||
static struct drm_mm mm_gtt;
|
||||
static struct drm_mm mm_vram;
|
||||
|
||||
int drm_mm_alloc(struct drm_mm *mm, size_t num_pages,
|
||||
struct drm_mm_node **node)
|
||||
{
|
||||
struct drm_mm_node *vm_node;
|
||||
int r;
|
||||
|
||||
retry_pre_get:
|
||||
|
||||
r = drm_mm_pre_get(mm);
|
||||
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
|
||||
vm_node = drm_mm_search_free(mm, num_pages, 0, 0);
|
||||
|
||||
if (unlikely(vm_node == NULL)) {
|
||||
r = -ENOMEM;
|
||||
return r;
|
||||
}
|
||||
|
||||
*node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
|
||||
|
||||
if (unlikely(*node == NULL)) {
|
||||
goto retry_pre_get;
|
||||
}
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
|
||||
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
|
||||
{
|
||||
u32 c = 0;
|
||||
|
||||
rbo->placement.fpfn = 0;
|
||||
rbo->placement.lpfn = 0;
|
||||
rbo->placement.placement = rbo->placements;
|
||||
rbo->placement.busy_placement = rbo->placements;
|
||||
if (domain & RADEON_GEM_DOMAIN_VRAM)
|
||||
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
|
||||
TTM_PL_FLAG_VRAM;
|
||||
if (domain & RADEON_GEM_DOMAIN_GTT)
|
||||
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
|
||||
if (domain & RADEON_GEM_DOMAIN_CPU)
|
||||
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
|
||||
if (!c)
|
||||
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
|
||||
rbo->placement.num_placement = c;
|
||||
rbo->placement.num_busy_placement = c;
|
||||
}
|
||||
|
||||
|
||||
int radeon_bo_init(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
|
||||
rdev->mc.mc_vram_size >> 20,
|
||||
(unsigned long long)rdev->mc.aper_size >> 20);
|
||||
DRM_INFO("RAM width %dbits %cDR\n",
|
||||
rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
|
||||
|
||||
r = drm_mm_init(&mm_vram, 0xC00000 >> PAGE_SHIFT,
|
||||
((rdev->mc.real_vram_size - 0xC00000) >> PAGE_SHIFT));
|
||||
if (r) {
|
||||
DRM_ERROR("Failed initializing VRAM heap.\n");
|
||||
return r;
|
||||
};
|
||||
|
||||
r = drm_mm_init(&mm_gtt, 0, rdev->mc.gtt_size >> PAGE_SHIFT);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed initializing GTT heap.\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
|
||||
{
|
||||
int r;
|
||||
|
||||
bo->tbo.reserved.counter = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
|
||||
{
|
||||
bo->reserved.counter = 1;
|
||||
}
|
||||
|
||||
int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
|
||||
unsigned long size, bool kernel, u32 domain,
|
||||
struct radeon_bo **bo_ptr)
|
||||
{
|
||||
enum ttm_bo_type type;
|
||||
|
||||
struct radeon_bo *bo;
|
||||
size_t num_pages;
|
||||
struct drm_mm *mman;
|
||||
u32 bo_domain;
|
||||
int r;
|
||||
|
||||
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
|
||||
if (num_pages == 0) {
|
||||
dbgprintf("Illegal buffer object size.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if(domain & RADEON_GEM_DOMAIN_VRAM)
|
||||
{
|
||||
mman = &mm_vram;
|
||||
bo_domain = RADEON_GEM_DOMAIN_VRAM;
|
||||
}
|
||||
else if(domain & RADEON_GEM_DOMAIN_GTT)
|
||||
{
|
||||
mman = &mm_gtt;
|
||||
bo_domain = RADEON_GEM_DOMAIN_GTT;
|
||||
}
|
||||
else return -EINVAL;
|
||||
|
||||
if (kernel) {
|
||||
type = ttm_bo_type_kernel;
|
||||
} else {
|
||||
type = ttm_bo_type_device;
|
||||
}
|
||||
*bo_ptr = NULL;
|
||||
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
|
||||
if (bo == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
bo->rdev = rdev;
|
||||
bo->gobj = gobj;
|
||||
bo->surface_reg = -1;
|
||||
bo->tbo.num_pages = num_pages;
|
||||
bo->domain = domain;
|
||||
|
||||
INIT_LIST_HEAD(&bo->list);
|
||||
|
||||
// radeon_ttm_placement_from_domain(bo, domain);
|
||||
/* Kernel allocation are uninterruptible */
|
||||
|
||||
r = drm_mm_alloc(mman, num_pages, &bo->tbo.vm_node);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
|
||||
*bo_ptr = bo;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define page_tabs 0xFDC00000 /* just another hack */
|
||||
|
||||
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
|
||||
{
|
||||
int r=0, i;
|
||||
|
||||
if (bo->pin_count) {
|
||||
bo->pin_count++;
|
||||
if (gpu_addr)
|
||||
*gpu_addr = radeon_bo_gpu_offset(bo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
bo->tbo.offset = bo->tbo.vm_node->start << PAGE_SHIFT;
|
||||
|
||||
if(bo->domain & RADEON_GEM_DOMAIN_VRAM)
|
||||
{
|
||||
bo->tbo.offset += (u64)bo->rdev->mc.vram_location;
|
||||
}
|
||||
else if (bo->domain & RADEON_GEM_DOMAIN_GTT)
|
||||
{
|
||||
u32_t *pagelist;
|
||||
bo->kptr = KernelAlloc( bo->tbo.num_pages << PAGE_SHIFT );
|
||||
dbgprintf("kernel alloc %x\n", bo->kptr );
|
||||
|
||||
pagelist = &((u32_t*)page_tabs)[(u32_t)bo->kptr >> 12];
|
||||
dbgprintf("pagelist %x\n", pagelist);
|
||||
radeon_gart_bind(bo->rdev, bo->tbo.offset,
|
||||
bo->tbo.vm_node->size, pagelist);
|
||||
bo->tbo.offset += (u64)bo->rdev->mc.gtt_location;
|
||||
}
|
||||
else
|
||||
{
|
||||
DRM_ERROR("Unknown placement %x\n", bo->domain);
|
||||
bo->tbo.offset = -1;
|
||||
r = -1;
|
||||
};
|
||||
|
||||
if (unlikely(r != 0)) {
|
||||
DRM_ERROR("radeon: failed to pin object.\n");
|
||||
}
|
||||
|
||||
if (likely(r == 0)) {
|
||||
bo->pin_count = 1;
|
||||
if (gpu_addr != NULL)
|
||||
*gpu_addr = radeon_bo_gpu_offset(bo);
|
||||
}
|
||||
|
||||
if (unlikely(r != 0))
|
||||
dev_err(bo->rdev->dev, "%p pin failed\n", bo);
|
||||
return r;
|
||||
};
|
||||
|
||||
int radeon_bo_unpin(struct radeon_bo *bo)
|
||||
{
|
||||
int r = 0;
|
||||
|
||||
if (!bo->pin_count) {
|
||||
dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
|
||||
return 0;
|
||||
}
|
||||
bo->pin_count--;
|
||||
if (bo->pin_count)
|
||||
return 0;
|
||||
|
||||
if( bo->tbo.vm_node )
|
||||
{
|
||||
drm_mm_put_block(bo->tbo.vm_node);
|
||||
bo->tbo.vm_node = NULL;
|
||||
};
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
|
||||
{
|
||||
bool is_iomem;
|
||||
|
||||
if (bo->kptr) {
|
||||
if (ptr) {
|
||||
*ptr = bo->kptr;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
if(bo->domain & RADEON_GEM_DOMAIN_VRAM)
|
||||
{
|
||||
bo->cpu_addr = bo->rdev->mc.aper_base +
|
||||
(bo->tbo.vm_node->start << PAGE_SHIFT);
|
||||
bo->kptr = (void*)MapIoMem(bo->cpu_addr,
|
||||
bo->tbo.vm_node->size << 12, PG_SW);
|
||||
}
|
||||
else
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (ptr) {
|
||||
*ptr = bo->kptr;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_bo_kunmap(struct radeon_bo *bo)
|
||||
{
|
||||
if (bo->kptr == NULL)
|
||||
return;
|
||||
|
||||
if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
|
||||
{
|
||||
FreeKernelSpace(bo->kptr);
|
||||
}
|
||||
|
||||
bo->kptr = NULL;
|
||||
|
||||
}
|
||||
|
||||
void radeon_bo_unref(struct radeon_bo **bo)
|
||||
{
|
||||
struct ttm_buffer_object *tbo;
|
||||
|
||||
if ((*bo) == NULL)
|
||||
return;
|
||||
|
||||
*bo = NULL;
|
||||
}
|
||||
|
||||
|
||||
void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
|
||||
uint32_t *tiling_flags,
|
||||
uint32_t *pitch)
|
||||
{
|
||||
// BUG_ON(!atomic_read(&bo->tbo.reserved));
|
||||
if (tiling_flags)
|
||||
*tiling_flags = bo->tiling_flags;
|
||||
if (pitch)
|
||||
*pitch = bo->pitch;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Allocate a GEM object of the specified size with shmfs backing store
|
||||
*/
|
||||
struct drm_gem_object *
|
||||
drm_gem_object_alloc(struct drm_device *dev, size_t size)
|
||||
{
|
||||
struct drm_gem_object *obj;
|
||||
|
||||
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
|
||||
|
||||
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
|
||||
|
||||
obj->dev = dev;
|
||||
obj->size = size;
|
||||
return obj;
|
||||
}
|
||||
|
||||
|
||||
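/* Framebuffer variant of radeon_bo_create: instead of allocating from the
 * VRAM heap it hands back a pre-pinned object covering the reserved first
 * 12 MiB of VRAM, with the fixed kernel mapping at 0xFE000000 as its CPU
 * pointer. */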
int radeon_fb_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
|
||||
unsigned long size, bool kernel, u32 domain,
|
||||
struct radeon_bo **bo_ptr)
|
||||
{
|
||||
enum ttm_bo_type type;
|
||||
|
||||
struct radeon_bo *bo;
|
||||
struct drm_mm *mman;
|
||||
struct drm_mm_node *vm_node;
|
||||
|
||||
size_t num_pages;
|
||||
u32 bo_domain;
|
||||
int r;
|
||||
|
||||
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
|
||||
if (num_pages == 0) {
|
||||
dbgprintf("Illegal buffer object size.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if( (domain & RADEON_GEM_DOMAIN_VRAM) !=
|
||||
RADEON_GEM_DOMAIN_VRAM )
|
||||
{
|
||||
return -EINVAL;
|
||||
};
|
||||
|
||||
if (kernel) {
|
||||
type = ttm_bo_type_kernel;
|
||||
} else {
|
||||
type = ttm_bo_type_device;
|
||||
}
|
||||
*bo_ptr = NULL;
|
||||
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
|
||||
if (bo == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
bo->rdev = rdev;
|
||||
bo->gobj = gobj;
|
||||
bo->surface_reg = -1;
|
||||
bo->tbo.num_pages = num_pages;
|
||||
bo->domain = domain;
|
||||
|
||||
INIT_LIST_HEAD(&bo->list);
|
||||
|
||||
// radeon_ttm_placement_from_domain(bo, domain);
|
||||
/* Kernel allocations are uninterruptible */
|
||||
|
||||
vm_node = kzalloc(sizeof(*vm_node),0);
|
||||
|
||||
vm_node->free = 0;
|
||||
vm_node->size = 0xC00000 >> 12;
|
||||
vm_node->start = 0;
|
||||
vm_node->mm = NULL;
|
||||
|
||||
bo->tbo.vm_node = vm_node;
|
||||
bo->tbo.offset = bo->tbo.vm_node->start << PAGE_SHIFT;
|
||||
bo->tbo.offset += (u64)bo->rdev->mc.vram_location;
|
||||
bo->kptr = (void*)0xFE000000;
|
||||
bo->pin_count = 1;
|
||||
|
||||
*bo_ptr = bo;
|
||||
|
||||
return 0;
|
||||
}
|
@ -44,8 +44,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
|
||||
seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
|
||||
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
|
||||
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
|
||||
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
|
||||
if (rdev->asic->get_memory_clock)
|
||||
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -169,19 +169,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
|
||||
return 0;
|
||||
/* Allocate 1M object buffer */
|
||||
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
|
||||
r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
|
||||
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
|
||||
true, RADEON_GEM_DOMAIN_GTT,
|
||||
false, &rdev->ib_pool.robj);
|
||||
&rdev->ib_pool.robj);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
|
||||
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
|
||||
if (r) {
|
||||
radeon_bo_unreserve(rdev->ib_pool.robj);
|
||||
DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
|
||||
r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
|
||||
radeon_bo_unreserve(rdev->ib_pool.robj);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
|
||||
return r;
|
||||
@ -207,14 +212,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
|
||||
|
||||
void radeon_ib_pool_fini(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (!rdev->ib_pool.ready) {
|
||||
return;
|
||||
}
|
||||
mutex_lock(&rdev->ib_pool.mutex);
|
||||
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
|
||||
if (rdev->ib_pool.robj) {
|
||||
// radeon_object_kunmap(rdev->ib_pool.robj);
|
||||
// radeon_object_unref(&rdev->ib_pool.robj);
|
||||
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
|
||||
if (likely(r == 0)) {
|
||||
radeon_bo_kunmap(rdev->ib_pool.robj);
|
||||
radeon_bo_unpin(rdev->ib_pool.robj);
|
||||
radeon_bo_unreserve(rdev->ib_pool.robj);
|
||||
}
|
||||
radeon_bo_unref(&rdev->ib_pool.robj);
|
||||
rdev->ib_pool.robj = NULL;
|
||||
}
|
||||
mutex_unlock(&rdev->ib_pool.mutex);
|
||||
@ -294,46 +306,31 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
|
||||
rdev->cp.ring_size = ring_size;
|
||||
/* Allocate ring buffer */
|
||||
if (rdev->cp.ring_obj == NULL) {
|
||||
r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
|
||||
true,
|
||||
r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
|
||||
RADEON_GEM_DOMAIN_GTT,
|
||||
false,
|
||||
&rdev->cp.ring_obj);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
|
||||
mutex_unlock(&rdev->cp.mutex);
|
||||
dev_err(rdev->dev, "(%d) ring create failed\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_object_pin(rdev->cp.ring_obj,
|
||||
RADEON_GEM_DOMAIN_GTT,
|
||||
r = radeon_bo_reserve(rdev->cp.ring_obj, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
|
||||
&rdev->cp.gpu_addr);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
|
||||
mutex_unlock(&rdev->cp.mutex);
|
||||
radeon_bo_unreserve(rdev->cp.ring_obj);
|
||||
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_object_kmap(rdev->cp.ring_obj,
|
||||
r = radeon_bo_kmap(rdev->cp.ring_obj,
|
||||
(void **)&rdev->cp.ring);
|
||||
radeon_bo_unreserve(rdev->cp.ring_obj);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
|
||||
mutex_unlock(&rdev->cp.mutex);
|
||||
dev_err(rdev->dev, "(%d) ring map failed\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// rdev->cp.ring = CreateRingBuffer( ring_size, PG_SW );
|
||||
|
||||
dbgprintf("ring buffer %x\n", rdev->cp.ring );
|
||||
|
||||
// rdev->cp.gpu_addr = rdev->mc.gtt_location;
|
||||
|
||||
// u32_t *pagelist = &((u32_t*)page_tabs)[(u32_t)rdev->cp.ring >> 12];
|
||||
|
||||
// dbgprintf("pagelist %x\n", pagelist);
|
||||
|
||||
// radeon_gart_bind(rdev, 0, ring_size / 4096, pagelist);
|
||||
|
||||
rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
|
||||
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
|
||||
|
||||
@ -344,11 +341,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
|
||||
|
||||
void radeon_ring_fini(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
mutex_lock(&rdev->cp.mutex);
|
||||
if (rdev->cp.ring_obj) {
|
||||
// radeon_object_kunmap(rdev->cp.ring_obj);
|
||||
// radeon_object_unpin(rdev->cp.ring_obj);
|
||||
// radeon_object_unref(&rdev->cp.ring_obj);
|
||||
r = radeon_bo_reserve(rdev->cp.ring_obj, false);
|
||||
if (likely(r == 0)) {
|
||||
radeon_bo_kunmap(rdev->cp.ring_obj);
|
||||
radeon_bo_unpin(rdev->cp.ring_obj);
|
||||
radeon_bo_unreserve(rdev->cp.ring_obj);
|
||||
}
|
||||
radeon_bo_unref(&rdev->cp.ring_obj);
|
||||
rdev->cp.ring = NULL;
|
||||
rdev->cp.ring_obj = NULL;
|
||||
}
|
||||
|
270
drivers/video/drm/radeon/radeon_ttm.c
Normal file
@ -0,0 +1,270 @@
|
||||
/*
|
||||
* Copyright 2009 Jerome Glisse.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
*/
|
||||
/*
|
||||
* Authors:
|
||||
* Jerome Glisse <glisse@freedesktop.org>
|
||||
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
|
||||
* Dave Airlie
|
||||
*/
|
||||
#include <ttm/ttm_bo_api.h>
|
||||
#include <ttm/ttm_bo_driver.h>
|
||||
#include <ttm/ttm_placement.h>
|
||||
#include <ttm/ttm_module.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/radeon_drm.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
|
||||
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
|
||||
|
||||
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
|
||||
|
||||
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
|
||||
{
|
||||
struct radeon_mman *mman;
|
||||
struct radeon_device *rdev;
|
||||
|
||||
mman = container_of(bdev, struct radeon_mman, bdev);
|
||||
rdev = container_of(mman, struct radeon_device, mman);
|
||||
return rdev;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Global memory.
|
||||
*/
|
||||
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
|
||||
{
|
||||
return ttm_mem_global_init(ref->object);
|
||||
}
|
||||
|
||||
static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
|
||||
{
|
||||
ttm_mem_global_release(ref->object);
|
||||
}
|
||||
|
||||
static int radeon_ttm_global_init(struct radeon_device *rdev)
|
||||
{
|
||||
struct ttm_global_reference *global_ref;
|
||||
int r;
|
||||
|
||||
rdev->mman.mem_global_referenced = false;
|
||||
global_ref = &rdev->mman.mem_global_ref;
|
||||
global_ref->global_type = TTM_GLOBAL_TTM_MEM;
|
||||
global_ref->size = sizeof(struct ttm_mem_global);
|
||||
global_ref->init = &radeon_ttm_mem_global_init;
|
||||
global_ref->release = &radeon_ttm_mem_global_release;
|
||||
r = ttm_global_item_ref(global_ref);
|
||||
if (r != 0) {
|
||||
DRM_ERROR("Failed setting up TTM memory accounting "
|
||||
"subsystem.\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
rdev->mman.bo_global_ref.mem_glob =
|
||||
rdev->mman.mem_global_ref.object;
|
||||
global_ref = &rdev->mman.bo_global_ref.ref;
|
||||
global_ref->global_type = TTM_GLOBAL_TTM_BO;
|
||||
global_ref->size = sizeof(struct ttm_bo_global);
|
||||
global_ref->init = &ttm_bo_global_init;
|
||||
global_ref->release = &ttm_bo_global_release;
|
||||
r = ttm_global_item_ref(global_ref);
|
||||
if (r != 0) {
|
||||
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
|
||||
ttm_global_item_unref(&rdev->mman.mem_global_ref);
|
||||
return r;
|
||||
}
|
||||
|
||||
rdev->mman.mem_global_referenced = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
|
||||
|
||||
|
||||
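/* Describe each TTM memory type (system, GTT, VRAM) to the BO driver:
 * GPU offsets, aperture ranges and the caching modes each type allows. */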
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
|
||||
struct ttm_mem_type_manager *man)
|
||||
{
|
||||
struct radeon_device *rdev;
|
||||
|
||||
rdev = radeon_get_rdev(bdev);
|
||||
|
||||
switch (type) {
|
||||
case TTM_PL_SYSTEM:
|
||||
/* System memory */
|
||||
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
|
||||
man->available_caching = TTM_PL_MASK_CACHING;
|
||||
man->default_caching = TTM_PL_FLAG_CACHED;
|
||||
break;
|
||||
case TTM_PL_TT:
|
||||
man->gpu_offset = rdev->mc.gtt_location;
|
||||
man->available_caching = TTM_PL_MASK_CACHING;
|
||||
man->default_caching = TTM_PL_FLAG_CACHED;
|
||||
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
|
||||
#if __OS_HAS_AGP
|
||||
if (rdev->flags & RADEON_IS_AGP) {
|
||||
if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
|
||||
DRM_ERROR("AGP is not enabled for memory type %u\n",
|
||||
(unsigned)type);
|
||||
return -EINVAL;
|
||||
}
|
||||
man->io_offset = rdev->mc.agp_base;
|
||||
man->io_size = rdev->mc.gtt_size;
|
||||
man->io_addr = NULL;
|
||||
if (!rdev->ddev->agp->cant_use_aperture)
|
||||
man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
|
||||
TTM_MEMTYPE_FLAG_MAPPABLE;
|
||||
man->available_caching = TTM_PL_FLAG_UNCACHED |
|
||||
TTM_PL_FLAG_WC;
|
||||
man->default_caching = TTM_PL_FLAG_WC;
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
man->io_offset = 0;
|
||||
man->io_size = 0;
|
||||
man->io_addr = NULL;
|
||||
}
|
||||
break;
|
||||
case TTM_PL_VRAM:
|
||||
/* "On-card" video ram */
|
||||
man->gpu_offset = rdev->mc.vram_location;
|
||||
man->flags = TTM_MEMTYPE_FLAG_FIXED |
|
||||
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
|
||||
TTM_MEMTYPE_FLAG_MAPPABLE;
|
||||
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
|
||||
man->default_caching = TTM_PL_FLAG_WC;
|
||||
man->io_addr = NULL;
|
||||
man->io_offset = rdev->mc.aper_base;
|
||||
man->io_size = rdev->mc.aper_size;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ttm_bo_driver radeon_bo_driver = {
|
||||
// .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
|
||||
// .invalidate_caches = &radeon_invalidate_caches,
|
||||
.init_mem_type = &radeon_init_mem_type,
|
||||
// .evict_flags = &radeon_evict_flags,
|
||||
// .move = &radeon_bo_move,
|
||||
// .verify_access = &radeon_verify_access,
|
||||
// .sync_obj_signaled = &radeon_sync_obj_signaled,
|
||||
// .sync_obj_wait = &radeon_sync_obj_wait,
|
||||
// .sync_obj_flush = &radeon_sync_obj_flush,
|
||||
// .sync_obj_unref = &radeon_sync_obj_unref,
|
||||
// .sync_obj_ref = &radeon_sync_obj_ref,
|
||||
// .move_notify = &radeon_bo_move_notify,
|
||||
// .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
|
||||
};
|
||||
|
||||
int radeon_ttm_init(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = radeon_ttm_global_init(rdev);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
/* No other users of the address space, so set it to 0 */
|
||||
r = ttm_bo_device_init(&rdev->mman.bdev,
|
||||
rdev->mman.bo_global_ref.ref.object,
|
||||
&radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
|
||||
rdev->need_dma32);
|
||||
if (r) {
|
||||
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
rdev->mman.initialized = true;
|
||||
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
|
||||
rdev->mc.real_vram_size >> PAGE_SHIFT);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed initializing VRAM heap.\n");
|
||||
return r;
|
||||
}
|
||||
r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
|
||||
RADEON_GEM_DOMAIN_VRAM,
|
||||
&rdev->stollen_vga_memory);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
|
||||
if (r)
|
||||
return r;
|
||||
r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
|
||||
radeon_bo_unreserve(rdev->stollen_vga_memory);
|
||||
if (r) {
|
||||
radeon_bo_unref(&rdev->stollen_vga_memory);
|
||||
return r;
|
||||
}
|
||||
DRM_INFO("radeon: %uM of VRAM memory ready\n",
|
||||
(unsigned)rdev->mc.real_vram_size / (1024 * 1024));
|
||||
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
|
||||
rdev->mc.gtt_size >> PAGE_SHIFT);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed initializing GTT heap.\n");
|
||||
return r;
|
||||
}
|
||||
DRM_INFO("radeon: %uM of GTT memory ready.\n",
|
||||
(unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
|
||||
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
|
||||
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
|
||||
}
|
||||
|
||||
r = radeon_ttm_debugfs_init(rdev);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to init debugfs\n");
|
||||
return r;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct vm_operations_struct radeon_ttm_vm_ops;
|
||||
static const struct vm_operations_struct *ttm_vm_ops = NULL;
|
||||
|
@ -29,17 +29,21 @@ int init_cursor(cursor_t *cursor)
|
||||
|
||||
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
|
||||
|
||||
r = radeon_object_create(rdev, NULL, CURSOR_WIDTH*CURSOR_HEIGHT*4,
|
||||
false,
|
||||
RADEON_GEM_DOMAIN_VRAM,
|
||||
false, &cursor->robj);
|
||||
r = radeon_bo_create(rdev, NULL, CURSOR_WIDTH*CURSOR_HEIGHT*4,
|
||||
false, RADEON_GEM_DOMAIN_VRAM, &cursor->robj);
|
||||
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
|
||||
radeon_object_pin(cursor->robj, TTM_PL_FLAG_VRAM, NULL);
|
||||
r = radeon_bo_reserve(cursor->robj, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
|
||||
r = radeon_object_kmap(cursor->robj, &bits);
|
||||
r = radeon_bo_pin(cursor->robj, RADEON_GEM_DOMAIN_VRAM, NULL);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
|
||||
r = radeon_bo_kmap(cursor->robj, (void**)&bits);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to map cursor (%d).\n", r);
|
||||
return r;
|
||||
@ -57,7 +61,7 @@ int init_cursor(cursor_t *cursor)
|
||||
for(i = 0; i < CURSOR_WIDTH*(CURSOR_HEIGHT-32); i++)
|
||||
*bits++ = 0;
|
||||
|
||||
radeon_object_kunmap(cursor->robj);
|
||||
radeon_bo_kunmap(cursor->robj);
|
||||
|
||||
// cursor->header.destroy = destroy_cursor;
|
||||
|
||||
@ -67,7 +71,7 @@ int init_cursor(cursor_t *cursor)
|
||||
void fini_cursor(cursor_t *cursor)
|
||||
{
|
||||
list_del(&cursor->list);
|
||||
radeon_object_unpin(cursor->robj);
|
||||
radeon_bo_unpin(cursor->robj);
|
||||
KernelFree(cursor->data);
|
||||
__DestroyObject(cursor);
|
||||
};
|
||||
@ -100,7 +104,7 @@ cursor_t* __stdcall select_cursor(cursor_t *cursor)
|
||||
old = rdisplay->cursor;
|
||||
|
||||
rdisplay->cursor = cursor;
|
||||
// gpu_addr = cursor->robj->gpu_addr;
|
||||
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS, gpu_addr);
|
||||
@ -149,24 +153,40 @@ void __stdcall move_cursor(cursor_t *cursor, int x, int y)
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
{
|
||||
int w = 32;
|
||||
int i = 0;
|
||||
|
||||
WREG32(AVIVO_D1CUR_POSITION, (x << 16) | y);
|
||||
WREG32(AVIVO_D1CUR_HOT_SPOT, (hot_x << 16) | hot_y);
|
||||
WREG32(AVIVO_D1CUR_SIZE, ((w - 1) << 16) | 31);
|
||||
} else {
|
||||
|
||||
uint32_t gpu_addr;
|
||||
int xorg =0, yorg=0;
|
||||
|
||||
x = x - hot_x;
|
||||
y = y - hot_y;
|
||||
|
||||
if( x < 0 )
|
||||
{
|
||||
xorg = -x + 1;
|
||||
x = 0;
|
||||
}
|
||||
|
||||
if( y < 0 )
|
||||
{
|
||||
yorg = -hot_y + 1;
|
||||
y = 0;
|
||||
};
|
||||
|
||||
WREG32(RADEON_CUR_HORZ_VERT_OFF,
|
||||
(RADEON_CUR_LOCK | (hot_x << 16) | hot_y ));
|
||||
(RADEON_CUR_LOCK | (xorg << 16) | yorg ));
|
||||
WREG32(RADEON_CUR_HORZ_VERT_POSN,
|
||||
(RADEON_CUR_LOCK | (x << 16) | y));
|
||||
|
||||
// gpu_addr = cursor->robj->gpu_addr;
|
||||
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
|
||||
|
||||
/* offset is from DISP(2)_BASE_ADDRESS */
|
||||
WREG32(RADEON_CUR_OFFSET,
|
||||
(gpu_addr - rdev->mc.vram_location + (hot_y * 256)));
|
||||
(gpu_addr - rdev->mc.vram_location + (yorg * 256)));
|
||||
}
|
||||
radeon_lock_cursor(false);
|
||||
}
|
||||
@ -176,7 +196,7 @@ void __stdcall restore_cursor(int x, int y)
|
||||
};
|
||||
|
||||
|
||||
bool init_display(struct radeon_device *rdev, mode_t *usermode)
|
||||
bool init_display(struct radeon_device *rdev, videomode_t *usermode)
|
||||
{
|
||||
struct drm_device *dev;
|
||||
|
||||
@ -216,4 +236,34 @@ bool init_display(struct radeon_device *rdev, mode_t *usermode)
|
||||
};
|
||||
|
||||
|
||||
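/* Minimal framebuffer_alloc(): one allocation holding struct fb_info plus,
 * if requested, a padded driver-private area pointed to by info->par. */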
struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
|
||||
{
|
||||
#define BYTES_PER_LONG (BITS_PER_LONG/8)
|
||||
#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
|
||||
int fb_info_size = sizeof(struct fb_info);
|
||||
struct fb_info *info;
|
||||
char *p;
|
||||
|
||||
if (size)
|
||||
fb_info_size += PADDING;
|
||||
|
||||
p = kzalloc(fb_info_size + size, GFP_KERNEL);
|
||||
|
||||
if (!p)
|
||||
return NULL;
|
||||
|
||||
info = (struct fb_info *) p;
|
||||
|
||||
if (size)
|
||||
info->par = p + fb_info_size;
|
||||
|
||||
return info;
|
||||
#undef PADDING
|
||||
#undef BYTES_PER_LONG
|
||||
}
|
||||
|
||||
void framebuffer_release(struct fb_info *info)
|
||||
{
|
||||
kfree(info);
|
||||
}
|
||||
|
||||
|
@ -78,7 +78,7 @@ cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
|
||||
old = rdisplay->cursor;
|
||||
|
||||
rdisplay->cursor = cursor;
|
||||
// gpu_addr = cursor->robj->gpu_addr;
|
||||
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
|
||||
@ -148,14 +148,34 @@ void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y)
|
||||
if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
y *= 2;
|
||||
|
||||
WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
|
||||
(RADEON_CUR_LOCK | (hot_x << 16) | hot_y ));
|
||||
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
|
||||
uint32_t gpu_addr;
|
||||
int xorg =0, yorg=0;
|
||||
|
||||
x = x - hot_x;
|
||||
y = y - hot_y;
|
||||
|
||||
if( x < 0 )
|
||||
{
|
||||
xorg = -x + 1;
|
||||
x = 0;
|
||||
}
|
||||
|
||||
if( y < 0 )
|
||||
{
|
||||
yorg = -hot_y + 1;
|
||||
y = 0;
|
||||
};
|
||||
|
||||
WREG32(RADEON_CUR_HORZ_VERT_OFF,
|
||||
(RADEON_CUR_LOCK | (xorg << 16) | yorg ));
|
||||
WREG32(RADEON_CUR_HORZ_VERT_POSN,
|
||||
(RADEON_CUR_LOCK | (x << 16) | y));
|
||||
|
||||
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
|
||||
|
||||
/* offset is from DISP(2)_BASE_ADDRESS */
|
||||
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
|
||||
(radeon_crtc->legacy_cursor_offset + (hot_y * 256)));
|
||||
WREG32(RADEON_CUR_OFFSET,
|
||||
(gpu_addr - rdev->mc.vram_location + (yorg * 256)));
|
||||
}
|
||||
radeon_lock_cursor_kms(crtc, false);
|
||||
}
|
||||
@ -447,3 +467,40 @@ int set_user_mode(videomode_t *mode)
|
||||
return err;
|
||||
};
|
||||
|
||||
#if 0
|
||||
void drm_helper_disable_unused_functions(struct drm_device *dev)
|
||||
{
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_connector *connector;
|
||||
struct drm_encoder_helper_funcs *encoder_funcs;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
if (!connector->encoder)
|
||||
continue;
|
||||
if (connector->status == connector_status_disconnected)
|
||||
connector->encoder = NULL;
|
||||
}
|
||||
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
encoder_funcs = encoder->helper_private;
|
||||
if (!drm_helper_encoder_in_use(encoder)) {
|
||||
if (encoder_funcs->disable)
|
||||
(*encoder_funcs->disable)(encoder);
|
||||
else
|
||||
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
|
||||
/* disconnect encoder from any connector */
|
||||
encoder->crtc = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
|
||||
crtc->enabled = drm_helper_crtc_in_use(crtc);
|
||||
if (!crtc->enabled) {
|
||||
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
|
||||
crtc->fb = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned i;
|
||||
uint32_t tmp;
|
||||
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
/* read MC_STATUS */
|
||||
tmp = RREG32(0x0150);
|
||||
if (tmp & (1 << 2)) {
|
||||
return 0;
|
||||
}
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
void rs400_gpu_init(struct radeon_device *rdev)
|
||||
{
|
||||
/* FIXME: HDP same place on rs400 ? */
|
||||
r100_hdp_reset(rdev);
|
||||
/* FIXME: is this correct ? */
|
||||
r420_pipes_init(rdev);
|
||||
if (r300_mc_wait_for_idle(rdev)) {
|
||||
printk(KERN_WARNING "Failed to wait MC idle while "
|
||||
"programming pipes. Bad things might happen.\n");
|
||||
if (rs400_mc_wait_for_idle(rdev)) {
|
||||
printk(KERN_WARNING "rs400: Failed to wait MC idle while "
|
||||
"programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
|
||||
}
|
||||
}
|
||||
|
||||
@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
|
||||
r100_mc_stop(rdev, &save);
|
||||
|
||||
/* Wait for mc idle */
|
||||
if (r300_mc_wait_for_idle(rdev))
|
||||
dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
|
||||
if (rs400_mc_wait_for_idle(rdev))
|
||||
dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
|
||||
WREG32(R_000148_MC_FB_LOCATION,
|
||||
S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
|
||||
S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
|
||||
|
@ -272,8 +272,12 @@ void rs600_gart_disable(struct radeon_device *rdev)
|
||||
tmp = RREG32_MC(R_000009_MC_CNTL1);
|
||||
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
|
||||
if (rdev->gart.table.vram.robj) {
|
||||
// radeon_object_kunmap(rdev->gart.table.vram.robj);
|
||||
// radeon_object_unpin(rdev->gart.table.vram.robj);
|
||||
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
|
||||
if (r == 0) {
|
||||
radeon_bo_kunmap(rdev->gart.table.vram.robj);
|
||||
radeon_bo_unpin(rdev->gart.table.vram.robj);
|
||||
radeon_bo_unreserve(rdev->gart.table.vram.robj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -113,15 +113,19 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
|
||||
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
|
||||
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
|
||||
if (rdev->gart.table.vram.robj) {
|
||||
// radeon_object_kunmap(rdev->gart.table.vram.robj);
|
||||
// radeon_object_unpin(rdev->gart.table.vram.robj);
|
||||
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
|
||||
if (likely(r == 0)) {
|
||||
radeon_bo_kunmap(rdev->gart.table.vram.robj);
|
||||
radeon_bo_unpin(rdev->gart.table.vram.robj);
|
||||
radeon_bo_unreserve(rdev->gart.table.vram.robj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void rv770_pcie_gart_fini(struct radeon_device *rdev)
|
||||
{
|
||||
rv770_pcie_gart_disable(rdev);
|
||||
// radeon_gart_table_vram_free(rdev);
|
||||
radeon_gart_table_vram_free(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
}
|
||||
|
||||
@ -877,6 +881,7 @@ static int rv770_startup(struct radeon_device *rdev)
|
||||
}
|
||||
rv770_gpu_init(rdev);
|
||||
|
||||
|
||||
// r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
|
||||
// &rdev->r600_blit.shader_gpu_addr);
|
||||
// if (r) {
|
||||
|
141
drivers/video/drm/ttm/ttm_bo.c
Normal file
@ -0,0 +1,141 @@
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
/* Notes:
|
||||
*
|
||||
* We store bo pointer in drm_mm_node struct so we know which bo own a
|
||||
* specific node. There is no protection on the pointer, thus to make
|
||||
* sure things don't go berserk you have to access this pointer while
|
||||
* holding the global lru lock and make sure anytime you free a node you
|
||||
* reset the pointer to NULL.
|
||||
*/
|
||||
|
||||
#include "ttm/ttm_module.h"
|
||||
#include "ttm/ttm_bo_driver.h"
|
||||
#include "ttm/ttm_placement.h"
|
||||
#include <linux/module.h>
|
||||
|
||||
|
||||
|
||||
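/* Initialise a TTM memory manager of the given type: query the driver for
 * its parameters and, for anything but system memory, back it with a
 * drm_mm range allocator of p_size pages. */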
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
|
||||
unsigned long p_size)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
struct ttm_mem_type_manager *man;
|
||||
|
||||
if (type >= TTM_NUM_MEM_TYPES) {
|
||||
printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
|
||||
return ret;
|
||||
}
|
||||
|
||||
man = &bdev->man[type];
|
||||
if (man->has_type) {
|
||||
printk(KERN_ERR TTM_PFX
|
||||
"Memory manager already initialized for type %d\n",
|
||||
type);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = bdev->driver->init_mem_type(bdev, type, man);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = 0;
|
||||
if (type != TTM_PL_SYSTEM) {
|
||||
if (!p_size) {
|
||||
printk(KERN_ERR TTM_PFX
|
||||
"Zero size memory manager type %d\n",
|
||||
type);
|
||||
return ret;
|
||||
}
|
||||
ret = drm_mm_init(&man->manager, 0, p_size);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
man->has_type = true;
|
||||
man->use_type = true;
|
||||
man->size = p_size;
|
||||
|
||||
INIT_LIST_HEAD(&man->lru);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_init_mm);
|
||||
|
||||
int ttm_bo_global_init(struct ttm_global_reference *ref)
|
||||
{
|
||||
struct ttm_bo_global_ref *bo_ref =
|
||||
container_of(ref, struct ttm_bo_global_ref, ref);
|
||||
struct ttm_bo_global *glob = ref->object;
|
||||
int ret;
|
||||
|
||||
// mutex_init(&glob->device_list_mutex);
|
||||
// spin_lock_init(&glob->lru_lock);
|
||||
glob->mem_glob = bo_ref->mem_glob;
|
||||
// glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
|
||||
|
||||
if (unlikely(glob->dummy_read_page == NULL)) {
|
||||
ret = -ENOMEM;
|
||||
goto out_no_drp;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&glob->swap_lru);
|
||||
INIT_LIST_HEAD(&glob->device_list);
|
||||
|
||||
// ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
|
||||
ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
|
||||
if (unlikely(ret != 0)) {
|
||||
printk(KERN_ERR TTM_PFX
|
||||
"Could not register buffer object swapout.\n");
|
||||
goto out_no_shrink;
|
||||
}
|
||||
|
||||
glob->ttm_bo_extra_size =
|
||||
ttm_round_pot(sizeof(struct ttm_tt)) +
|
||||
ttm_round_pot(sizeof(struct ttm_backend));
|
||||
|
||||
glob->ttm_bo_size = glob->ttm_bo_extra_size +
|
||||
ttm_round_pot(sizeof(struct ttm_buffer_object));
|
||||
|
||||
atomic_set(&glob->bo_count, 0);
|
||||
|
||||
// kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
|
||||
// ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
|
||||
// if (unlikely(ret != 0))
|
||||
// kobject_put(&glob->kobj);
|
||||
return ret;
|
||||
out_no_shrink:
|
||||
__free_page(glob->dummy_read_page);
|
||||
out_no_drp:
|
||||
kfree(glob);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_global_init);
|
||||
|
||||
|
114
drivers/video/drm/ttm/ttm_global.c
Normal file
@ -0,0 +1,114 @@
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#include "ttm/ttm_module.h"
|
||||
//#include <linux/mutex.h>
|
||||
//#include <linux/slab.h>
|
||||
#include "ttm/ttm_bo_driver.h"
|
||||
#include "ttm/ttm_placement.h"
|
||||
#include <linux/module.h>
|
||||
|
||||
struct ttm_global_item {
|
||||
// struct mutex mutex;
|
||||
void *object;
|
||||
int refcount;
|
||||
};
|
||||
|
||||
static struct ttm_global_item glob[TTM_GLOBAL_NUM];
|
||||
|
||||
void ttm_global_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
|
||||
struct ttm_global_item *item = &glob[i];
|
||||
// mutex_init(&item->mutex);
|
||||
item->object = NULL;
|
||||
item->refcount = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void ttm_global_release(void)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
|
||||
struct ttm_global_item *item = &glob[i];
|
||||
BUG_ON(item->object != NULL);
|
||||
BUG_ON(item->refcount != 0);
|
||||
}
|
||||
}
|
||||
|
||||
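/* Reference a global TTM object, creating and initialising it on first use;
 * later callers get the existing object with its refcount bumped. */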
int ttm_global_item_ref(struct ttm_global_reference *ref)
|
||||
{
|
||||
int ret;
|
||||
struct ttm_global_item *item = &glob[ref->global_type];
|
||||
void *object;
|
||||
|
||||
// mutex_lock(&item->mutex);
|
||||
if (item->refcount == 0) {
|
||||
item->object = kzalloc(ref->size, GFP_KERNEL);
|
||||
if (unlikely(item->object == NULL)) {
|
||||
ret = -ENOMEM;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
ref->object = item->object;
|
||||
ret = ref->init(ref);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_err;
|
||||
|
||||
}
|
||||
++item->refcount;
|
||||
ref->object = item->object;
|
||||
object = item->object;
|
||||
// mutex_unlock(&item->mutex);
|
||||
return 0;
|
||||
out_err:
|
||||
// mutex_unlock(&item->mutex);
|
||||
item->object = NULL;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_global_item_ref);
|
||||
|
||||
void ttm_global_item_unref(struct ttm_global_reference *ref)
|
||||
{
|
||||
struct ttm_global_item *item = &glob[ref->global_type];
|
||||
|
||||
// mutex_lock(&item->mutex);
|
||||
BUG_ON(item->refcount == 0);
|
||||
BUG_ON(ref->object != item->object);
|
||||
if (--item->refcount == 0) {
|
||||
ref->release(ref);
|
||||
item->object = NULL;
|
||||
}
|
||||
// mutex_unlock(&item->mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_global_item_unref);
|
||||
|