forked from KolibriOS/kolibrios

commit f7bc5f78ab (parent 34d56f0e98)

kms rc10: Evergreen

git-svn-id: svn://kolibrios.org@1430 a494cfbc-eb01-0410-851d-a64ba20cac60
@ -801,4 +801,6 @@ extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
|
||||
bool interlaced, int margins);
|
||||
extern int drm_add_modes_noedid(struct drm_connector *connector,
|
||||
int hdisplay, int vdisplay);
|
||||
|
||||
extern bool drm_edid_is_valid(struct edid *edid);
|
||||
#endif /* __DRM_CRTC_H__ */
|
||||
|
@ -201,4 +201,7 @@ struct edid {
|
||||
|
||||
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
|
||||
|
||||
/* define the number of Extension EDID block */
|
||||
#define DRM_MAX_EDID_EXT_NUM 4
|
||||
|
||||
#endif /* __DRM_EDID_H__ */
|
||||
|
@ -141,6 +141,41 @@
|
||||
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68a0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68a1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68e1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68e5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
|
@ -808,6 +808,7 @@ struct drm_radeon_gem_create {
|
||||
#define RADEON_TILING_SWAP_32BIT 0x8
|
||||
#define RADEON_TILING_SURFACE 0x10 /* this object requires a surface
|
||||
* when mapped - i.e. front buffer */
|
||||
#define RADEON_TILING_MICRO_SQUARE 0x20
|
||||
|
||||
struct drm_radeon_gem_set_tiling {
|
||||
uint32_t handle;
|
||||
|
@ -28,6 +28,8 @@
|
||||
|
||||
#include <types.h>
|
||||
#include <list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/i2c-id.h>
|
||||
|
||||
|
||||
#define I2C_NAME_SIZE 20
|
||||
@ -44,6 +46,84 @@ union i2c_smbus_data;
|
||||
extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
|
||||
int num);
|
||||
|
||||
/**
|
||||
* struct i2c_driver - represent an I2C device driver
|
||||
* @class: What kind of i2c device we instantiate (for detect)
|
||||
* @attach_adapter: Callback for bus addition (for legacy drivers)
|
||||
* @detach_adapter: Callback for bus removal (for legacy drivers)
|
||||
* @probe: Callback for device binding
|
||||
* @remove: Callback for device unbinding
|
||||
* @shutdown: Callback for device shutdown
|
||||
* @suspend: Callback for device suspend
|
||||
* @resume: Callback for device resume
|
||||
* @command: Callback for bus-wide signaling (optional)
|
||||
* @driver: Device driver model driver
|
||||
* @id_table: List of I2C devices supported by this driver
|
||||
* @detect: Callback for device detection
|
||||
* @address_list: The I2C addresses to probe (for detect)
|
||||
* @clients: List of detected clients we created (for i2c-core use only)
|
||||
*
|
||||
* The driver.owner field should be set to the module owner of this driver.
|
||||
* The driver.name field should be set to the name of this driver.
|
||||
*
|
||||
* For automatic device detection, both @detect and @address_data must
|
||||
* be defined. @class should also be set, otherwise only devices forced
|
||||
* with module parameters will be created. The detect function must
|
||||
* fill at least the name field of the i2c_board_info structure it is
|
||||
* handed upon successful detection, and possibly also the flags field.
|
||||
*
|
||||
* If @detect is missing, the driver will still work fine for enumerated
|
||||
* devices. Detected devices simply won't be supported. This is expected
|
||||
* for the many I2C/SMBus devices which can't be detected reliably, and
|
||||
* the ones which can always be enumerated in practice.
|
||||
*
|
||||
* The i2c_client structure which is handed to the @detect callback is
|
||||
* not a real i2c_client. It is initialized just enough so that you can
|
||||
* call i2c_smbus_read_byte_data and friends on it. Don't do anything
|
||||
* else with it. In particular, calling dev_dbg and friends on it is
|
||||
* not allowed.
|
||||
*/
|
||||
struct i2c_driver {
|
||||
unsigned int class;
|
||||
|
||||
/* Notifies the driver that a new bus has appeared or is about to be
|
||||
* removed. You should avoid using this if you can, it will probably
|
||||
* be removed in a near future.
|
||||
*/
|
||||
int (*attach_adapter)(struct i2c_adapter *);
|
||||
int (*detach_adapter)(struct i2c_adapter *);
|
||||
|
||||
/* Standard driver model interfaces */
|
||||
int (*probe)(struct i2c_client *, const struct i2c_device_id *);
|
||||
int (*remove)(struct i2c_client *);
|
||||
|
||||
/* driver model interfaces that don't relate to enumeration */
|
||||
void (*shutdown)(struct i2c_client *);
|
||||
// int (*suspend)(struct i2c_client *, pm_message_t mesg);
|
||||
int (*resume)(struct i2c_client *);
|
||||
|
||||
/* Alert callback, for example for the SMBus alert protocol.
|
||||
* The format and meaning of the data value depends on the protocol.
|
||||
* For the SMBus alert protocol, there is a single bit of data passed
|
||||
* as the alert response's low bit ("event flag").
|
||||
*/
|
||||
void (*alert)(struct i2c_client *, unsigned int data);
|
||||
|
||||
/* a ioctl like command that can be used to perform specific functions
|
||||
* with the device.
|
||||
*/
|
||||
int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
|
||||
|
||||
// struct device_driver driver;
|
||||
const struct i2c_device_id *id_table;
|
||||
|
||||
/* Device detection callback for automatic device creation */
|
||||
// int (*detect)(struct i2c_client *, struct i2c_board_info *);
|
||||
const unsigned short *address_list;
|
||||
struct list_head clients;
|
||||
};
|
||||
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
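Illustrative aside, not part of this commit: even with this trimmed header, an I2C chip driver is declared much as on Linux — fill in @probe, @remove and @id_table, and set @class if the detect path is ever wired up. The identifiers below (example_probe, example_remove, example_ids) are invented for the sketch, and the initializer assumes the usual Linux-style layout of struct i2c_device_id.

/* Hypothetical sketch of a driver built against this header. */
static int example_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
{
        /* client->adapter points at the bus this chip was found on */
        return 0;
}

static int example_remove(struct i2c_client *client)
{
        return 0;
}

static const struct i2c_device_id example_ids[] = {
        { "example-ddc", 0 },
        { }
};

static struct i2c_driver example_driver = {
        .class    = I2C_CLASS_DDC,   /* DDC bus on graphics adapters */
        .probe    = example_probe,
        .remove   = example_remove,
        .id_table = example_ids,
};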
|
||||
|
||||
/**
|
||||
* struct i2c_client - represent an I2C slave device
|
||||
* @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address;
|
||||
@ -69,13 +149,14 @@ struct i2c_client {
|
||||
/* _LOWER_ 7 bits */
|
||||
char name[I2C_NAME_SIZE];
|
||||
struct i2c_adapter *adapter; /* the adapter we sit on */
|
||||
// struct i2c_driver *driver; /* and our access routines */
|
||||
struct i2c_driver *driver; /* and our access routines */
|
||||
// struct device dev; /* the device structure */
|
||||
int irq; /* irq issued by device (or -1) */
|
||||
int irq; /* irq issued by device (or -1) */
|
||||
struct list_head detected;
|
||||
};
|
||||
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
|
||||
|
||||
extern struct i2c_client *i2c_verify_client(struct device *dev);
|
||||
|
||||
/*
|
||||
* The following structs are for those who like to implement new bus drivers:
|
||||
@ -111,17 +192,26 @@ struct i2c_adapter {
|
||||
void *algo_data;
|
||||
|
||||
/* data fields that are valid for all devices */
|
||||
u8 level; /* nesting level for lockdep */
|
||||
// struct rt_mutex bus_lock;
|
||||
|
||||
int timeout; /* in jiffies */
|
||||
int retries;
|
||||
// struct device dev; /* the adapter device */
|
||||
struct device dev; /* the adapter device */
|
||||
|
||||
int nr;
|
||||
char name[48];
|
||||
};
|
||||
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
|
||||
|
||||
static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
|
||||
{
|
||||
return dev_get_drvdata(&dev->dev);
|
||||
}
|
||||
|
||||
static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
|
||||
{
|
||||
dev_set_drvdata(&dev->dev, data);
|
||||
}
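Usage note (illustration only, not from the diff): these two helpers simply stash a driver-private pointer in the adapter's embedded struct device, so whoever registered the adapter can recover its own state later from a bare struct i2c_adapter pointer. A minimal sketch with invented names:

/* Illustrative only: attach private state to an adapter, then read it back. */
struct example_bus_state {
        int last_status;
};

static void example_attach_state(struct i2c_adapter *adap,
                                 struct example_bus_state *st)
{
        i2c_set_adapdata(adap, st);
}

static int example_last_status(struct i2c_adapter *adap)
{
        struct example_bus_state *st = i2c_get_adapdata(adap);

        return st ? st->last_status : -1;
}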
|
||||
|
||||
/*flags for the client struct: */
|
||||
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
|
||||
@ -136,17 +226,6 @@ struct i2c_adapter {
|
||||
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
|
||||
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
|
||||
|
||||
/* i2c_client_address_data is the struct for holding default client
|
||||
* addresses for a driver and for the parameters supplied on the
|
||||
* command line
|
||||
*/
|
||||
struct i2c_client_address_data {
|
||||
const unsigned short *normal_i2c;
|
||||
const unsigned short *probe;
|
||||
const unsigned short *ignore;
|
||||
const unsigned short * const *forces;
|
||||
};
|
||||
|
||||
/* Internal numbers to terminate lists */
|
||||
#define I2C_CLIENT_END 0xfffeU
|
||||
|
||||
@ -275,25 +354,4 @@ union i2c_smbus_data {
|
||||
#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */
|
||||
#define I2C_SMBUS_I2C_BLOCK_DATA 8
#endif /* _LINUX_I2C_H */
@ -124,7 +124,22 @@ typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
|
||||
struct file {};
|
||||
struct vm_area_struct {};
|
||||
struct address_space {};
|
||||
struct device {};
|
||||
|
||||
struct device
|
||||
{
|
||||
struct device *parent;
|
||||
void *driver_data;
|
||||
};
|
||||
|
||||
static inline void dev_set_drvdata(struct device *dev, void *data)
|
||||
{
|
||||
dev->driver_data = data;
|
||||
}
|
||||
|
||||
static inline void *dev_get_drvdata(struct device *dev)
|
||||
{
|
||||
return dev->driver_data;
|
||||
}
|
||||
|
||||
#define preempt_disable() do { } while (0)
|
||||
#define preempt_enable_no_resched() do { } while (0)
|
||||
|
@ -4,12 +4,18 @@
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
|
||||
#define EXPORT_SYMBOL(x)
|
||||
|
||||
#define MODULE_FIRMWARE(x)
|
||||
|
||||
|
||||
#define MODULE_AUTHOR(x)
|
||||
#define MODULE_DESCRIPTION(x)
|
||||
#define MODULE_LICENSE(x)
|
||||
|
||||
struct module {};
|
||||
|
||||
#endif /* _LINUX_MODULE_H */
|
||||
|
@ -484,7 +484,7 @@ struct pci_dev {
|
||||
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
|
||||
|
||||
// pci_channel_state_t error_state; /* current connectivity state */
|
||||
// struct device dev; /* Generic device interface */
|
||||
struct device dev; /* Generic device interface */
|
||||
|
||||
// int cfg_size; /* Size of configuration space */
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
|
||||
|
||||
static Bool FindPciDevice()
|
||||
Bool FindPciDevice()
|
||||
{
|
||||
Bool retval = FALSE;
|
||||
u32_t bus, last_bus;
|
||||
@ -20,13 +20,17 @@ static Bool FindPciDevice()
|
||||
u32_t id;
|
||||
u16_t pcicmd;
|
||||
u16_t devclass;
|
||||
u8_t interface;
|
||||
int i;
|
||||
|
||||
interface = PciRead8(bus,devfn, 0x09);
|
||||
devclass = PciRead16(bus,devfn, 0x0A);
|
||||
|
||||
if( devclass != 0x0C03)
|
||||
continue;
|
||||
|
||||
if( interface != 0)
|
||||
continue;
|
||||
|
||||
pcicmd = PciRead16(bus,devfn, PCI_COMMAND);
|
||||
if (! pcicmd & PCI_COMMAND_IO)
|
||||
continue;
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include "syscall.h"
|
||||
#include "usb.h"
|
||||
|
||||
static Bool FindPciDevice();
|
||||
|
||||
int __stdcall srv_usb(ioctl_t *io);
|
||||
|
||||
|
@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
|
||||
mode_changed = true;
|
||||
} else if (set->fb == NULL) {
|
||||
mode_changed = true;
|
||||
} else if ((set->fb->bits_per_pixel !=
|
||||
set->crtc->fb->bits_per_pixel) ||
|
||||
set->fb->depth != set->crtc->fb->depth)
|
||||
fb_changed = true;
|
||||
else
|
||||
} else
|
||||
fb_changed = true;
|
||||
}
|
||||
|
||||
|
@ -60,8 +60,7 @@
|
||||
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
|
||||
/* use +hsync +vsync for detailed mode */
|
||||
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
|
||||
/* define the number of Extension EDID block */
|
||||
#define MAX_EDID_EXT_NUM 4
|
||||
|
||||
|
||||
#define LEVEL_DMT 0
|
||||
#define LEVEL_GTF 1
|
||||
@ -114,14 +113,14 @@ static const u8 edid_header[] = {
|
||||
};
|
||||
|
||||
/**
|
||||
* edid_is_valid - sanity check EDID data
|
||||
* drm_edid_is_valid - sanity check EDID data
|
||||
* @edid: EDID data
|
||||
*
|
||||
* Sanity check the EDID block by looking at the header, the version number
|
||||
* and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
|
||||
* valid.
|
||||
*/
|
||||
static bool edid_is_valid(struct edid *edid)
|
||||
bool drm_edid_is_valid(struct edid *edid)
|
||||
{
|
||||
int i, score = 0;
|
||||
u8 csum = 0;
|
||||
@ -163,6 +162,7 @@ bad:
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_edid_is_valid);
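For context (reference sketch, not code from this change): the validity test documented above boils down to the fixed 8-byte header, the version/revision bytes, and the block checksum, where all 128 bytes of an EDID block must sum to zero modulo 256. The helper name below is invented.

/* Reference sketch of the EDID block checksum rule; EDID_LENGTH is 128. */
static int example_edid_block_checksum_ok(const unsigned char *block)
{
        unsigned char csum = 0;
        int i;

        for (i = 0; i < EDID_LENGTH; i++)
                csum += block[i];

        return csum == 0;
}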
|
||||
|
||||
/**
|
||||
* edid_vendor - match a string against EDID's obfuscated vendor field
|
||||
@ -1112,8 +1112,8 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
|
||||
}
|
||||
|
||||
/* Chose real EDID extension number */
|
||||
edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
|
||||
MAX_EDID_EXT_NUM : edid->extensions;
|
||||
edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
|
||||
DRM_MAX_EDID_EXT_NUM : edid->extensions;
|
||||
|
||||
/* Find CEA extension */
|
||||
for (i = 0; i < edid_ext_num; i++) {
|
||||
@ -1195,7 +1195,7 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (drm_do_probe_ddc_edid(adapter, buf, len))
|
||||
return -1;
|
||||
if (edid_is_valid((struct edid *)buf))
|
||||
if (drm_edid_is_valid((struct edid *)buf))
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1220,7 +1220,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
|
||||
int ret;
|
||||
struct edid *edid;
|
||||
|
||||
edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
|
||||
edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
|
||||
GFP_KERNEL);
|
||||
if (edid == NULL) {
|
||||
dev_warn(&connector->dev->pdev->dev,
|
||||
@ -1238,14 +1238,14 @@ struct edid *drm_get_edid(struct drm_connector *connector,
|
||||
if (edid->extensions != 0) {
|
||||
int edid_ext_num = edid->extensions;
|
||||
|
||||
if (edid_ext_num > MAX_EDID_EXT_NUM) {
|
||||
if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
|
||||
dev_warn(&connector->dev->pdev->dev,
|
||||
"The number of extension(%d) is "
|
||||
"over max (%d), actually read number (%d)\n",
|
||||
edid_ext_num, MAX_EDID_EXT_NUM,
|
||||
MAX_EDID_EXT_NUM);
|
||||
edid_ext_num, DRM_MAX_EDID_EXT_NUM,
|
||||
DRM_MAX_EDID_EXT_NUM);
|
||||
/* Reset EDID extension number to be read */
|
||||
edid_ext_num = MAX_EDID_EXT_NUM;
|
||||
edid_ext_num = DRM_MAX_EDID_EXT_NUM;
|
||||
}
|
||||
/* Read EDID including extensions too */
|
||||
ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
|
||||
@ -1288,8 +1288,8 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
|
||||
goto end;
|
||||
|
||||
/* Chose real EDID extension number */
|
||||
edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
|
||||
MAX_EDID_EXT_NUM : edid->extensions;
|
||||
edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
|
||||
DRM_MAX_EDID_EXT_NUM : edid->extensions;
|
||||
|
||||
/* Find CEA extension */
|
||||
for (i = 0; i < edid_ext_num; i++) {
|
||||
@ -1346,7 +1346,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
|
||||
if (edid == NULL) {
|
||||
return 0;
|
||||
}
|
||||
if (!edid_is_valid(edid)) {
|
||||
if (!drm_edid_is_valid(edid)) {
|
||||
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
|
||||
drm_get_connector_name(connector));
|
||||
return 0;
|
||||
|
@ -27,6 +27,7 @@
|
||||
* Dave Airlie <airlied@linux.ie>
|
||||
* Jesse Barnes <jesse.barnes@intel.com>
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
//#include <linux/sysrq.h>
|
||||
#include <linux/fb.h>
|
||||
#include "drmP.h"
|
||||
@ -34,9 +35,9 @@
|
||||
#include "drm_fb_helper.h"
|
||||
#include "drm_crtc_helper.h"
|
||||
|
||||
//MODULE_AUTHOR("David Airlie, Jesse Barnes");
|
||||
//MODULE_DESCRIPTION("DRM KMS helper");
|
||||
//MODULE_LICENSE("GPL and additional rights");
|
||||
MODULE_AUTHOR("David Airlie, Jesse Barnes");
|
||||
MODULE_DESCRIPTION("DRM KMS helper");
|
||||
MODULE_LICENSE("GPL and additional rights");
|
||||
|
||||
static LIST_HEAD(kernel_fb_helper_list);
|
||||
|
||||
@ -480,7 +481,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
|
||||
int i;
|
||||
|
||||
if (var->pixclock != 0) {
|
||||
DRM_ERROR("PIXEL CLCOK SET\n");
|
||||
DRM_ERROR("PIXEL CLOCK SET\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -402,7 +402,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
|
||||
wasted += alignment - tmp;
|
||||
}
|
||||
|
||||
if (entry->size >= size + wasted) {
|
||||
if (entry->size >= size + wasted &&
|
||||
(entry->start + wasted + size) <= end) {
|
||||
if (!best_match)
|
||||
return entry;
|
||||
if (entry->size < best_size) {
|
||||
|
@ -881,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
|
||||
uint8_t attr = U8((*ptr)++), shift;
|
||||
uint32_t saved, dst;
|
||||
int dptr = *ptr;
|
||||
attr &= 0x38;
|
||||
attr |= atom_def_dst[attr >> 3] << 6;
|
||||
SDEBUG(" dst: ");
|
||||
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
|
||||
shift = atom_get_src(ctx, attr, ptr);
|
||||
@ -897,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
|
||||
uint8_t attr = U8((*ptr)++), shift;
|
||||
uint32_t saved, dst;
|
||||
int dptr = *ptr;
|
||||
attr &= 0x38;
|
||||
attr |= atom_def_dst[attr >> 3] << 6;
|
||||
SDEBUG(" dst: ");
|
||||
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
|
||||
shift = atom_get_src(ctx, attr, ptr);
|
||||
|
File diff suppressed because it is too large
@ -245,19 +245,25 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
|
||||
switch (mode) {
|
||||
case DRM_MODE_DPMS_ON:
|
||||
atombios_enable_crtc(crtc, 1);
|
||||
atombios_enable_crtc(crtc, ATOM_ENABLE);
|
||||
if (ASIC_IS_DCE3(rdev))
|
||||
atombios_enable_crtc_memreq(crtc, 1);
|
||||
atombios_blank_crtc(crtc, 0);
|
||||
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
|
||||
atombios_blank_crtc(crtc, ATOM_DISABLE);
|
||||
/* XXX re-enable when interrupt support is added */
|
||||
if (!ASIC_IS_DCE4(rdev))
|
||||
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
|
||||
radeon_crtc_load_lut(crtc);
|
||||
break;
|
||||
case DRM_MODE_DPMS_STANDBY:
|
||||
case DRM_MODE_DPMS_SUSPEND:
|
||||
case DRM_MODE_DPMS_OFF:
|
||||
atombios_blank_crtc(crtc, 1);
|
||||
/* XXX re-enable when interrupt support is added */
|
||||
if (!ASIC_IS_DCE4(rdev))
|
||||
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
|
||||
atombios_blank_crtc(crtc, ATOM_ENABLE);
|
||||
if (ASIC_IS_DCE3(rdev))
|
||||
atombios_enable_crtc_memreq(crtc, 0);
|
||||
atombios_enable_crtc(crtc, 0);
|
||||
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
|
||||
atombios_enable_crtc(crtc, ATOM_DISABLE);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -347,6 +353,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
union atom_enable_ss {
|
||||
ENABLE_LVDS_SS_PARAMETERS legacy;
|
||||
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
|
||||
};
|
||||
|
||||
static void atombios_set_ss(struct drm_crtc *crtc, int enable)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
@ -356,11 +367,14 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
|
||||
struct radeon_encoder *radeon_encoder = NULL;
|
||||
struct radeon_encoder_atom_dig *dig = NULL;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
|
||||
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args;
|
||||
ENABLE_LVDS_SS_PARAMETERS legacy_args;
|
||||
union atom_enable_ss args;
|
||||
uint16_t percentage = 0;
|
||||
uint8_t type = 0, step = 0, delay = 0, range = 0;
|
||||
|
||||
/* XXX add ss support for DCE4 */
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
return;
|
||||
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
if (encoder->crtc == crtc) {
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
@ -384,29 +398,28 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
|
||||
if (!radeon_encoder)
|
||||
return;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
|
||||
args.ucSpreadSpectrumType = type;
|
||||
args.ucSpreadSpectrumStep = step;
|
||||
args.ucSpreadSpectrumDelay = delay;
|
||||
args.ucSpreadSpectrumRange = range;
|
||||
args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
|
||||
args.ucEnable = enable;
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
|
||||
args.v1.ucSpreadSpectrumType = type;
|
||||
args.v1.ucSpreadSpectrumStep = step;
|
||||
args.v1.ucSpreadSpectrumDelay = delay;
|
||||
args.v1.ucSpreadSpectrumRange = range;
|
||||
args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
|
||||
args.v1.ucEnable = enable;
|
||||
} else {
|
||||
memset(&legacy_args, 0, sizeof(legacy_args));
|
||||
legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
|
||||
legacy_args.ucSpreadSpectrumType = type;
|
||||
legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
|
||||
legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
|
||||
legacy_args.ucEnable = enable;
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
|
||||
args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
|
||||
args.legacy.ucSpreadSpectrumType = type;
|
||||
args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
|
||||
args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
|
||||
args.legacy.ucEnable = enable;
|
||||
}
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
union adjust_pixel_clock {
|
||||
ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
|
||||
ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
|
||||
};
|
||||
|
||||
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
|
||||
@ -418,10 +431,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
|
||||
struct drm_encoder *encoder = NULL;
|
||||
struct radeon_encoder *radeon_encoder = NULL;
|
||||
u32 adjusted_clock = mode->clock;
|
||||
int encoder_mode = 0;
|
||||
|
||||
/* reset the pll flags */
|
||||
pll->flags = 0;
|
||||
|
||||
/* select the PLL algo */
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (radeon_new_pll == 0)
|
||||
pll->algo = PLL_ALGO_LEGACY;
|
||||
else
|
||||
pll->algo = PLL_ALGO_NEW;
|
||||
} else {
|
||||
if (radeon_new_pll == 1)
|
||||
pll->algo = PLL_ALGO_NEW;
|
||||
else
|
||||
pll->algo = PLL_ALGO_LEGACY;
|
||||
}
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if ((rdev->family == CHIP_RS600) ||
|
||||
(rdev->family == CHIP_RS690) ||
|
||||
@ -446,10 +473,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
if (encoder->crtc == crtc) {
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
|
||||
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
|
||||
adjusted_clock = mode->clock * 2;
|
||||
/* LVDS PLL quirks */
|
||||
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
pll->algo = dig->pll_algo;
|
||||
}
|
||||
} else {
|
||||
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
|
||||
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
|
||||
@ -466,14 +499,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
|
||||
*/
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
union adjust_pixel_clock args;
|
||||
struct radeon_encoder_atom_dig *dig;
|
||||
u8 frev, crev;
|
||||
int index;
|
||||
|
||||
if (!radeon_encoder->enc_priv)
|
||||
return adjusted_clock;
|
||||
dig = radeon_encoder->enc_priv;
|
||||
|
||||
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
&crev);
|
||||
@ -487,12 +515,51 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
|
||||
case 2:
|
||||
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
|
||||
args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
|
||||
args.v1.ucEncodeMode = encoder_mode;
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context,
|
||||
index, (uint32_t *)&args);
|
||||
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
|
||||
break;
|
||||
case 3:
|
||||
args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
|
||||
args.v3.sInput.ucEncodeMode = encoder_mode;
|
||||
args.v3.sInput.ucDispPllConfig = 0;
|
||||
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
|
||||
if (encoder_mode == ATOM_ENCODER_MODE_DP)
|
||||
args.v3.sInput.ucDispPllConfig |=
|
||||
DISPPLL_CONFIG_COHERENT_MODE;
|
||||
else {
|
||||
if (dig->coherent_mode)
|
||||
args.v3.sInput.ucDispPllConfig |=
|
||||
DISPPLL_CONFIG_COHERENT_MODE;
|
||||
if (mode->clock > 165000)
|
||||
args.v3.sInput.ucDispPllConfig |=
|
||||
DISPPLL_CONFIG_DUAL_LINK;
|
||||
}
|
||||
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
/* may want to enable SS on DP/eDP eventually */
|
||||
args.v3.sInput.ucDispPllConfig |=
|
||||
DISPPLL_CONFIG_SS_ENABLE;
|
||||
if (mode->clock > 165000)
|
||||
args.v3.sInput.ucDispPllConfig |=
|
||||
DISPPLL_CONFIG_DUAL_LINK;
|
||||
}
|
||||
atom_execute_table(rdev->mode_info.atom_context,
|
||||
index, (uint32_t *)&args);
|
||||
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
|
||||
if (args.v3.sOutput.ucRefDiv) {
|
||||
pll->flags |= RADEON_PLL_USE_REF_DIV;
|
||||
pll->reference_div = args.v3.sOutput.ucRefDiv;
|
||||
}
|
||||
if (args.v3.sOutput.ucPostDiv) {
|
||||
pll->flags |= RADEON_PLL_USE_POST_DIV;
|
||||
pll->post_div = args.v3.sOutput.ucPostDiv;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
|
||||
return adjusted_clock;
|
||||
@ -511,9 +578,47 @@ union set_pixel_clock {
|
||||
PIXEL_CLOCK_PARAMETERS v1;
|
||||
PIXEL_CLOCK_PARAMETERS_V2 v2;
|
||||
PIXEL_CLOCK_PARAMETERS_V3 v3;
|
||||
PIXEL_CLOCK_PARAMETERS_V5 v5;
|
||||
};
|
||||
|
||||
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
u8 frev, crev;
|
||||
int index;
|
||||
union set_pixel_clock args;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
&crev);
|
||||
|
||||
switch (frev) {
|
||||
case 1:
|
||||
switch (crev) {
|
||||
case 5:
|
||||
/* if the default dcpll clock is specified,
|
||||
* SetPixelClock provides the dividers
|
||||
*/
|
||||
args.v5.ucCRTC = ATOM_CRTC_INVALID;
|
||||
args.v5.usPixelClock = rdev->clock.default_dispclk;
|
||||
args.v5.ucPpll = ATOM_DCPLL;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
|
||||
return;
|
||||
}
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
@ -527,12 +632,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
|
||||
struct radeon_pll *pll;
|
||||
u32 adjusted_clock;
|
||||
int encoder_mode = 0;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
if (encoder->crtc == crtc) {
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -540,24 +647,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
if (!radeon_encoder)
|
||||
return;
|
||||
|
||||
if (radeon_crtc->crtc_id == 0)
|
||||
switch (radeon_crtc->pll_id) {
|
||||
case ATOM_PPLL1:
|
||||
pll = &rdev->clock.p1pll;
|
||||
else
|
||||
break;
|
||||
case ATOM_PPLL2:
|
||||
pll = &rdev->clock.p2pll;
|
||||
break;
|
||||
case ATOM_DCPLL:
|
||||
case ATOM_PPLL_INVALID:
|
||||
pll = &rdev->clock.dcpll;
|
||||
break;
|
||||
}
|
||||
|
||||
/* adjust pixel clock as needed */
|
||||
adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (radeon_new_pll)
|
||||
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
|
||||
&fb_div, &frac_fb_div,
|
||||
&ref_div, &post_div);
|
||||
else
|
||||
radeon_compute_pll(pll, adjusted_clock, &pll_clock,
|
||||
&fb_div, &frac_fb_div,
|
||||
&ref_div, &post_div);
|
||||
} else
|
||||
radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
|
||||
&ref_div, &post_div);
|
||||
|
||||
@ -574,8 +679,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
args.v1.usFbDiv = cpu_to_le16(fb_div);
|
||||
args.v1.ucFracFbDiv = frac_fb_div;
|
||||
args.v1.ucPostDiv = post_div;
|
||||
args.v1.ucPpll =
|
||||
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
|
||||
args.v1.ucPpll = radeon_crtc->pll_id;
|
||||
args.v1.ucCRTC = radeon_crtc->crtc_id;
|
||||
args.v1.ucRefDivSrc = 1;
|
||||
break;
|
||||
@ -585,8 +689,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
args.v2.usFbDiv = cpu_to_le16(fb_div);
|
||||
args.v2.ucFracFbDiv = frac_fb_div;
|
||||
args.v2.ucPostDiv = post_div;
|
||||
args.v2.ucPpll =
|
||||
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
|
||||
args.v2.ucPpll = radeon_crtc->pll_id;
|
||||
args.v2.ucCRTC = radeon_crtc->crtc_id;
|
||||
args.v2.ucRefDivSrc = 1;
|
||||
break;
|
||||
@ -596,12 +699,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
args.v3.usFbDiv = cpu_to_le16(fb_div);
|
||||
args.v3.ucFracFbDiv = frac_fb_div;
|
||||
args.v3.ucPostDiv = post_div;
|
||||
args.v3.ucPpll =
|
||||
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
|
||||
args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
|
||||
args.v3.ucPpll = radeon_crtc->pll_id;
|
||||
args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
|
||||
args.v3.ucTransmitterId = radeon_encoder->encoder_id;
|
||||
args.v3.ucEncoderMode =
|
||||
atombios_get_encoder_mode(encoder);
|
||||
args.v3.ucEncoderMode = encoder_mode;
|
||||
break;
|
||||
case 5:
|
||||
args.v5.ucCRTC = radeon_crtc->crtc_id;
|
||||
args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
|
||||
args.v5.ucRefDiv = ref_div;
|
||||
args.v5.usFbDiv = cpu_to_le16(fb_div);
|
||||
args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
|
||||
args.v5.ucPostDiv = post_div;
|
||||
args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
|
||||
args.v5.ucTransmitterID = radeon_encoder->encoder_id;
|
||||
args.v5.ucEncoderMode = encoder_mode;
|
||||
args.v5.ucPpll = radeon_crtc->pll_id;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
|
||||
@ -616,11 +729,143 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
struct drm_framebuffer *old_fb)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_framebuffer *radeon_fb;
|
||||
struct drm_gem_object *obj;
|
||||
struct radeon_bo *rbo;
|
||||
uint64_t fb_location;
|
||||
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
|
||||
int r;
|
||||
|
||||
/* no fb bound */
|
||||
if (!crtc->fb) {
|
||||
DRM_DEBUG("No FB bound\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
radeon_fb = to_radeon_framebuffer(crtc->fb);
|
||||
|
||||
/* Pin framebuffer & get tilling informations */
|
||||
obj = radeon_fb->obj;
|
||||
rbo = obj->driver_private;
|
||||
r = radeon_bo_reserve(rbo, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
|
||||
if (unlikely(r != 0)) {
|
||||
radeon_bo_unreserve(rbo);
|
||||
return -EINVAL;
|
||||
}
|
||||
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
|
||||
radeon_bo_unreserve(rbo);
|
||||
|
||||
switch (crtc->fb->bits_per_pixel) {
|
||||
case 8:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
|
||||
break;
|
||||
case 15:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
|
||||
break;
|
||||
case 16:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
|
||||
break;
|
||||
case 24:
|
||||
case 32:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unsupported screen depth %d\n",
|
||||
crtc->fb->bits_per_pixel);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (radeon_crtc->crtc_id) {
|
||||
case 0:
|
||||
WREG32(AVIVO_D1VGA_CONTROL, 0);
|
||||
break;
|
||||
case 1:
|
||||
WREG32(AVIVO_D2VGA_CONTROL, 0);
|
||||
break;
|
||||
case 2:
|
||||
WREG32(EVERGREEN_D3VGA_CONTROL, 0);
|
||||
break;
|
||||
case 3:
|
||||
WREG32(EVERGREEN_D4VGA_CONTROL, 0);
|
||||
break;
|
||||
case 4:
|
||||
WREG32(EVERGREEN_D5VGA_CONTROL, 0);
|
||||
break;
|
||||
case 5:
|
||||
WREG32(EVERGREEN_D6VGA_CONTROL, 0);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
|
||||
upper_32_bits(fb_location));
|
||||
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
|
||||
upper_32_bits(fb_location));
|
||||
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
|
||||
(u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
|
||||
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
|
||||
(u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
|
||||
WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
|
||||
|
||||
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
|
||||
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
|
||||
|
||||
fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
|
||||
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
|
||||
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
|
||||
|
||||
WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
|
||||
crtc->mode.vdisplay);
|
||||
x &= ~3;
|
||||
y &= ~1;
|
||||
WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
|
||||
(x << 16) | y);
|
||||
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
|
||||
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
|
||||
|
||||
if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
|
||||
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
|
||||
EVERGREEN_INTERLEAVE_EN);
|
||||
else
|
||||
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
|
||||
|
||||
if (old_fb && old_fb != crtc->fb) {
|
||||
radeon_fb = to_radeon_framebuffer(old_fb);
|
||||
rbo = radeon_fb->obj->driver_private;
|
||||
r = radeon_bo_reserve(rbo, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
radeon_bo_unpin(rbo);
|
||||
radeon_bo_unreserve(rbo);
|
||||
}
|
||||
|
||||
/* Bytes per pixel may have changed */
|
||||
radeon_bandwidth_update(rdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
struct drm_framebuffer *old_fb)
|
||||
{
|
||||
ENTER();
|
||||
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
@ -746,8 +991,6 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
/* Bytes per pixel may have changed */
|
||||
radeon_bandwidth_update(rdev);
|
||||
|
||||
LEAVE();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -757,7 +1000,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
return evergreen_crtc_set_base(crtc, x, y, old_fb);
|
||||
else if (ASIC_IS_AVIVO(rdev))
|
||||
return avivo_crtc_set_base(crtc, x, y, old_fb);
|
||||
else
|
||||
return radeon_crtc_set_base(crtc, x, y, old_fb);
|
||||
@ -787,6 +1032,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
|
||||
}
|
||||
}
|
||||
|
||||
static int radeon_atom_pick_pll(struct drm_crtc *crtc)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct drm_encoder *test_encoder;
|
||||
struct drm_crtc *test_crtc;
|
||||
uint32_t pll_in_use = 0;
|
||||
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
/* if crtc is driving DP and we have an ext clock, use that */
|
||||
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
|
||||
if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
|
||||
if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
|
||||
if (rdev->clock.dp_extclk)
|
||||
return ATOM_PPLL_INVALID;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* otherwise, pick one of the plls */
|
||||
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
|
||||
struct radeon_crtc *radeon_test_crtc;
|
||||
|
||||
if (crtc == test_crtc)
|
||||
continue;
|
||||
|
||||
radeon_test_crtc = to_radeon_crtc(test_crtc);
|
||||
if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
|
||||
(radeon_test_crtc->pll_id <= ATOM_PPLL2))
|
||||
pll_in_use |= (1 << radeon_test_crtc->pll_id);
|
||||
}
|
||||
if (!(pll_in_use & 1))
|
||||
return ATOM_PPLL1;
|
||||
return ATOM_PPLL2;
|
||||
} else
|
||||
return radeon_crtc->crtc_id;
|
||||
|
||||
}
|
||||
|
||||
int atombios_crtc_mode_set(struct drm_crtc *crtc,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode,
|
||||
@ -798,19 +1083,27 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
|
||||
|
||||
/* TODO color tiling */
|
||||
|
||||
/* pick pll */
|
||||
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
|
||||
|
||||
atombios_set_ss(crtc, 0);
|
||||
/* always set DCPLL */
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
atombios_crtc_set_dcpll(crtc);
|
||||
atombios_crtc_set_pll(crtc, adjusted_mode);
|
||||
atombios_set_ss(crtc, 1);
|
||||
atombios_crtc_set_timing(crtc, adjusted_mode);
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
atombios_crtc_set_base(crtc, x, y, old_fb);
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
|
||||
else if (ASIC_IS_AVIVO(rdev))
|
||||
atombios_crtc_set_timing(crtc, adjusted_mode);
|
||||
else {
|
||||
atombios_crtc_set_timing(crtc, adjusted_mode);
|
||||
if (radeon_crtc->crtc_id == 0)
|
||||
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
|
||||
atombios_crtc_set_base(crtc, x, y, old_fb);
|
||||
radeon_legacy_atom_fixup(crtc);
|
||||
}
|
||||
atombios_crtc_set_base(crtc, x, y, old_fb);
|
||||
atombios_overscan_setup(crtc, mode, adjusted_mode);
|
||||
atombios_scaler_setup(crtc);
|
||||
return 0;
|
||||
@ -827,14 +1120,14 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
|
||||
|
||||
static void atombios_crtc_prepare(struct drm_crtc *crtc)
|
||||
{
|
||||
atombios_lock_crtc(crtc, 1);
|
||||
atombios_lock_crtc(crtc, ATOM_ENABLE);
|
||||
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
|
||||
}
|
||||
|
||||
static void atombios_crtc_commit(struct drm_crtc *crtc)
|
||||
{
|
||||
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
|
||||
atombios_lock_crtc(crtc, 0);
|
||||
atombios_lock_crtc(crtc, ATOM_DISABLE);
|
||||
}
|
||||
|
||||
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
|
||||
@ -850,8 +1143,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
|
||||
void radeon_atombios_init_crtc(struct drm_device *dev,
|
||||
struct radeon_crtc *radeon_crtc)
|
||||
{
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
switch (radeon_crtc->crtc_id) {
|
||||
case 0:
|
||||
default:
|
||||
radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
|
||||
break;
|
||||
case 1:
|
||||
radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
|
||||
break;
|
||||
case 2:
|
||||
radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
|
||||
break;
|
||||
case 3:
|
||||
radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
|
||||
break;
|
||||
case 4:
|
||||
radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
|
||||
break;
|
||||
case 5:
|
||||
radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
if (radeon_crtc->crtc_id == 1)
|
||||
radeon_crtc->crtc_offset =
|
||||
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
|
||||
else
|
||||
radeon_crtc->crtc_offset = 0;
|
||||
}
|
||||
radeon_crtc->pll_id = -1;
|
||||
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
|
||||
}
|
||||
|
@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
|
||||
train_set[lane] = v | p;
|
||||
}
|
||||
|
||||
union aux_channel_transaction {
|
||||
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
|
||||
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
|
||||
};
|
||||
|
||||
/* radeon aux chan functions */
|
||||
bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
|
||||
@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
|
||||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
|
||||
union aux_channel_transaction args;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
|
||||
unsigned char *base;
|
||||
int retry_count = 0;
|
||||
@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
|
||||
retry:
|
||||
memcpy(base, req_bytes, num_bytes);
|
||||
|
||||
args.lpAuxRequest = 0;
|
||||
args.lpDataOut = 16;
|
||||
args.ucDataOutLen = 0;
|
||||
args.ucChannelID = chan->rec.i2c_id;
|
||||
args.ucDelay = delay / 10;
|
||||
args.v1.lpAuxRequest = 0;
|
||||
args.v1.lpDataOut = 16;
|
||||
args.v1.ucDataOutLen = 0;
|
||||
args.v1.ucChannelID = chan->rec.i2c_id;
|
||||
args.v1.ucDelay = delay / 10;
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
args.v2.ucHPD_ID = chan->rec.hpd_id;
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
|
||||
if (args.ucReplyStatus && !args.ucDataOutLen) {
|
||||
if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
|
||||
if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
|
||||
if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
|
||||
goto retry;
|
||||
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
|
||||
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
|
||||
chan->rec.i2c_id, args.ucReplyStatus, retry_count);
|
||||
chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (args.ucDataOutLen && read_byte && read_buf_len) {
|
||||
if (read_buf_len < args.ucDataOutLen) {
|
||||
if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
|
||||
if (read_buf_len < args.v1.ucDataOutLen) {
|
||||
DRM_ERROR("Buffer to small for return answer %d %d\n",
|
||||
read_buf_len, args.ucDataOutLen);
|
||||
read_buf_len, args.v1.ucDataOutLen);
|
||||
return false;
|
||||
}
|
||||
{
|
||||
int len = min(read_buf_len, args.ucDataOutLen);
|
||||
int len = min(read_buf_len, args.v1.ucDataOutLen);
|
||||
memcpy(read_byte, base + 16, len);
|
||||
}
|
||||
}
|
||||
@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder,
|
||||
dp_set_link_bw_lanes(radeon_connector, link_configuration);
|
||||
/* disable downspread on the sink */
|
||||
dp_set_downspread(radeon_connector, 0);
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
/* start training on the source */
|
||||
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
|
||||
/* set training pattern 1 on the source */
|
||||
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
|
||||
} else {
|
||||
/* start training on the source */
|
||||
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
|
||||
dig_connector->dp_clock, enc_id, 0);
|
||||
/* set training pattern 1 on the source */
|
||||
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
|
||||
dig_connector->dp_clock, enc_id, 0);
|
||||
}
|
||||
|
||||
/* set initial vs/emph */
|
||||
memset(train_set, 0, 4);
|
||||
@ -691,6 +704,9 @@ void dp_link_train(struct drm_encoder *encoder,
|
||||
/* set training pattern 2 on the sink */
|
||||
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
|
||||
/* set training pattern 2 on the source */
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
|
||||
else
|
||||
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
|
||||
dig_connector->dp_clock, enc_id, 1);
|
||||
|
||||
@ -729,7 +745,11 @@ void dp_link_train(struct drm_encoder *encoder,
|
||||
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
|
||||
|
||||
/* disable the training pattern on the sink */
|
||||
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
|
||||
else
|
||||
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
|
||||
dig_connector->dp_clock, enc_id, 0);
|
||||
|
||||
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
|
||||
dig_connector->dp_clock, enc_id, 0);
|
||||
|
@ -30,11 +30,13 @@
|
||||
|
||||
#define D1CRTC_CONTROL 0x6080
|
||||
#define CRTC_EN (1 << 0)
|
||||
#define D1CRTC_STATUS 0x609c
|
||||
#define D1CRTC_UPDATE_LOCK 0x60E8
|
||||
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
|
||||
#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
|
||||
|
||||
#define D2CRTC_CONTROL 0x6880
|
||||
#define D2CRTC_STATUS 0x689c
|
||||
#define D2CRTC_UPDATE_LOCK 0x68E8
|
||||
#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
|
||||
#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
|
||||
|
720
drivers/video/drm/radeon/evergreen.c
Normal file
@ -0,0 +1,720 @@
|
||||
/*
|
||||
* Copyright 2010 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Alex Deucher
|
||||
*/
|
||||
#include <linux/firmware.h>
|
||||
//#include <linux/platform_device.h>
|
||||
#include "drmP.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "rv770d.h"
|
||||
#include "atom.h"
|
||||
#include "avivod.h"
|
||||
#include "evergreen_reg.h"
|
||||
|
||||
static void evergreen_gpu_init(struct radeon_device *rdev);
|
||||
void evergreen_fini(struct radeon_device *rdev);
|
||||
|
||||
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
|
||||
{
|
||||
bool connected = false;
|
||||
/* XXX */
|
||||
return connected;
|
||||
}
|
||||
|
||||
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
|
||||
enum radeon_hpd_id hpd)
|
||||
{
|
||||
/* XXX */
|
||||
}
|
||||
|
||||
void evergreen_hpd_init(struct radeon_device *rdev)
|
||||
{
|
||||
/* XXX */
|
||||
}
|
||||
|
||||
|
||||
void evergreen_bandwidth_update(struct radeon_device *rdev)
|
||||
{
|
||||
/* XXX */
|
||||
}
|
||||
|
||||
void evergreen_hpd_fini(struct radeon_device *rdev)
|
||||
{
|
||||
/* XXX */
|
||||
}
|
||||
|
||||
static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned i;
|
||||
u32 tmp;
|
||||
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
/* read MC_STATUS */
|
||||
tmp = RREG32(SRBM_STATUS) & 0x1F00;
|
||||
if (!tmp)
|
||||
return 0;
|
||||
udelay(1);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* GART
|
||||
*/
|
||||
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
|
||||
{
|
||||
u32 tmp;
|
||||
int r, i;
|
||||
|
||||
if (rdev->gart.table.vram.robj == NULL) {
|
||||
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
r = radeon_gart_table_vram_pin(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
radeon_gart_restore(rdev);
|
||||
/* Setup L2 cache */
|
||||
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
|
||||
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
|
||||
EFFECTIVE_L2_QUEUE_SIZE(7));
|
||||
WREG32(VM_L2_CNTL2, 0);
|
||||
WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
|
||||
/* Setup TLB control */
|
||||
tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
|
||||
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
|
||||
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
|
||||
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
|
||||
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
|
||||
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
|
||||
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
|
||||
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
|
||||
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
|
||||
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
|
||||
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
|
||||
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
|
||||
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
|
||||
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
|
||||
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
|
||||
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
|
||||
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
|
||||
(u32)(rdev->dummy_page.addr >> 12));
|
||||
for (i = 1; i < 7; i++)
|
||||
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
|
||||
|
||||
r600_pcie_gart_tlb_flush(rdev);
|
||||
rdev->gart.ready = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


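/* evergreen_agp_enable() applies the same L2/TLB setup as the GART path
 * but leaves every VM context disabled, so untranslated GPU addresses
 * go straight to the AGP/system aperture. */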
void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

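/* evergreen_mc_stop() saves the VGA and CRTC control registers for all
 * six display controllers and then turns them off, so the memory
 * controller can be reprogrammed without active display clients. */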
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
	save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
	save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
	save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
	save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
	save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
	save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);

	/* Stop all video */
	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);

	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	WREG32(EVERGREEN_D3VGA_CONTROL, 0);
	WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	WREG32(EVERGREEN_D5VGA_CONTROL, 0);
	WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}

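/* evergreen_mc_resume() points every CRTC surface address and the VGA
 * memory base at the (possibly relocated) start of VRAM, then restores
 * the register state captured by evergreen_mc_stop(). */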
static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
	WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
	WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

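/* evergreen_mc_program(): clear the HDP registers, stop the displays,
 * wait for the MC to idle, program the system/FB/AGP apertures from
 * rdev->mc, and finally bring the displays back with the new layout. */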
static void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

#if 0
/*
 * CP.
 */
static void evergreen_cp_stop(struct radeon_device *rdev)
{
	/* XXX */
}


static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	/* XXX */

	return 0;
}


/*
 * Core functions
 */
static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;

	return backend_map;
}
#endif

static void evergreen_gpu_init(struct radeon_device *rdev)
{
	/* XXX */
}

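/* evergreen_mc_init() derives the VRAM bus width from MC_ARB_RAMCFG and
 * MC_SHARED_CHMAP (channel size * number of channels) and reads the
 * VRAM size from CONFIG_MEMSIZE, which is reported in MB on evergreen. */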
int evergreen_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	/* size in MB on evergreen */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	/* FIXME remove this once we support unmappable VRAM */
	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
		rdev->mc.mc_vram_size = rdev->mc.aper_size;
		rdev->mc.real_vram_size = rdev->mc.aper_size;
	}
	r600_vram_gtt_location(rdev, &rdev->mc);
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	return 0;
}

int evergreen_gpu_reset(struct radeon_device *rdev)
{
	/* FIXME: implement for evergreen */
	return 0;
}

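/* evergreen_startup(): with the CP, GART and writeback paths still
 * stubbed out behind #if 0, this currently only reprograms the memory
 * controller and calls the (empty) evergreen_gpu_init(). */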
static int evergreen_startup(struct radeon_device *rdev)
{
#if 0
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}
#endif
	evergreen_mc_program(rdev);
#if 0
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
#endif
	evergreen_gpu_init(rdev);
#if 0

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* write back buffers are not vital so don't worry about failure */
	r600_wb_enable(rdev);
#endif
	return 0;
}

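/* evergreen_resume() re-posts the card through atom_asic_init(),
 * reinitializes the clocks and then re-runs evergreen_startup(). */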
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting; unlike on r500 hw, posting will
	 * perform the necessary tasks to bring the GPU back into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}

	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		return r;
	}
#if 0
	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}
#endif
	return r;
}

int evergreen_suspend(struct radeon_device *rdev)
{
#if 0
	int r;

	/* FIXME: we should wait for ring to be empty */
	r700_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
#endif
	return 0;
}

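/* evergreen_card_posted() checks whether any CRTC has its master enable
 * bit set, and falls back to CONFIG_MEMSIZE, to decide if the BIOS has
 * already posted the card. */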
static bool evergreen_card_posted(struct radeon_device *rdev)
{
	u32 reg;

	/* first check CRTCs */
	reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
	if (reg & EVERGREEN_CRTC_MASTER_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

/* Plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic-specific functions. This should
 * also allow us to remove a bunch of callback functions
 * like vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!evergreen_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
#if 0
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;
#endif
	rdev->accel_working = false;
	r = evergreen_startup(rdev);
	if (r) {
		evergreen_suspend(rdev);
		/*r600_wb_fini(rdev);*/
		/*radeon_ring_fini(rdev);*/
		/*evergreen_pcie_gart_fini(rdev);*/
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
	}
	return 0;
}

void evergreen_fini(struct radeon_device *rdev)
{
	evergreen_suspend(rdev);
#if 0
	r600_blit_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_ring_fini(rdev);
	r600_wb_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
#endif
	kfree(rdev->bios);
	rdev->bios = NULL;
}
176	drivers/video/drm/radeon/evergreen_reg.h	Normal file
@ -0,0 +1,176 @@
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#ifndef __EVERGREEN_REG_H__
#define __EVERGREEN_REG_H__

/* evergreen */
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0x310
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0x324
#define EVERGREEN_D3VGA_CONTROL                         0x3e0
#define EVERGREEN_D4VGA_CONTROL                         0x3e4
#define EVERGREEN_D5VGA_CONTROL                         0x3e8
#define EVERGREEN_D6VGA_CONTROL                         0x3ec

#define EVERGREEN_P1PLL_SS_CNTL                         0x414
#define EVERGREEN_P2PLL_SS_CNTL                         0x454
#       define EVERGREEN_PxPLL_SS_EN                    (1 << 12)
/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
#define EVERGREEN_GRPH_ENABLE                           0x6800
#define EVERGREEN_GRPH_CONTROL                          0x6804
#       define EVERGREEN_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
#       define EVERGREEN_GRPH_DEPTH_8BPP                0
#       define EVERGREEN_GRPH_DEPTH_16BPP               1
#       define EVERGREEN_GRPH_DEPTH_32BPP               2
#       define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
/* 8 BPP */
#       define EVERGREEN_GRPH_FORMAT_INDEXED            0
/* 16 BPP */
#       define EVERGREEN_GRPH_FORMAT_ARGB1555           0
#       define EVERGREEN_GRPH_FORMAT_ARGB565            1
#       define EVERGREEN_GRPH_FORMAT_ARGB4444           2
#       define EVERGREEN_GRPH_FORMAT_AI88               3
#       define EVERGREEN_GRPH_FORMAT_MONO16             4
#       define EVERGREEN_GRPH_FORMAT_BGRA5551           5
/* 32 BPP */
#       define EVERGREEN_GRPH_FORMAT_ARGB8888           0
#       define EVERGREEN_GRPH_FORMAT_ARGB2101010        1
#       define EVERGREEN_GRPH_FORMAT_32BPP_DIG          2
#       define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010     3
#       define EVERGREEN_GRPH_FORMAT_BGRA1010102        4
#       define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
#       define EVERGREEN_GRPH_FORMAT_RGB111110          6
#       define EVERGREEN_GRPH_FORMAT_BGR101111          7
#define EVERGREEN_GRPH_SWAP_CONTROL                     0x680c
#       define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
#       define EVERGREEN_GRPH_ENDIAN_NONE               0
#       define EVERGREEN_GRPH_ENDIAN_8IN16              1
#       define EVERGREEN_GRPH_ENDIAN_8IN32              2
#       define EVERGREEN_GRPH_ENDIAN_8IN64              3
#       define EVERGREEN_GRPH_RED_CROSSBAR(x)           (((x) & 0x3) << 4)
#       define EVERGREEN_GRPH_RED_SEL_R                 0
#       define EVERGREEN_GRPH_RED_SEL_G                 1
#       define EVERGREEN_GRPH_RED_SEL_B                 2
#       define EVERGREEN_GRPH_RED_SEL_A                 3
#       define EVERGREEN_GRPH_GREEN_CROSSBAR(x)         (((x) & 0x3) << 6)
#       define EVERGREEN_GRPH_GREEN_SEL_G               0
#       define EVERGREEN_GRPH_GREEN_SEL_B               1
#       define EVERGREEN_GRPH_GREEN_SEL_A               2
#       define EVERGREEN_GRPH_GREEN_SEL_R               3
#       define EVERGREEN_GRPH_BLUE_CROSSBAR(x)          (((x) & 0x3) << 8)
#       define EVERGREEN_GRPH_BLUE_SEL_B                0
#       define EVERGREEN_GRPH_BLUE_SEL_A                1
#       define EVERGREEN_GRPH_BLUE_SEL_R                2
#       define EVERGREEN_GRPH_BLUE_SEL_G                3
#       define EVERGREEN_GRPH_ALPHA_CROSSBAR(x)         (((x) & 0x3) << 10)
#       define EVERGREEN_GRPH_ALPHA_SEL_A               0
#       define EVERGREEN_GRPH_ALPHA_SEL_R               1
#       define EVERGREEN_GRPH_ALPHA_SEL_G               2
#       define EVERGREEN_GRPH_ALPHA_SEL_B               3
#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x6810
#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x6814
#       define EVERGREEN_GRPH_DFQ_ENABLE                (1 << 0)
#       define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK      0xffffff00
#define EVERGREEN_GRPH_PITCH                            0x6818
#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x681c
#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x6820
#define EVERGREEN_GRPH_SURFACE_OFFSET_X                 0x6824
#define EVERGREEN_GRPH_SURFACE_OFFSET_Y                 0x6828
#define EVERGREEN_GRPH_X_START                          0x682c
#define EVERGREEN_GRPH_Y_START                          0x6830
#define EVERGREEN_GRPH_X_END                            0x6834
#define EVERGREEN_GRPH_Y_END                            0x6838

/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
#define EVERGREEN_CUR_CONTROL                           0x6998
#       define EVERGREEN_CURSOR_EN                      (1 << 0)
#       define EVERGREEN_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
#       define EVERGREEN_CURSOR_MONO                    0
#       define EVERGREEN_CURSOR_24_1                    1
#       define EVERGREEN_CURSOR_24_8_PRE_MULT           2
#       define EVERGREEN_CURSOR_24_8_UNPRE_MULT         3
#       define EVERGREEN_CURSOR_2X_MAGNIFY              (1 << 16)
#       define EVERGREEN_CURSOR_FORCE_MC_ON             (1 << 20)
#       define EVERGREEN_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
#       define EVERGREEN_CURSOR_URGENT_ALWAYS           0
#       define EVERGREEN_CURSOR_URGENT_1_8              1
#       define EVERGREEN_CURSOR_URGENT_1_4              2
#       define EVERGREEN_CURSOR_URGENT_3_8              3
#       define EVERGREEN_CURSOR_URGENT_1_2              4
#define EVERGREEN_CUR_SURFACE_ADDRESS                   0x699c
#       define EVERGREEN_CUR_SURFACE_ADDRESS_MASK       0xfffff000
#define EVERGREEN_CUR_SIZE                              0x69a0
#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH              0x69a4
#define EVERGREEN_CUR_POSITION                          0x69a8
#define EVERGREEN_CUR_HOT_SPOT                          0x69ac
#define EVERGREEN_CUR_COLOR1                            0x69b0
#define EVERGREEN_CUR_COLOR2                            0x69b4
#define EVERGREEN_CUR_UPDATE                            0x69b8
#       define EVERGREEN_CURSOR_UPDATE_PENDING          (1 << 0)
#       define EVERGREEN_CURSOR_UPDATE_TAKEN            (1 << 1)
#       define EVERGREEN_CURSOR_UPDATE_LOCK             (1 << 16)
#       define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)

/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
#define EVERGREEN_DC_LUT_RW_MODE                        0x69e0
#define EVERGREEN_DC_LUT_RW_INDEX                       0x69e4
#define EVERGREEN_DC_LUT_SEQ_COLOR                      0x69e8
#define EVERGREEN_DC_LUT_PWL_DATA                       0x69ec
#define EVERGREEN_DC_LUT_30_COLOR                       0x69f0
#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE              0x69f4
#define EVERGREEN_DC_LUT_WRITE_EN_MASK                  0x69f8
#define EVERGREEN_DC_LUT_AUTOFILL                       0x69fc
#define EVERGREEN_DC_LUT_CONTROL                        0x6a00
#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE              0x6a04
#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN             0x6a08
#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED               0x6a0c
#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE              0x6a10
#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN             0x6a14
#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED               0x6a18

#define EVERGREEN_DATA_FORMAT                           0x6b00
#       define EVERGREEN_INTERLEAVE_EN                  (1 << 0)
#define EVERGREEN_DESKTOP_HEIGHT                        0x6b04

#define EVERGREEN_VIEWPORT_START                        0x6d70
#define EVERGREEN_VIEWPORT_SIZE                         0x6d74

/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
#define EVERGREEN_CRTC0_REGISTER_OFFSET                 (0x6df0 - 0x6df0)
#define EVERGREEN_CRTC1_REGISTER_OFFSET                 (0x79f0 - 0x6df0)
#define EVERGREEN_CRTC2_REGISTER_OFFSET                 (0x105f0 - 0x6df0)
#define EVERGREEN_CRTC3_REGISTER_OFFSET                 (0x111f0 - 0x6df0)
#define EVERGREEN_CRTC4_REGISTER_OFFSET                 (0x11df0 - 0x6df0)
#define EVERGREEN_CRTC5_REGISTER_OFFSET                 (0x129f0 - 0x6df0)

/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
#define EVERGREEN_CRTC_CONTROL                          0x6e70
#       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
#define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4

#define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
#define EVERGREEN_DC_GPIO_HPD_A                         0x64b4
#define EVERGREEN_DC_GPIO_HPD_EN                        0x64b8
#define EVERGREEN_DC_GPIO_HPD_Y                         0x64bc

#endif
@ -1,4 +1,5 @@


CC = gcc
FASM = e:/fasm/fasm.exe

@ -48,6 +49,7 @@ NAME_SRC= \
	$(DRM_TOPDIR)/drm_dp_i2c_helper.c \
	$(DRM_TOPDIR)/i2c/i2c-core.c \
	$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
	evergreen.c \
	radeon_device.c \
	radeon_clocks.c \
	radeon_i2c.c \
@ -195,13 +195,13 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
|
||||
{
|
||||
uint32_t tmp;
|
||||
|
||||
radeon_gart_restore(rdev);
|
||||
/* discard memory request outside of configured range */
|
||||
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
|
||||
WREG32(RADEON_AIC_CNTL, tmp);
|
||||
/* set address range for PCI address translate */
|
||||
WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
|
||||
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
|
||||
WREG32(RADEON_AIC_HI_ADDR, tmp);
|
||||
WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
|
||||
WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
|
||||
/* set PCI GART page-table base address */
|
||||
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
|
||||
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
|
||||
@ -284,8 +284,8 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
|
||||
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
|
||||
radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
|
||||
/* Wait until IDLE & CLEAN */
|
||||
radeon_ring_write(rdev, PACKET0(0x1720, 0));
|
||||
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
|
||||
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
|
||||
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
|
||||
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
|
||||
RADEON_HDP_READ_BUFFER_INVALIDATE);
|
||||
@ -479,7 +479,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
|
||||
const char *fw_name = NULL;
|
||||
int err;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
|
||||
err = IS_ERR(pdev);
|
||||
@ -710,8 +710,6 @@ int r100_cp_reset(struct radeon_device *rdev)
|
||||
bool reinit_cp;
|
||||
int i;
|
||||
|
||||
ENTER();
|
||||
|
||||
reinit_cp = rdev->cp.ready;
|
||||
rdev->cp.ready = false;
|
||||
WREG32(RADEON_CP_CSQ_MODE, 0);
|
||||
@ -1630,7 +1628,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev)
|
||||
}
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = RREG32(RADEON_RBBM_STATUS);
|
||||
if (!(tmp & (1 << 31))) {
|
||||
if (!(tmp & RADEON_RBBM_ACTIVE)) {
|
||||
return 0;
|
||||
}
|
||||
DRM_UDELAY(1);
|
||||
@ -1645,8 +1643,8 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
|
||||
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
/* read MC_STATUS */
|
||||
tmp = RREG32(0x0150);
|
||||
if (tmp & (1 << 2)) {
|
||||
tmp = RREG32(RADEON_MC_STATUS);
|
||||
if (tmp & RADEON_MC_IDLE) {
|
||||
return 0;
|
||||
}
|
||||
DRM_UDELAY(1);
|
||||
@ -1664,8 +1662,6 @@ void r100_hdp_reset(struct radeon_device *rdev)
|
||||
{
|
||||
uint32_t tmp;
|
||||
|
||||
ENTER();
|
||||
|
||||
tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
|
||||
tmp |= (7 << 28);
|
||||
WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
|
||||
@ -1681,8 +1677,6 @@ int r100_rb2d_reset(struct radeon_device *rdev)
|
||||
uint32_t tmp;
|
||||
int i;
|
||||
|
||||
ENTER();
|
||||
|
||||
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
|
||||
(void)RREG32(RADEON_RBBM_SOFT_RESET);
|
||||
udelay(200);
|
||||
@ -1723,7 +1717,7 @@ int r100_gpu_reset(struct radeon_device *rdev)
|
||||
}
|
||||
/* Check if GPU is idle */
|
||||
status = RREG32(RADEON_RBBM_STATUS);
|
||||
if (status & (1 << 31)) {
|
||||
if (status & RADEON_RBBM_ACTIVE) {
|
||||
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
|
||||
return -1;
|
||||
}
|
||||
@ -1733,6 +1727,9 @@ int r100_gpu_reset(struct radeon_device *rdev)
|
||||
|
||||
void r100_set_common_regs(struct radeon_device *rdev)
|
||||
{
|
||||
struct drm_device *dev = rdev->ddev;
|
||||
bool force_dac2 = false;
|
||||
|
||||
/* set these so they don't interfere with anything */
|
||||
WREG32(RADEON_OV0_SCALE_CNTL, 0);
|
||||
WREG32(RADEON_SUBPIC_CNTL, 0);
|
||||
@ -1741,6 +1738,68 @@ void r100_set_common_regs(struct radeon_device *rdev)
|
||||
WREG32(RADEON_DVI_I2C_CNTL_1, 0);
|
||||
WREG32(RADEON_CAP0_TRIG_CNTL, 0);
|
||||
WREG32(RADEON_CAP1_TRIG_CNTL, 0);
|
||||
|
||||
/* always set up dac2 on rn50 and some rv100 as lots
|
||||
* of servers seem to wire it up to a VGA port but
|
||||
* don't report it in the bios connector
|
||||
* table.
|
||||
*/
|
||||
switch (dev->pdev->device) {
|
||||
/* RN50 */
|
||||
case 0x515e:
|
||||
case 0x5969:
|
||||
force_dac2 = true;
|
||||
break;
|
||||
/* RV100*/
|
||||
case 0x5159:
|
||||
case 0x515a:
|
||||
/* DELL triple head servers */
|
||||
if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
|
||||
((dev->pdev->subsystem_device == 0x016c) ||
|
||||
(dev->pdev->subsystem_device == 0x016d) ||
|
||||
(dev->pdev->subsystem_device == 0x016e) ||
|
||||
(dev->pdev->subsystem_device == 0x016f) ||
|
||||
(dev->pdev->subsystem_device == 0x0170) ||
|
||||
(dev->pdev->subsystem_device == 0x017d) ||
|
||||
(dev->pdev->subsystem_device == 0x017e) ||
|
||||
(dev->pdev->subsystem_device == 0x0183) ||
|
||||
(dev->pdev->subsystem_device == 0x018a) ||
|
||||
(dev->pdev->subsystem_device == 0x019a)))
|
||||
force_dac2 = true;
|
||||
break;
|
||||
}
|
||||
|
||||
if (force_dac2) {
|
||||
u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
|
||||
u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
|
||||
u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
|
||||
|
||||
/* For CRT on DAC2, don't turn it on if BIOS didn't
|
||||
enable it, even it's detected.
|
||||
*/
|
||||
|
||||
/* force it to crtc0 */
|
||||
dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
|
||||
dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
|
||||
disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
|
||||
|
||||
/* set up the TV DAC */
|
||||
tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
|
||||
RADEON_TV_DAC_STD_MASK |
|
||||
RADEON_TV_DAC_RDACPD |
|
||||
RADEON_TV_DAC_GDACPD |
|
||||
RADEON_TV_DAC_BDACPD |
|
||||
RADEON_TV_DAC_BGADJ_MASK |
|
||||
RADEON_TV_DAC_DACADJ_MASK);
|
||||
tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
|
||||
RADEON_TV_DAC_NHOLD |
|
||||
RADEON_TV_DAC_STD_PS2 |
|
||||
(0x58 << 16));
|
||||
|
||||
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
|
||||
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
|
||||
WREG32(RADEON_DAC_CNTL2, dac2_cntl);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1822,17 +1881,20 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev)
|
||||
void r100_vram_init_sizes(struct radeon_device *rdev)
|
||||
{
|
||||
u64 config_aper_size;
|
||||
u32 accessible;
|
||||
|
||||
/* work out accessible VRAM */
|
||||
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
|
||||
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
|
||||
rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
|
||||
/* FIXME we don't use the second aperture yet when we could use it */
|
||||
if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
|
||||
rdev->mc.visible_vram_size = rdev->mc.aper_size;
|
||||
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
|
||||
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
uint32_t tom;
|
||||
/* read NB_TOM to get the amount of ram stolen for the GPU */
|
||||
tom = RREG32(RADEON_NB_TOM);
|
||||
rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
|
||||
/* for IGPs we need to keep VRAM where it was put by the BIOS */
|
||||
rdev->mc.vram_location = (tom & 0xffff) << 16;
|
||||
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
|
||||
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
|
||||
} else {
|
||||
@ -1844,30 +1906,19 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
|
||||
rdev->mc.real_vram_size = 8192 * 1024;
|
||||
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
|
||||
}
|
||||
/* let driver place VRAM */
|
||||
rdev->mc.vram_location = 0xFFFFFFFFUL;
|
||||
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
|
||||
* Novell bug 204882 + along with lots of ubuntu ones */
|
||||
* Novell bug 204882 + along with lots of ubuntu ones
|
||||
*/
|
||||
if (config_aper_size > rdev->mc.real_vram_size)
|
||||
rdev->mc.mc_vram_size = config_aper_size;
|
||||
else
|
||||
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
|
||||
}
|
||||
|
||||
/* work out accessible VRAM */
|
||||
accessible = r100_get_accessible_vram(rdev);
|
||||
|
||||
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
|
||||
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
|
||||
|
||||
if (accessible > rdev->mc.aper_size)
|
||||
accessible = rdev->mc.aper_size;
|
||||
|
||||
if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
|
||||
/* FIXME remove this once we support unmappable VRAM */
|
||||
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
|
||||
rdev->mc.mc_vram_size = rdev->mc.aper_size;
|
||||
|
||||
if (rdev->mc.real_vram_size > rdev->mc.aper_size)
|
||||
rdev->mc.real_vram_size = rdev->mc.aper_size;
|
||||
}
|
||||
}
|
||||
|
||||
void r100_vga_set_state(struct radeon_device *rdev, bool state)
|
||||
@ -1884,11 +1935,18 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
|
||||
WREG32(RADEON_CONFIG_CNTL, temp);
|
||||
}
|
||||
|
||||
void r100_vram_info(struct radeon_device *rdev)
|
||||
void r100_mc_init(struct radeon_device *rdev)
|
||||
{
|
||||
r100_vram_get_type(rdev);
|
||||
u64 base;
|
||||
|
||||
r100_vram_get_type(rdev);
|
||||
r100_vram_init_sizes(rdev);
|
||||
base = rdev->mc.aper_base;
|
||||
if (rdev->flags & RADEON_IS_IGP)
|
||||
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
|
||||
radeon_vram_location(rdev, &rdev->mc, base);
|
||||
if (!(rdev->flags & RADEON_IS_AGP))
|
||||
radeon_gtt_location(rdev, &rdev->mc);
|
||||
}
|
||||
|
||||
|
||||
@ -2753,10 +2811,9 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
|
||||
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
|
||||
{
|
||||
/* Update base address for crtc */
|
||||
WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location);
|
||||
WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
|
||||
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
|
||||
WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR,
|
||||
rdev->mc.vram_location);
|
||||
WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
|
||||
}
|
||||
/* Restore CRTC registers */
|
||||
WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
|
||||
@ -2883,31 +2940,6 @@ static int r100_startup(struct radeon_device *rdev)
|
||||
}
|
||||
|
||||
|
||||
int r100_mc_init(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
u32 tmp;
|
||||
|
||||
/* Setup GPU memory space */
|
||||
rdev->mc.vram_location = 0xFFFFFFFFUL;
|
||||
rdev->mc.gtt_location = 0xFFFFFFFFUL;
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
|
||||
rdev->mc.vram_location = tmp << 16;
|
||||
}
|
||||
if (rdev->flags & RADEON_IS_AGP) {
|
||||
r = radeon_agp_init(rdev);
|
||||
if (r) {
|
||||
radeon_agp_disable(rdev);
|
||||
} else {
|
||||
rdev->mc.gtt_location = rdev->mc.agp_base;
|
||||
}
|
||||
}
|
||||
r = radeon_mc_setup(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int r100_init(struct radeon_device *rdev)
|
||||
{
|
||||
@ -2951,13 +2983,15 @@ int r100_init(struct radeon_device *rdev)
|
||||
radeon_get_clock_info(rdev->ddev);
|
||||
/* Initialize power management */
|
||||
radeon_pm_init(rdev);
|
||||
/* Get vram informations */
|
||||
r100_vram_info(rdev);
|
||||
/* Initialize memory controller (also test AGP) */
|
||||
r = r100_mc_init(rdev);
|
||||
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
|
||||
if (r)
|
||||
return r;
|
||||
/* initialize AGP */
|
||||
if (rdev->flags & RADEON_IS_AGP) {
|
||||
r = radeon_agp_init(rdev);
|
||||
if (r) {
|
||||
radeon_agp_disable(rdev);
|
||||
}
|
||||
}
|
||||
/* initialize VRAM */
|
||||
r100_mc_init(rdev);
|
||||
/* Fence driver */
|
||||
// r = radeon_fence_driver_init(rdev);
|
||||
// if (r)
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
|
||||
#include "r100d.h"
|
||||
#include "r200_reg_safe.h"
|
||||
|
||||
//#include "r100_track.h"
|
||||
|
@ -117,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
|
||||
r = radeon_gart_table_vram_pin(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
radeon_gart_restore(rdev);
|
||||
/* discard memory request outside of configured range */
|
||||
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
|
||||
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
|
||||
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
|
||||
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
|
||||
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
|
||||
tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
|
||||
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
|
||||
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
|
||||
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
|
||||
table_addr = rdev->gart.table_addr;
|
||||
WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
|
||||
/* FIXME: setup default page */
|
||||
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
|
||||
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
|
||||
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
|
||||
/* Clear error */
|
||||
WREG32_PCIE(0x18, 0);
|
||||
@ -174,18 +175,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
|
||||
/* Who ever call radeon_fence_emit should call ring_lock and ask
|
||||
* for enough space (today caller are ib schedule and buffer move) */
|
||||
/* Write SC register so SC & US assert idle */
|
||||
radeon_ring_write(rdev, PACKET0(0x43E0, 0));
|
||||
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
|
||||
radeon_ring_write(rdev, 0);
|
||||
radeon_ring_write(rdev, PACKET0(0x43E4, 0));
|
||||
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
|
||||
radeon_ring_write(rdev, 0);
|
||||
/* Flush 3D cache */
|
||||
radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
|
||||
radeon_ring_write(rdev, (2 << 0));
|
||||
radeon_ring_write(rdev, PACKET0(0x4F18, 0));
|
||||
radeon_ring_write(rdev, (1 << 0));
|
||||
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
|
||||
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
|
||||
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
|
||||
radeon_ring_write(rdev, R300_ZC_FLUSH);
|
||||
/* Wait until IDLE & CLEAN */
|
||||
radeon_ring_write(rdev, PACKET0(0x1720, 0));
|
||||
radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
|
||||
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
|
||||
RADEON_WAIT_2D_IDLECLEAN |
|
||||
RADEON_WAIT_DMA_GUI_IDLE));
|
||||
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
|
||||
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
|
||||
RADEON_HDP_READ_BUFFER_INVALIDATE);
|
||||
@ -198,56 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
|
||||
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
|
||||
}
|
||||
|
||||
|
||||
#if 0
|
||||
|
||||
|
||||
int r300_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset,
|
||||
uint64_t dst_offset,
|
||||
unsigned num_pages,
|
||||
struct radeon_fence *fence)
|
||||
{
|
||||
uint32_t size;
|
||||
uint32_t cur_size;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
/* radeon pitch is /64 */
|
||||
size = num_pages << PAGE_SHIFT;
|
||||
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
|
||||
r = radeon_ring_lock(rdev, num_loops * 4 + 64);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
/* Must wait for 2D idle & clean before DMA or hangs might happen */
|
||||
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
|
||||
radeon_ring_write(rdev, (1 << 16));
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size = size;
|
||||
if (cur_size > 0x1FFFFF) {
|
||||
cur_size = 0x1FFFFF;
|
||||
}
|
||||
size -= cur_size;
|
||||
radeon_ring_write(rdev, PACKET0(0x720, 2));
|
||||
radeon_ring_write(rdev, src_offset);
|
||||
radeon_ring_write(rdev, dst_offset);
|
||||
radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
|
||||
src_offset += cur_size;
|
||||
dst_offset += cur_size;
|
||||
}
|
||||
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
|
||||
if (fence) {
|
||||
r = radeon_fence_emit(rdev, fence);
|
||||
}
|
||||
radeon_ring_unlock_commit(rdev);
|
||||
return r;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void r300_ring_start(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned gb_tile_config;
|
||||
@ -287,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev)
|
||||
radeon_ring_write(rdev,
|
||||
RADEON_WAIT_2D_IDLECLEAN |
|
||||
RADEON_WAIT_3D_IDLECLEAN);
|
||||
radeon_ring_write(rdev, PACKET0(0x170C, 0));
|
||||
radeon_ring_write(rdev, 1 << 31);
|
||||
radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
|
||||
radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
|
||||
radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
|
||||
radeon_ring_write(rdev, 0);
|
||||
radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
|
||||
@ -355,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
|
||||
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
/* read MC_STATUS */
|
||||
tmp = RREG32(0x0150);
|
||||
if (tmp & (1 << 4)) {
|
||||
tmp = RREG32(RADEON_MC_STATUS);
|
||||
if (tmp & R300_MC_IDLE) {
|
||||
return 0;
|
||||
}
|
||||
DRM_UDELAY(1);
|
||||
@ -401,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev)
|
||||
"programming pipes. Bad things might happen.\n");
|
||||
}
|
||||
|
||||
tmp = RREG32(0x170C);
|
||||
WREG32(0x170C, tmp | (1 << 31));
|
||||
tmp = RREG32(R300_DST_PIPE_CONFIG);
|
||||
WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
|
||||
|
||||
WREG32(R300_RB2D_DSTCACHE_MODE,
|
||||
R300_DC_AUTOFLUSH_ENABLE |
|
||||
@ -443,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev)
|
||||
/* GA still busy soft reset it */
|
||||
WREG32(0x429C, 0x200);
|
||||
WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
|
||||
WREG32(0x43E0, 0);
|
||||
WREG32(0x43E4, 0);
|
||||
WREG32(R300_RE_SCISSORS_TL, 0);
|
||||
WREG32(R300_RE_SCISSORS_BR, 0);
|
||||
WREG32(0x24AC, 0);
|
||||
}
|
||||
/* Wait to prevent race in RBBM_STATUS */
|
||||
@ -494,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
|
||||
}
|
||||
/* Check if GPU is idle */
|
||||
status = RREG32(RADEON_RBBM_STATUS);
|
||||
if (status & (1 << 31)) {
|
||||
if (status & RADEON_RBBM_ACTIVE) {
|
||||
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
|
||||
return -1;
|
||||
}
|
||||
@ -506,13 +459,13 @@ int r300_gpu_reset(struct radeon_device *rdev)
|
||||
/*
|
||||
* r300,r350,rv350,rv380 VRAM info
|
||||
*/
|
||||
void r300_vram_info(struct radeon_device *rdev)
|
||||
void r300_mc_init(struct radeon_device *rdev)
|
||||
{
|
||||
uint32_t tmp;
|
||||
u64 base;
|
||||
u32 tmp;
|
||||
|
||||
/* DDR for all card after R300 & IGP */
|
||||
rdev->mc.vram_is_ddr = true;
|
||||
|
||||
tmp = RREG32(RADEON_MEM_CNTL);
|
||||
tmp &= R300_MEM_NUM_CHANNELS_MASK;
|
||||
switch (tmp) {
|
||||
@ -521,8 +474,13 @@ void r300_vram_info(struct radeon_device *rdev)
|
||||
case 2: rdev->mc.vram_width = 256; break;
|
||||
default: rdev->mc.vram_width = 128; break;
|
||||
}
|
||||
|
||||
r100_vram_init_sizes(rdev);
|
||||
base = rdev->mc.aper_base;
|
||||
if (rdev->flags & RADEON_IS_IGP)
|
||||
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
|
||||
radeon_vram_location(rdev, &rdev->mc, base);
|
||||
if (!(rdev->flags & RADEON_IS_AGP))
|
||||
radeon_gtt_location(rdev, &rdev->mc);
|
||||
}
|
||||
|
||||
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
|
||||
@ -584,6 +542,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
|
||||
|
||||
}
|
||||
|
||||
int rv370_get_pcie_lanes(struct radeon_device *rdev)
|
||||
{
|
||||
u32 link_width_cntl;
|
||||
|
||||
if (rdev->flags & RADEON_IS_IGP)
|
||||
return 0;
|
||||
|
||||
if (!(rdev->flags & RADEON_IS_PCIE))
|
||||
return 0;
|
||||
|
||||
/* FIXME wait for idle */
|
||||
|
||||
if (rdev->family < CHIP_R600)
|
||||
link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
|
||||
else
|
||||
link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
|
||||
|
||||
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
|
||||
case RADEON_PCIE_LC_LINK_WIDTH_X0:
|
||||
return 0;
|
||||
case RADEON_PCIE_LC_LINK_WIDTH_X1:
|
||||
return 1;
|
||||
case RADEON_PCIE_LC_LINK_WIDTH_X2:
|
||||
return 2;
|
||||
case RADEON_PCIE_LC_LINK_WIDTH_X4:
|
||||
return 4;
|
||||
case RADEON_PCIE_LC_LINK_WIDTH_X8:
|
||||
return 8;
|
||||
case RADEON_PCIE_LC_LINK_WIDTH_X16:
|
||||
default:
|
||||
return 16;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
|
||||
{
|
||||
@ -716,6 +708,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
tile_flags |= R300_TXO_MACRO_TILE;
|
||||
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
|
||||
tile_flags |= R300_TXO_MICRO_TILE;
|
||||
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
|
||||
tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
|
||||
|
||||
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
|
||||
tmp |= tile_flags;
|
||||
@ -766,6 +760,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
tile_flags |= R300_COLOR_TILE_ENABLE;
|
||||
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
|
||||
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
|
||||
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
|
||||
tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
|
||||
|
||||
tmp = idx_value & ~(0x7 << 16);
|
||||
tmp |= tile_flags;
|
||||
@ -837,7 +833,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
|
||||
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
|
||||
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
|
||||
tile_flags |= R300_DEPTHMICROTILE_TILED;;
|
||||
tile_flags |= R300_DEPTHMICROTILE_TILED;
|
||||
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
|
||||
tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
|
||||
|
||||
tmp = idx_value & ~(0x7 << 16);
|
||||
tmp |= tile_flags;
|
||||
@ -1347,13 +1345,15 @@ int r300_init(struct radeon_device *rdev)
|
||||
radeon_get_clock_info(rdev->ddev);
|
||||
/* Initialize power management */
|
||||
radeon_pm_init(rdev);
|
||||
/* Get vram informations */
|
||||
r300_vram_info(rdev);
|
||||
/* Initialize memory controller (also test AGP) */
|
||||
r = r420_mc_init(rdev);
|
||||
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
|
||||
if (r)
|
||||
return r;
|
||||
/* initialize AGP */
|
||||
if (rdev->flags & RADEON_IS_AGP) {
|
||||
r = radeon_agp_init(rdev);
|
||||
if (r) {
|
||||
radeon_agp_disable(rdev);
|
||||
}
|
||||
}
|
||||
/* initialize memory controller */
|
||||
r300_mc_init(rdev);
|
||||
/* Fence driver */
|
||||
// r = radeon_fence_driver_init(rdev);
|
||||
// if (r)
|
||||
|
@ -952,6 +952,7 @@
|
||||
# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
|
||||
# define R300_TXO_MACRO_TILE (1 << 2)
|
||||
# define R300_TXO_MICRO_TILE (1 << 3)
|
||||
# define R300_TXO_MICRO_TILE_SQUARE (2 << 3)
|
||||
# define R300_TXO_OFFSET_MASK 0xffffffe0
|
||||
# define R300_TXO_OFFSET_SHIFT 5
|
||||
/* END: Guess from R200 */
|
||||
@ -1360,6 +1361,7 @@
|
||||
# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
|
||||
# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
|
||||
# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
|
||||
# define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
|
||||
# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
|
||||
# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
|
||||
# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
|
||||
|
@ -40,28 +40,6 @@ static void r420_set_reg_safe(struct radeon_device *rdev)
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}

int r420_mc_init(struct radeon_device *rdev)
{
int r;

/* Setup GPU memory space */
rdev->mc.vram_location = 0xFFFFFFFFUL;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
radeon_agp_disable(rdev);
} else {
rdev->mc.gtt_location = rdev->mc.agp_base;
}
}
r = radeon_mc_setup(rdev);
if (r) {
return r;
}
return 0;
}

void r420_pipes_init(struct radeon_device *rdev)
{
unsigned tmp;
@ -69,7 +47,8 @@ void r420_pipes_init(struct radeon_device *rdev)
unsigned num_pipes;

/* GA_ENHANCE workaround TCL deadlock issue */
WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
(1 << 2) | (1 << 3));
/* add idle wait as per freedesktop.org bug 24041 */
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
@ -97,17 +76,17 @@ void r420_pipes_init(struct radeon_device *rdev)
tmp = (7 << 1);
break;
}
WREG32(0x42C8, (1 << num_pipes) - 1);
WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
/* Sub pixel 1/12 so we can have 4K rendering according to doc */
tmp |= (1 << 4) | (1 << 0);
WREG32(0x4018, tmp);
tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
WREG32(R300_GB_TILE_CONFIG, tmp);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}

tmp = RREG32(0x170C);
WREG32(0x170C, tmp | (1 << 31));
tmp = RREG32(R300_DST_PIPE_CONFIG);
WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

WREG32(R300_RB2D_DSTCACHE_MODE,
RREG32(R300_RB2D_DSTCACHE_MODE) |
@ -314,13 +293,15 @@ int r420_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
return r;
radeon_agp_disable(rdev);
}
}
/* initialize memory controller */
r300_mc_init(rdev);
r420_debugfs(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);

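The reordered r420_init() hunk above is the pattern this commit applies across the r4xx/r5xx/r600 probe paths: bring up AGP first, fall back to the on-card GART by disabling AGP on failure, and only then program the memory controller. A condensed sketch of that intended flow (the wrapper name is invented for illustration; the helpers are the ones named in the diff, and the real init code also sets up debugfs, fences and the memory manager):

/* Illustrative sketch only, not a function from this commit. */
static int rxxx_setup_agp_and_mc(struct radeon_device *rdev)
{
	int r;

	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);	/* fills in GTT size/base on success */
		if (r)
			radeon_agp_disable(rdev);	/* fall back to the PCI(E) GART */
	}
	r300_mc_init(rdev);	/* r420 path; r520/r600 call their own *_mc_init() */
	return 0;
}
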
@ -717,54 +717,62 @@
#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988

#define AVIVO_DC_GPIO_HPD_A 0x7e94

#define AVIVO_GPIO_0 0x7e30
#define AVIVO_GPIO_1 0x7e40
#define AVIVO_GPIO_2 0x7e50
#define AVIVO_GPIO_3 0x7e60

#define AVIVO_DC_GPIO_HPD_Y 0x7e9c

#define AVIVO_I2C_STATUS 0x7d30
# define AVIVO_I2C_STATUS_DONE (1 << 0)
# define AVIVO_I2C_STATUS_NACK (1 << 1)
# define AVIVO_I2C_STATUS_HALT (1 << 2)
# define AVIVO_I2C_STATUS_GO (1 << 3)
# define AVIVO_I2C_STATUS_MASK 0x7
/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
 * DONE? */
# define AVIVO_I2C_STATUS_CMD_RESET 0x7
# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3)
#define AVIVO_I2C_STOP 0x7d34
#define AVIVO_I2C_START_CNTL 0x7d38
# define AVIVO_I2C_START (1 << 8)
# define AVIVO_I2C_CONNECTOR0 (0 << 16)
# define AVIVO_I2C_CONNECTOR1 (1 << 16)
#define R520_I2C_START (1<<0)
#define R520_I2C_STOP (1<<1)
#define R520_I2C_RX (1<<2)
#define R520_I2C_EN (1<<8)
#define R520_I2C_DDC1 (0<<16)
#define R520_I2C_DDC2 (1<<16)
#define R520_I2C_DDC3 (2<<16)
#define R520_I2C_DDC_MASK (3<<16)
#define AVIVO_I2C_CONTROL2 0x7d3c
# define AVIVO_I2C_7D3C_SIZE_SHIFT 8
# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8)
#define AVIVO_I2C_CONTROL3 0x7d40
/* Reading is done 4 bytes at a time: read the bottom 8 bits from
 * 7d44, four times in a row.
 * Writing is a little more complex. First write DATA with
 * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
 * magic number, zz is, I think, the slave address, and yy is the byte
 * you want to write. */
#define AVIVO_I2C_DATA 0x7d44
#define R520_I2C_ADDR_COUNT_MASK (0x7)
#define R520_I2C_DATA_COUNT_SHIFT (8)
#define R520_I2C_DATA_COUNT_MASK (0xF00)
#define AVIVO_I2C_CNTL 0x7d50
# define AVIVO_I2C_EN (1 << 0)
# define AVIVO_I2C_RESET (1 << 8)
#define AVIVO_DC_I2C_STATUS1 0x7d30
# define AVIVO_DC_I2C_DONE (1 << 0)
# define AVIVO_DC_I2C_NACK (1 << 1)
# define AVIVO_DC_I2C_HALT (1 << 2)
# define AVIVO_DC_I2C_GO (1 << 3)
#define AVIVO_DC_I2C_RESET 0x7d34
# define AVIVO_DC_I2C_SOFT_RESET (1 << 0)
# define AVIVO_DC_I2C_ABORT (1 << 8)
#define AVIVO_DC_I2C_CONTROL1 0x7d38
# define AVIVO_DC_I2C_START (1 << 0)
# define AVIVO_DC_I2C_STOP (1 << 1)
# define AVIVO_DC_I2C_RECEIVE (1 << 2)
# define AVIVO_DC_I2C_EN (1 << 8)
# define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16)
# define AVIVO_SEL_DDC1 0
# define AVIVO_SEL_DDC2 1
# define AVIVO_SEL_DDC3 2
#define AVIVO_DC_I2C_CONTROL2 0x7d3c
# define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0)
# define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8)
#define AVIVO_DC_I2C_CONTROL3 0x7d40
# define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0)
# define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1)
# define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7)
# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8)
# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16)
# define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24)
#define AVIVO_DC_I2C_DATA 0x7d44
#define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48
# define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0)
# define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8)
# define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16)
#define AVIVO_DC_I2C_ARBITRATION 0x7d50
# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0)
# define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1)
# define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8)
# define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9)
# define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16)
# define AVIVO_DC_I2C_HW_USING_I2C (1 << 17)

#define AVIVO_DC_GPIO_DDC1_MASK 0x7e40
#define AVIVO_DC_GPIO_DDC1_A 0x7e44
#define AVIVO_DC_GPIO_DDC1_EN 0x7e48
#define AVIVO_DC_GPIO_DDC1_Y 0x7e4c

#define AVIVO_DC_GPIO_DDC2_MASK 0x7e50
#define AVIVO_DC_GPIO_DDC2_A 0x7e54
#define AVIVO_DC_GPIO_DDC2_EN 0x7e58
#define AVIVO_DC_GPIO_DDC2_Y 0x7e5c

#define AVIVO_DC_GPIO_DDC3_MASK 0x7e60
#define AVIVO_DC_GPIO_DDC3_A 0x7e64
#define AVIVO_DC_GPIO_DDC3_EN 0x7e68
#define AVIVO_DC_GPIO_DDC3_Y 0x7e6c

#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)

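The comment block above documents the byte-at-a-time protocol behind the DATA register, but no code in this diff drives it. Purely as an illustration of how those bits fit together, a hypothetical polled read of one byte over DDC1 could look roughly like this (the helper name and the timeout are invented; the register and bit names come from the defines above):

/* Hypothetical sketch, not driver code: poll-read one byte from the DDC1 pair. */
static int avivo_dc_i2c_read_byte(struct radeon_device *rdev, u8 slave, u8 *byte)
{
	unsigned i;

	WREG32(AVIVO_DC_I2C_CONTROL1, AVIVO_DC_I2C_EN |
	       AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1) |
	       AVIVO_DC_I2C_RECEIVE | AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP);
	WREG32(AVIVO_DC_I2C_CONTROL2, AVIVO_DC_I2C_ADDR_COUNT(1) | AVIVO_DC_I2C_DATA_COUNT(1));
	WREG32(AVIVO_DC_I2C_DATA, slave);		/* slave address goes into DATA first */
	WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
	for (i = 0; i < 1000; i++) {			/* arbitrary poll limit */
		if (RREG32(AVIVO_DC_I2C_STATUS1) & AVIVO_DC_I2C_DONE)
			break;
		udelay(10);
	}
	if (RREG32(AVIVO_DC_I2C_STATUS1) & (AVIVO_DC_I2C_NACK | AVIVO_DC_I2C_HALT))
		return -EIO;
	*byte = RREG32(AVIVO_DC_I2C_DATA) & 0xff;	/* data comes back 8 bits at a time */
	return 0;
}
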
@ -51,7 +51,6 @@ static int r520_mc_wait_for_idle(struct radeon_device *rdev)
static void r520_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
ENTER();

r100_hdp_reset(rdev);
rv515_vga_render_disable(rdev);
@ -95,7 +94,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
static void r520_vram_get_type(struct radeon_device *rdev)
{
uint32_t tmp;
ENTER();

rdev->mc.vram_width = 128;
rdev->mc.vram_is_ddr = true;
@ -121,13 +119,15 @@ static void r520_vram_get_type(struct radeon_device *rdev)
rdev->mc.vram_width *= 2;
}

void r520_vram_info(struct radeon_device *rdev)
void r520_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;

r520_vram_get_type(rdev);

r100_vram_init_sizes(rdev);
radeon_vram_location(rdev, &rdev->mc, 0);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
 * default setup
 */
@ -251,13 +251,15 @@ int r520_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */
r520_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev);
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
if (r)
return r;
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
radeon_agp_disable(rdev);
}
}
/* initialize memory controller */
r520_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
@ -284,7 +286,6 @@ int r520_init(struct radeon_device *rdev)
// r100_ib_fini(rdev);
rv370_pcie_gart_fini(rdev);
// radeon_agp_fini(rdev);
// radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}

@ -352,23 +352,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
/*
 * R600 PCIE GART
 */
int r600_gart_clear_page(struct radeon_device *rdev, int i)
{
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
u64 pte;

if (i < 0 || i > rdev->gart.num_gpu_pages)
return -EINVAL;
pte = 0;
writeq(pte, ((void __iomem *)ptr) + (i * 8));
return 0;
}

void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;

/* flush hdp cache so updates hit vram */
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
@ -415,6 +406,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);

/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@ -618,6 +610,68 @@ static void r600_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address it has in the
 * CPU (PCI) address space, as some GPUs seem to have issues when it is
 * reprogrammed to a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then we place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU point of view so that we can
 * program the GPU to catch accesses outside of them (weird GPU policy, see ??).
 *
 * This function never fails; the worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on an AGP platform.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;

if (mc->mc_vram_size > 0xE0000000) {
/* leave room for at least 512M GTT */
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xE0000000;
mc->mc_vram_size = 0xE0000000;
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
size_af = 0xFFFFFFFF - mc->gtt_end + 1;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = size_bf;
mc->mc_vram_size = size_bf;
}
mc->vram_start = mc->gtt_start - mc->mc_vram_size;
} else {
if (mc->mc_vram_size > size_af) {
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = size_af;
mc->mc_vram_size = size_af;
}
mc->vram_start = mc->gtt_end;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
} else {
u64 base = 0;
if (rdev->flags & RADEON_IS_IGP)
base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
radeon_vram_location(rdev, &rdev->mc, base);
radeon_gtt_location(rdev, mc);
}
}

int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
@ -657,75 +711,21 @@ int r600_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);

if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
rdev->mc.visible_vram_size = rdev->mc.aper_size;
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;

if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;

if (rdev->flags & RADEON_IS_AGP) {
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
/* Try to put vram before or after AGP because we
 * we want SYSTEM_APERTURE to cover both VRAM and
 * AGP so that GPU can catch out of VRAM/AGP access
 */
if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
/* Enough place before */
rdev->mc.vram_location = rdev->mc.gtt_location -
rdev->mc.mc_vram_size;
} else if (tmp > rdev->mc.mc_vram_size) {
/* Enough place after */
rdev->mc.vram_location = rdev->mc.gtt_location +
rdev->mc.gtt_size;
} else {
/* Try to setup VRAM then AGP might not
 * not work on some card
 */
rdev->mc.vram_location = 0x00000000UL;
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
} else {
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
0xFFFF) << 24;
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
/* Enough place after vram */
rdev->mc.gtt_location = tmp;
} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
/* Enough place before vram */
rdev->mc.gtt_location = 0;
} else {
/* Not enough place after or before shrink
 * gart size
 */
if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
rdev->mc.gtt_location = 0;
rdev->mc.gtt_size = rdev->mc.vram_location;
} else {
rdev->mc.gtt_location = tmp;
rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
}
}
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
rdev->mc.vram_start = rdev->mc.vram_location;
rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
rdev->mc.gtt_start = rdev->mc.gtt_location;
rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
 * default setup
 */
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);

if (rdev->flags & RADEON_IS_IGP)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

return 0;
}

@ -980,6 +980,9 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
u32 backend_map;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
int i, j;
u32 sq_config;
@ -1089,8 +1092,11 @@ void r600_gpu_init(struct radeon_device *rdev)
default:
break;
}
rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE(0);
rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@ -1100,24 +1106,33 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= SAMPLE_SPLIT(tmp);
}
tiling_config |= BANK_SWAPS(1);
tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
rdev->config.r600.max_backends,
(0xff << rdev->config.r600.max_backends) & 0xff);
tiling_config |= BACKEND_MAP(tmp);

cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
(R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));

tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
WREG32(CC_RB_BACKEND_DISABLE, tmp);

/* Setup pipes */
tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

@ -1711,12 +1726,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
/* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */

radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
/* wait for 3D idle clean */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
radeon_ring_write(rdev, 1);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
@ -1860,16 +1880,12 @@ int r600_init(struct radeon_device *rdev)
/* Initialize power management */
radeon_pm_init(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
// if (r)
// return r;
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
radeon_agp_disable(rdev);
}
r = r600_mc_init(rdev);
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
if (r)
return r;
/* Memory manager */

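To make the placement policy of the new r600_vram_gtt_location() concrete, consider an assumed (not measured) AGP configuration with gtt_start = 0x80000000 and gtt_end = 0xBFFFFFFF: the space below the aperture (size_bf) is 2 GiB and the space above it (size_af) is roughly 1 GiB, so VRAM is placed immediately below the GTT. A 1 GiB card then gets vram_start = 0x80000000 - 0x40000000 = 0x40000000 and vram_end = 0x7FFFFFFF, while a card reporting more than 2 GiB would first have mc_vram_size clamped to size_bf with the "limiting VRAM" warning.
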
@ -35,7 +35,7 @@
 */
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
return rdev->family >= CHIP_R600
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@ -146,16 +146,24 @@ static void r600_audio_update_hdmi(unsigned long param)
// jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
}

/*
 * turn on/off audio engine
 */
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling");
WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
}

/*
 * initialize the audio vars and register the update timer
 */
int r600_audio_init(struct radeon_device *rdev)
{
if (!r600_audio_chipset_supported(rdev))
if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return 0;

DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
r600_audio_engine_enable(rdev, true);

rdev->audio_channels = -1;
rdev->audio_rate = -1;
@ -258,10 +266,10 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
 */
void r600_audio_fini(struct radeon_device *rdev)
{
if (!r600_audio_chipset_supported(rdev))
if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return;

WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
// del_timer(&rdev->audio_timer);

// del_timer(&rdev->audio_timer);
r600_audio_engine_enable(rdev, false);
}

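The r600_audio code above leans on WREG32_P(), the driver's masked read-modify-write helper whose definition appears (truncated) in the radeon.h hunk later in this commit: it preserves the register bits selected by mask and substitutes val for the rest. An equivalent open-coded sketch of the audio-enable call, for illustration only (assumes rdev and enable are in scope):

/* Equivalent of WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000):
 * bits 31 and 24 are rewritten, every other bit keeps its current value. */
uint32_t tmp = RREG32(R600_AUDIO_ENABLE);
tmp &= ~0x81000000;			/* mask argument: keep everything except the enable bits */
tmp |= enable ? 0x81000000 : 0x0;	/* val argument: write the requested state back in */
WREG32(R600_AUDIO_ENABLE, tmp);
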
@ -883,6 +883,16 @@

#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480

#define R_028C04_PA_SC_AA_CONFIG 0x028C04
#define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0)
#define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3)
#define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC
#define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4)
#define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1)
#define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF
#define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13)
#define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF)
#define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF
#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
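
The new r600d.h entries follow the header's usual S_xxx/G_xxx/C_xxx triplet: S_ packs a field value into place, G_ extracts it, and C_ is the AND-mask that clears the field. A small illustrative fragment (not taken from this commit; assumes it sits in a function where rdev is in scope):

/* Read-modify-write of the MSAA sample count using the accessor triplet above. */
u32 cfg = RREG32(R_028C04_PA_SC_AA_CONFIG);
unsigned num = G_028C04_MSAA_NUM_SAMPLES(cfg);	/* extract the current 2-bit field */
cfg &= C_028C04_MSAA_NUM_SAMPLES;		/* clear it */
cfg |= S_028C04_MSAA_NUM_SAMPLES(num);		/* pack a (here unchanged) value back in */
WREG32(R_028C04_PA_SC_AA_CONFIG, cfg);
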
@ -905,6 +915,461 @@
|
||||
#define R_0280D4_CB_COLOR5_TILE 0x0280D4
|
||||
#define R_0280D8_CB_COLOR6_TILE 0x0280D8
|
||||
#define R_0280DC_CB_COLOR7_TILE 0x0280DC
|
||||
|
||||
#define R_0280A0_CB_COLOR0_INFO 0x0280A0
|
||||
#define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0)
|
||||
#define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3)
|
||||
#define C_0280A0_ENDIAN 0xFFFFFFFC
|
||||
#define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2)
|
||||
#define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F)
|
||||
#define C_0280A0_FORMAT 0xFFFFFF03
|
||||
#define V_0280A0_COLOR_INVALID 0x00000000
|
||||
#define V_0280A0_COLOR_8 0x00000001
|
||||
#define V_0280A0_COLOR_4_4 0x00000002
|
||||
#define V_0280A0_COLOR_3_3_2 0x00000003
|
||||
#define V_0280A0_COLOR_16 0x00000005
|
||||
#define V_0280A0_COLOR_16_FLOAT 0x00000006
|
||||
#define V_0280A0_COLOR_8_8 0x00000007
|
||||
#define V_0280A0_COLOR_5_6_5 0x00000008
|
||||
#define V_0280A0_COLOR_6_5_5 0x00000009
|
||||
#define V_0280A0_COLOR_1_5_5_5 0x0000000A
|
||||
#define V_0280A0_COLOR_4_4_4_4 0x0000000B
|
||||
#define V_0280A0_COLOR_5_5_5_1 0x0000000C
|
||||
#define V_0280A0_COLOR_32 0x0000000D
|
||||
#define V_0280A0_COLOR_32_FLOAT 0x0000000E
|
||||
#define V_0280A0_COLOR_16_16 0x0000000F
|
||||
#define V_0280A0_COLOR_16_16_FLOAT 0x00000010
|
||||
#define V_0280A0_COLOR_8_24 0x00000011
|
||||
#define V_0280A0_COLOR_8_24_FLOAT 0x00000012
|
||||
#define V_0280A0_COLOR_24_8 0x00000013
|
||||
#define V_0280A0_COLOR_24_8_FLOAT 0x00000014
|
||||
#define V_0280A0_COLOR_10_11_11 0x00000015
|
||||
#define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016
|
||||
#define V_0280A0_COLOR_11_11_10 0x00000017
|
||||
#define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018
|
||||
#define V_0280A0_COLOR_2_10_10_10 0x00000019
|
||||
#define V_0280A0_COLOR_8_8_8_8 0x0000001A
|
||||
#define V_0280A0_COLOR_10_10_10_2 0x0000001B
|
||||
#define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C
|
||||
#define V_0280A0_COLOR_32_32 0x0000001D
|
||||
#define V_0280A0_COLOR_32_32_FLOAT 0x0000001E
|
||||
#define V_0280A0_COLOR_16_16_16_16 0x0000001F
|
||||
#define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020
|
||||
#define V_0280A0_COLOR_32_32_32_32 0x00000022
|
||||
#define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023
|
||||
#define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8)
|
||||
#define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF)
|
||||
#define C_0280A0_ARRAY_MODE 0xFFFFF0FF
|
||||
#define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000
|
||||
#define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001
|
||||
#define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002
|
||||
#define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004
|
||||
#define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12)
|
||||
#define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
|
||||
#define C_0280A0_NUMBER_TYPE 0xFFFF8FFF
|
||||
#define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15)
|
||||
#define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1)
|
||||
#define C_0280A0_READ_SIZE 0xFFFF7FFF
|
||||
#define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16)
|
||||
#define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3)
|
||||
#define C_0280A0_COMP_SWAP 0xFFFCFFFF
|
||||
#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
|
||||
#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
|
||||
#define C_0280A0_TILE_MODE 0xFFF3FFFF
|
||||
#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
|
||||
#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
|
||||
#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
|
||||
#define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21)
|
||||
#define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1)
|
||||
#define C_0280A0_CLEAR_COLOR 0xFFDFFFFF
|
||||
#define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22)
|
||||
#define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1)
|
||||
#define C_0280A0_BLEND_BYPASS 0xFFBFFFFF
|
||||
#define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23)
|
||||
#define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1)
|
||||
#define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF
|
||||
#define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24)
|
||||
#define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1)
|
||||
#define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF
|
||||
#define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25)
|
||||
#define G_0280A0_ROUND_MODE(x) (((x) >> 25) & 0x1)
|
||||
#define C_0280A0_ROUND_MODE 0xFDFFFFFF
|
||||
#define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26)
|
||||
#define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1)
|
||||
#define C_0280A0_TILE_COMPACT 0xFBFFFFFF
|
||||
#define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27)
|
||||
#define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1)
|
||||
#define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF
|
||||
#define R_0280A4_CB_COLOR1_INFO 0x0280A4
|
||||
#define R_0280A8_CB_COLOR2_INFO 0x0280A8
|
||||
#define R_0280AC_CB_COLOR3_INFO 0x0280AC
|
||||
#define R_0280B0_CB_COLOR4_INFO 0x0280B0
|
||||
#define R_0280B4_CB_COLOR5_INFO 0x0280B4
|
||||
#define R_0280B8_CB_COLOR6_INFO 0x0280B8
|
||||
#define R_0280BC_CB_COLOR7_INFO 0x0280BC
|
||||
#define R_028060_CB_COLOR0_SIZE 0x028060
|
||||
#define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
|
||||
#define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
|
||||
#define C_028060_PITCH_TILE_MAX 0xFFFFFC00
|
||||
#define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
|
||||
#define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
|
||||
#define C_028060_SLICE_TILE_MAX 0xC00003FF
|
||||
#define R_028064_CB_COLOR1_SIZE 0x028064
|
||||
#define R_028068_CB_COLOR2_SIZE 0x028068
|
||||
#define R_02806C_CB_COLOR3_SIZE 0x02806C
|
||||
#define R_028070_CB_COLOR4_SIZE 0x028070
|
||||
#define R_028074_CB_COLOR5_SIZE 0x028074
|
||||
#define R_028078_CB_COLOR6_SIZE 0x028078
|
||||
#define R_02807C_CB_COLOR7_SIZE 0x02807C
|
||||
#define R_028238_CB_TARGET_MASK 0x028238
|
||||
#define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0)
|
||||
#define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF)
|
||||
#define C_028238_TARGET0_ENABLE 0xFFFFFFF0
|
||||
#define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4)
|
||||
#define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF)
|
||||
#define C_028238_TARGET1_ENABLE 0xFFFFFF0F
|
||||
#define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8)
|
||||
#define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF)
|
||||
#define C_028238_TARGET2_ENABLE 0xFFFFF0FF
|
||||
#define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12)
|
||||
#define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF)
|
||||
#define C_028238_TARGET3_ENABLE 0xFFFF0FFF
|
||||
#define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16)
|
||||
#define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF)
|
||||
#define C_028238_TARGET4_ENABLE 0xFFF0FFFF
|
||||
#define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20)
|
||||
#define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF)
|
||||
#define C_028238_TARGET5_ENABLE 0xFF0FFFFF
|
||||
#define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24)
|
||||
#define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF)
|
||||
#define C_028238_TARGET6_ENABLE 0xF0FFFFFF
|
||||
#define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28)
|
||||
#define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF)
|
||||
#define C_028238_TARGET7_ENABLE 0x0FFFFFFF
|
||||
#define R_02823C_CB_SHADER_MASK 0x02823C
|
||||
#define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0)
|
||||
#define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF)
|
||||
#define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0
|
||||
#define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4)
|
||||
#define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF)
|
||||
#define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F
|
||||
#define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8)
|
||||
#define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF)
|
||||
#define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF
|
||||
#define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12)
|
||||
#define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF)
|
||||
#define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF
|
||||
#define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16)
|
||||
#define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF)
|
||||
#define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF
|
||||
#define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20)
|
||||
#define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF)
|
||||
#define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF
|
||||
#define S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24)
|
||||
#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF)
|
||||
#define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF
|
||||
#define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28)
|
||||
#define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF)
|
||||
#define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF
|
||||
#define R_028AB0_VGT_STRMOUT_EN 0x028AB0
|
||||
#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0)
|
||||
#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1)
|
||||
#define C_028AB0_STREAMOUT 0xFFFFFFFE
|
||||
#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20
|
||||
#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0)
|
||||
#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1)
|
||||
#define C_028B20_BUFFER_0_EN 0xFFFFFFFE
|
||||
#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1)
|
||||
#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1)
|
||||
#define C_028B20_BUFFER_1_EN 0xFFFFFFFD
|
||||
#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2)
|
||||
#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1)
|
||||
#define C_028B20_BUFFER_2_EN 0xFFFFFFFB
|
||||
#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3)
|
||||
#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1)
|
||||
#define C_028B20_BUFFER_3_EN 0xFFFFFFF7
|
||||
#define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
|
||||
#define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
|
||||
#define C_028B20_SIZE 0x00000000
|
||||
#define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000
|
||||
#define S_038000_DIM(x) (((x) & 0x7) << 0)
|
||||
#define G_038000_DIM(x) (((x) >> 0) & 0x7)
|
||||
#define C_038000_DIM 0xFFFFFFF8
|
||||
#define V_038000_SQ_TEX_DIM_1D 0x00000000
|
||||
#define V_038000_SQ_TEX_DIM_2D 0x00000001
|
||||
#define V_038000_SQ_TEX_DIM_3D 0x00000002
|
||||
#define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003
|
||||
#define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004
|
||||
#define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005
|
||||
#define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006
|
||||
#define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
|
||||
#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
|
||||
#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
|
||||
#define C_038000_TILE_MODE 0xFFFFFF87
|
||||
#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
|
||||
#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
|
||||
#define C_038000_TILE_TYPE 0xFFFFFF7F
|
||||
#define S_038000_PITCH(x) (((x) & 0x7FF) << 8)
|
||||
#define G_038000_PITCH(x) (((x) >> 8) & 0x7FF)
|
||||
#define C_038000_PITCH 0xFFF800FF
|
||||
#define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19)
|
||||
#define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF)
|
||||
#define C_038000_TEX_WIDTH 0x0007FFFF
|
||||
#define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004
|
||||
#define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0)
|
||||
#define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF)
|
||||
#define C_038004_TEX_HEIGHT 0xFFFFE000
|
||||
#define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13)
|
||||
#define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF)
|
||||
#define C_038004_TEX_DEPTH 0xFC001FFF
|
||||
#define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26)
|
||||
#define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F)
|
||||
#define C_038004_DATA_FORMAT 0x03FFFFFF
|
||||
#define V_038004_COLOR_INVALID 0x00000000
|
||||
#define V_038004_COLOR_8 0x00000001
|
||||
#define V_038004_COLOR_4_4 0x00000002
|
||||
#define V_038004_COLOR_3_3_2 0x00000003
|
||||
#define V_038004_COLOR_16 0x00000005
|
||||
#define V_038004_COLOR_16_FLOAT 0x00000006
|
||||
#define V_038004_COLOR_8_8 0x00000007
|
||||
#define V_038004_COLOR_5_6_5 0x00000008
|
||||
#define V_038004_COLOR_6_5_5 0x00000009
|
||||
#define V_038004_COLOR_1_5_5_5 0x0000000A
|
||||
#define V_038004_COLOR_4_4_4_4 0x0000000B
|
||||
#define V_038004_COLOR_5_5_5_1 0x0000000C
|
||||
#define V_038004_COLOR_32 0x0000000D
|
||||
#define V_038004_COLOR_32_FLOAT 0x0000000E
|
||||
#define V_038004_COLOR_16_16 0x0000000F
|
||||
#define V_038004_COLOR_16_16_FLOAT 0x00000010
|
||||
#define V_038004_COLOR_8_24 0x00000011
|
||||
#define V_038004_COLOR_8_24_FLOAT 0x00000012
|
||||
#define V_038004_COLOR_24_8 0x00000013
|
||||
#define V_038004_COLOR_24_8_FLOAT 0x00000014
|
||||
#define V_038004_COLOR_10_11_11 0x00000015
|
||||
#define V_038004_COLOR_10_11_11_FLOAT 0x00000016
|
||||
#define V_038004_COLOR_11_11_10 0x00000017
|
||||
#define V_038004_COLOR_11_11_10_FLOAT 0x00000018
|
||||
#define V_038004_COLOR_2_10_10_10 0x00000019
|
||||
#define V_038004_COLOR_8_8_8_8 0x0000001A
|
||||
#define V_038004_COLOR_10_10_10_2 0x0000001B
|
||||
#define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C
|
||||
#define V_038004_COLOR_32_32 0x0000001D
|
||||
#define V_038004_COLOR_32_32_FLOAT 0x0000001E
|
||||
#define V_038004_COLOR_16_16_16_16 0x0000001F
|
||||
#define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020
|
||||
#define V_038004_COLOR_32_32_32_32 0x00000022
|
||||
#define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023
|
||||
#define V_038004_FMT_1 0x00000025
|
||||
#define V_038004_FMT_GB_GR 0x00000027
|
||||
#define V_038004_FMT_BG_RG 0x00000028
|
||||
#define V_038004_FMT_32_AS_8 0x00000029
|
||||
#define V_038004_FMT_32_AS_8_8 0x0000002A
|
||||
#define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B
|
||||
#define V_038004_FMT_8_8_8 0x0000002C
|
||||
#define V_038004_FMT_16_16_16 0x0000002D
|
||||
#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
|
||||
#define V_038004_FMT_32_32_32 0x0000002F
|
||||
#define V_038004_FMT_32_32_32_FLOAT 0x00000030
|
||||
#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
|
||||
#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
|
||||
#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
|
||||
#define C_038010_FORMAT_COMP_X 0xFFFFFFFC
|
||||
#define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
|
||||
#define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
|
||||
#define C_038010_FORMAT_COMP_Y 0xFFFFFFF3
|
||||
#define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
|
||||
#define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
|
||||
#define C_038010_FORMAT_COMP_Z 0xFFFFFFCF
|
||||
#define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
|
||||
#define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
|
||||
#define C_038010_FORMAT_COMP_W 0xFFFFFF3F
|
||||
#define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
|
||||
#define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
|
||||
#define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF
|
||||
#define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
|
||||
#define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
|
||||
#define C_038010_SRF_MODE_ALL 0xFFFFFBFF
|
||||
#define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
|
||||
#define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
|
||||
#define C_038010_FORCE_DEGAMMA 0xFFFFF7FF
|
||||
#define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
|
||||
#define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
|
||||
#define C_038010_ENDIAN_SWAP 0xFFFFCFFF
|
||||
#define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14)
|
||||
#define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3)
|
||||
#define C_038010_REQUEST_SIZE 0xFFFF3FFF
|
||||
#define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16)
|
||||
#define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7)
|
||||
#define C_038010_DST_SEL_X 0xFFF8FFFF
|
||||
#define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19)
|
||||
#define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
|
||||
#define C_038010_DST_SEL_Y 0xFFC7FFFF
|
||||
#define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22)
|
||||
#define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
|
||||
#define C_038010_DST_SEL_Z 0xFE3FFFFF
|
||||
#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
|
||||
#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
|
||||
#define C_038010_DST_SEL_W 0xF1FFFFFF
|
||||
#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
|
||||
#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
|
||||
#define C_038010_BASE_LEVEL 0x0FFFFFFF
|
||||
#define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014
|
||||
#define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0)
|
||||
#define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
|
||||
#define C_038014_LAST_LEVEL 0xFFFFFFF0
|
||||
#define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
|
||||
#define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
|
||||
#define C_038014_BASE_ARRAY 0xFFFE000F
|
||||
#define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
|
||||
#define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
|
||||
#define C_038014_LAST_ARRAY 0xC001FFFF
|
||||
#define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8
|
||||
#define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_0288A8_ITEMSIZE 0xFFFF8000
|
||||
#define R_008C44_SQ_ESGS_RING_SIZE 0x008C44
|
||||
#define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
|
||||
#define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
|
||||
#define C_008C44_MEM_SIZE 0x00000000
|
||||
#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0
|
||||
#define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_0288B0_ITEMSIZE 0xFFFF8000
|
||||
#define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54
|
||||
#define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
|
||||
#define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
|
||||
#define C_008C54_MEM_SIZE 0x00000000
|
||||
#define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0
|
||||
#define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_0288C0_ITEMSIZE 0xFFFF8000
|
||||
#define R_008C74_SQ_FBUF_RING_SIZE 0x008C74
|
||||
#define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
|
||||
#define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
|
||||
#define C_008C74_MEM_SIZE 0x00000000
|
||||
#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4
|
||||
#define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_0288B4_ITEMSIZE 0xFFFF8000
|
||||
#define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C
|
||||
#define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
|
||||
#define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
|
||||
#define C_008C5C_MEM_SIZE 0x00000000
|
||||
#define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC
|
||||
#define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_0288AC_ITEMSIZE 0xFFFF8000
|
||||
#define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C
|
||||
#define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
|
||||
#define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
|
||||
#define C_008C4C_MEM_SIZE 0x00000000
|
||||
#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC
|
||||
#define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_0288BC_ITEMSIZE 0xFFFF8000
|
||||
#define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C
|
||||
#define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
|
||||
#define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
|
||||
#define C_008C6C_MEM_SIZE 0x00000000
|
||||
#define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4
|
||||
#define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_0288C4_ITEMSIZE 0xFFFF8000
|
||||
#define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C
|
||||
#define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
|
||||
#define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
|
||||
#define C_008C7C_MEM_SIZE 0x00000000
|
||||
#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8
|
||||
#define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_0288B8_ITEMSIZE 0xFFFF8000
|
||||
#define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64
|
||||
#define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
|
||||
#define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
|
||||
#define C_008C64_MEM_SIZE 0x00000000
|
||||
#define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8
|
||||
#define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_0288C8_ITEMSIZE 0xFFFF8000
|
||||
#define R_028010_DB_DEPTH_INFO 0x028010
|
||||
#define S_028010_FORMAT(x) (((x) & 0x7) << 0)
|
||||
#define G_028010_FORMAT(x) (((x) >> 0) & 0x7)
|
||||
#define C_028010_FORMAT 0xFFFFFFF8
|
||||
#define V_028010_DEPTH_INVALID 0x00000000
|
||||
#define V_028010_DEPTH_16 0x00000001
|
||||
#define V_028010_DEPTH_X8_24 0x00000002
|
||||
#define V_028010_DEPTH_8_24 0x00000003
|
||||
#define V_028010_DEPTH_X8_24_FLOAT 0x00000004
|
||||
#define V_028010_DEPTH_8_24_FLOAT 0x00000005
|
||||
#define V_028010_DEPTH_32_FLOAT 0x00000006
|
||||
#define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007
|
||||
#define S_028010_READ_SIZE(x) (((x) & 0x1) << 3)
|
||||
#define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1)
|
||||
#define C_028010_READ_SIZE 0xFFFFFFF7
|
||||
#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
|
||||
#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
|
||||
#define C_028010_ARRAY_MODE 0xFFF87FFF
|
||||
#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
|
||||
#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
|
||||
#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
|
||||
#define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26)
|
||||
#define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1)
|
||||
#define C_028010_TILE_COMPACT 0xFBFFFFFF
|
||||
#define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
|
||||
#define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
|
||||
#define C_028010_ZRANGE_PRECISION 0x7FFFFFFF
|
||||
#define R_028000_DB_DEPTH_SIZE 0x028000
|
||||
#define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
|
||||
#define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
|
||||
#define C_028000_PITCH_TILE_MAX 0xFFFFFC00
|
||||
#define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
|
||||
#define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
|
||||
#define C_028000_SLICE_TILE_MAX 0xC00003FF
|
||||
#define R_028004_DB_DEPTH_VIEW 0x028004
|
||||
#define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0)
|
||||
#define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF)
|
||||
#define C_028004_SLICE_START 0xFFFFF800
|
||||
#define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13)
|
||||
#define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
|
||||
#define C_028004_SLICE_MAX 0xFF001FFF
|
||||
#define R_028800_DB_DEPTH_CONTROL 0x028800
|
||||
#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
|
||||
#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
|
||||
#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
|
||||
#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
|
||||
#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
|
||||
#define C_028800_Z_ENABLE 0xFFFFFFFD
|
||||
#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
|
||||
#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
|
||||
#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
|
||||
#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
|
||||
#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
|
||||
#define C_028800_ZFUNC 0xFFFFFF8F
|
||||
#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
|
||||
#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
|
||||
#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
|
||||
#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
|
||||
#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
|
||||
#define C_028800_STENCILFUNC 0xFFFFF8FF
|
||||
#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
|
||||
#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
|
||||
#define C_028800_STENCILFAIL 0xFFFFC7FF
|
||||
#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
|
||||
#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
|
||||
#define C_028800_STENCILZPASS 0xFFFE3FFF
|
||||
#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
|
||||
#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
|
||||
#define C_028800_STENCILZFAIL 0xFFF1FFFF
|
||||
#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
|
||||
#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
|
||||
#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
|
||||
#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
|
||||
#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
|
||||
#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
|
||||
#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
|
||||
#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
|
||||
#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
|
||||
#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
|
||||
#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
|
||||
#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
|
||||
|
||||
#endif
|
||||
|
@ -97,8 +97,13 @@ extern int radeon_testing;
|
||||
extern int radeon_connector_table;
|
||||
extern int radeon_tv;
|
||||
extern int radeon_new_pll;
|
||||
extern int radeon_dynpm;
|
||||
extern int radeon_audio;
|
||||
|
||||
typedef struct pm_message {
|
||||
int event;
|
||||
} pm_message_t;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
int width;
|
||||
@ -181,6 +186,21 @@ struct radeon_device;
|
||||
/*
|
||||
* BIOS.
|
||||
*/
|
||||
#define ATRM_BIOS_PAGE 4096
|
||||
|
||||
#if defined(CONFIG_VGA_SWITCHEROO)
|
||||
bool radeon_atrm_supported(struct pci_dev *pdev);
|
||||
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
|
||||
#else
|
||||
static inline bool radeon_atrm_supported(struct pci_dev *pdev)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
bool radeon_get_bios(struct radeon_device *rdev);
|
||||
|
||||
|
||||
@ -201,17 +221,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev);
|
||||
struct radeon_clock {
|
||||
struct radeon_pll p1pll;
|
||||
struct radeon_pll p2pll;
|
||||
struct radeon_pll dcpll;
|
||||
struct radeon_pll spll;
|
||||
struct radeon_pll mpll;
|
||||
/* 10 Khz units */
|
||||
uint32_t default_mclk;
|
||||
uint32_t default_sclk;
|
||||
uint32_t default_dispclk;
|
||||
uint32_t dp_extclk;
|
||||
};
|
||||
|
||||
/*
|
||||
* Power management
|
||||
*/
|
||||
int radeon_pm_init(struct radeon_device *rdev);
|
||||
void radeon_pm_compute_clocks(struct radeon_device *rdev);
|
||||
void radeon_combios_get_power_modes(struct radeon_device *rdev);
|
||||
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
|
||||
|
||||
/*
|
||||
* Fences.
|
||||
@ -339,6 +365,7 @@ union radeon_gart_table {
|
||||
};
|
||||
|
||||
#define RADEON_GPU_PAGE_SIZE 4096
|
||||
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
|
||||
|
||||
struct radeon_gart {
|
||||
dma_addr_t table_addr;
|
||||
@ -373,11 +400,10 @@ struct radeon_mc {
|
||||
/* for some chips with <= 32MB we need to lie
|
||||
* about vram size near mc fb location */
|
||||
u64 mc_vram_size;
|
||||
u64 gtt_location;
|
||||
u64 visible_vram_size;
|
||||
u64 gtt_size;
|
||||
u64 gtt_start;
|
||||
u64 gtt_end;
|
||||
u64 vram_location;
|
||||
u64 vram_start;
|
||||
u64 vram_end;
|
||||
unsigned vram_width;
|
||||
@ -387,7 +413,6 @@ struct radeon_mc {
|
||||
bool igp_sideport_enabled;
|
||||
};
|
||||
|
||||
int radeon_mc_setup(struct radeon_device *rdev);
|
||||
bool radeon_combios_sideport_present(struct radeon_device *rdev);
|
||||
bool radeon_atombios_sideport_present(struct radeon_device *rdev);
|
||||
|
||||
@ -443,6 +468,7 @@ struct radeon_ib {
|
||||
struct radeon_ib_pool {
|
||||
// struct mutex mutex;
|
||||
struct radeon_bo *robj;
|
||||
struct list_head bogus_ib;
|
||||
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
|
||||
bool ready;
|
||||
unsigned head_id;
|
||||
@ -496,6 +522,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||
int radeon_ib_pool_init(struct radeon_device *rdev);
|
||||
void radeon_ib_pool_fini(struct radeon_device *rdev);
|
||||
int radeon_ib_test(struct radeon_device *rdev);
|
||||
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||
/* Ring access between begin & end cannot sleep */
|
||||
void radeon_ring_free_size(struct radeon_device *rdev);
|
||||
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
|
||||
@ -529,6 +556,7 @@ struct radeon_cs_chunk {
|
||||
};
|
||||
|
||||
struct radeon_cs_parser {
|
||||
struct device *dev;
|
||||
struct radeon_device *rdev;
|
||||
// struct drm_file *filp;
|
||||
/* chunks */
|
||||
@ -632,7 +660,99 @@ struct radeon_wb {
|
||||
* Equation between gpu/memory clock and available bandwidth is hw dependent
|
||||
* (type of memory, bus size, efficiency, ...)
|
||||
*/
|
||||
enum radeon_pm_state {
|
||||
PM_STATE_DISABLED,
|
||||
PM_STATE_MINIMUM,
|
||||
PM_STATE_PAUSED,
|
||||
PM_STATE_ACTIVE
|
||||
};
|
||||
enum radeon_pm_action {
|
||||
PM_ACTION_NONE,
|
||||
PM_ACTION_MINIMUM,
|
||||
PM_ACTION_DOWNCLOCK,
|
||||
PM_ACTION_UPCLOCK
|
||||
};
|
||||
|
||||
enum radeon_voltage_type {
|
||||
VOLTAGE_NONE = 0,
|
||||
VOLTAGE_GPIO,
|
||||
VOLTAGE_VDDC,
|
||||
VOLTAGE_SW
|
||||
};
|
||||
|
||||
enum radeon_pm_state_type {
|
||||
POWER_STATE_TYPE_DEFAULT,
|
||||
POWER_STATE_TYPE_POWERSAVE,
|
||||
POWER_STATE_TYPE_BATTERY,
|
||||
POWER_STATE_TYPE_BALANCED,
|
||||
POWER_STATE_TYPE_PERFORMANCE,
|
||||
};
|
||||
|
||||
enum radeon_pm_clock_mode_type {
|
||||
POWER_MODE_TYPE_DEFAULT,
|
||||
POWER_MODE_TYPE_LOW,
|
||||
POWER_MODE_TYPE_MID,
|
||||
POWER_MODE_TYPE_HIGH,
|
||||
};
|
||||
|
||||
struct radeon_voltage {
|
||||
enum radeon_voltage_type type;
|
||||
/* gpio voltage */
|
||||
struct radeon_gpio_rec gpio;
|
||||
u32 delay; /* delay in usec from voltage drop to sclk change */
|
||||
bool active_high; /* voltage drop is active when bit is high */
|
||||
/* VDDC voltage */
|
||||
u8 vddc_id; /* index into vddc voltage table */
|
||||
u8 vddci_id; /* index into vddci voltage table */
|
||||
bool vddci_enabled;
|
||||
/* r6xx+ sw */
|
||||
u32 voltage;
|
||||
};
|
||||
|
||||
struct radeon_pm_non_clock_info {
|
||||
/* pcie lanes */
|
||||
int pcie_lanes;
|
||||
/* standardized non-clock flags */
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
struct radeon_pm_clock_info {
|
||||
/* memory clock */
|
||||
u32 mclk;
|
||||
/* engine clock */
|
||||
u32 sclk;
|
||||
/* voltage info */
|
||||
struct radeon_voltage voltage;
|
||||
/* standardized clock flags - not sure we'll need these */
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
struct radeon_power_state {
|
||||
enum radeon_pm_state_type type;
|
||||
/* XXX: use a define for num clock modes */
|
||||
struct radeon_pm_clock_info clock_info[8];
|
||||
/* number of valid clock modes in this power state */
|
||||
int num_clock_modes;
|
||||
struct radeon_pm_clock_info *default_clock_mode;
|
||||
/* non clock info about this state */
|
||||
struct radeon_pm_non_clock_info non_clock_info;
|
||||
bool voltage_drop_active;
|
||||
};
|
||||
|
||||
/*
|
||||
* Some modes are overclocked by very low value, accept them
|
||||
*/
|
||||
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
|
||||
|
||||
struct radeon_pm {
|
||||
// struct mutex mutex;
|
||||
// struct delayed_work idle_work;
|
||||
enum radeon_pm_state state;
|
||||
enum radeon_pm_action planned_action;
|
||||
unsigned long action_timeout;
|
||||
bool downclocked;
|
||||
int active_crtcs;
|
||||
int req_vblank;
|
||||
fixed20_12 max_bandwidth;
|
||||
fixed20_12 igp_sideport_mclk;
|
||||
fixed20_12 igp_system_mclk;
|
||||
@ -644,6 +764,15 @@ struct radeon_pm {
|
||||
fixed20_12 core_bandwidth;
|
||||
fixed20_12 sclk;
|
||||
fixed20_12 needed_bandwidth;
|
||||
/* XXX: use a define for num power modes */
|
||||
struct radeon_power_state power_state[8];
|
||||
/* number of valid power states */
|
||||
int num_power_states;
|
||||
struct radeon_power_state *current_power_state;
|
||||
struct radeon_pm_clock_info *current_clock_mode;
|
||||
struct radeon_power_state *requested_power_state;
|
||||
struct radeon_pm_clock_info *requested_clock_mode;
|
||||
struct radeon_power_state *default_power_state;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -689,6 +818,7 @@ struct radeon_asic {
|
||||
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
|
||||
uint32_t (*get_memory_clock)(struct radeon_device *rdev);
|
||||
void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
|
||||
int (*get_pcie_lanes)(struct radeon_device *rdev);
|
||||
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
|
||||
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
|
||||
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
|
||||
@ -739,6 +869,9 @@ struct r600_asic {
|
||||
unsigned sx_max_export_pos_size;
|
||||
unsigned sx_max_export_smx_size;
|
||||
unsigned sq_num_cf_insts;
|
||||
unsigned tiling_nbanks;
|
||||
unsigned tiling_npipes;
|
||||
unsigned tiling_group_size;
|
||||
};
|
||||
|
||||
struct rv770_asic {
|
||||
@ -759,6 +892,9 @@ struct rv770_asic {
|
||||
unsigned sc_prim_fifo_size;
|
||||
unsigned sc_hiz_tile_fifo_size;
|
||||
unsigned sc_earlyz_tile_fifo_fize;
|
||||
unsigned tiling_nbanks;
|
||||
unsigned tiling_npipes;
|
||||
unsigned tiling_group_size;
|
||||
};
|
||||
|
||||
union radeon_asic_config {
|
||||
@ -840,6 +976,7 @@ struct radeon_device {
|
||||
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
|
||||
struct r600_blit r600_blit;
|
||||
int msi_enabled; /* msi enabled */
|
||||
int num_crtc; /* number of crtcs */
|
||||
|
||||
/* audio stuff */
|
||||
// struct timer_list audio_timer;
|
||||
@ -848,6 +985,8 @@ struct radeon_device {
|
||||
int audio_bits_per_sample;
|
||||
uint8_t audio_status_bits;
|
||||
uint8_t audio_category_code;
|
||||
|
||||
bool powered_down;
|
||||
};
|
||||
|
int radeon_device_init(struct radeon_device *rdev,
@ -904,6 +1043,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@ -964,7 +1105,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))

#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))

/*
* BIOS helpers.
@ -1023,6 +1164,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
@ -1037,6 +1179,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
/* AGP */
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
@ -1050,6 +1193,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);

/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
@ -1104,7 +1251,7 @@ extern void r200_set_safe_registers(struct radeon_device *rdev);
/* r300,r350,rv350,rv370,rv380 */
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern void r300_mc_program(struct radeon_device *rdev);
extern void r300_vram_info(struct radeon_device *rdev);
extern void r300_mc_init(struct radeon_device *rdev);
extern void r300_clock_startup(struct radeon_device *rdev);
extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
extern int rv370_pcie_gart_init(struct radeon_device *rdev);
@ -1113,7 +1260,6 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);

/* r420,r423,rv410 */
extern int r420_mc_init(struct radeon_device *rdev);
extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
@ -1155,13 +1301,13 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode2);

/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
@ -1197,6 +1343,14 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
uint8_t status_bits,
uint8_t category_code);

/* evergreen */
struct evergreen_mc_save {
u32 vga_control[6];
u32 vga_render_control;
u32 vga_hdp_control;
u32 crtc_control[6];
};

#include "radeon_object.h"

#define DRM_UDELAY(d) udelay(d)

@ -237,6 +237,10 @@ int radeon_agp_init(struct radeon_device *rdev)

rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
rdev->mc.gtt_start = rdev->mc.agp_base;
rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);

/* workaround some hw issues */
if (rdev->family < CHIP_R200) {
@ -244,7 +248,7 @@ int radeon_agp_init(struct radeon_device *rdev)
}
return 0;
#else
return 0;
return -1;
#endif
}

@ -43,7 +43,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);

/*
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
* r100,rv100,rs100,rv200,rs200
*/
extern int r100_init(struct radeon_device *rdev);
extern void r100_fini(struct radeon_device *rdev);
@ -103,6 +103,52 @@ static struct radeon_asic r100_asic = {
// .cs_parse = &r100_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = NULL,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

||||
/*
|
||||
* r200,rv250,rs300,rv280
|
||||
*/
|
||||
extern int r200_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset,
|
||||
uint64_t dst_offset,
|
||||
unsigned num_pages,
|
||||
struct radeon_fence *fence);
|
||||
static struct radeon_asic r200_asic = {
|
||||
.init = &r100_init,
|
||||
// .fini = &r100_fini,
|
||||
// .suspend = &r100_suspend,
|
||||
// .resume = &r100_resume,
|
||||
// .vga_set_state = &r100_vga_set_state,
|
||||
.gpu_reset = &r100_gpu_reset,
|
||||
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
|
||||
.gart_set_page = &r100_pci_gart_set_page,
|
||||
.cp_commit = &r100_cp_commit,
|
||||
.ring_start = &r100_ring_start,
|
||||
.ring_test = &r100_ring_test,
|
||||
// .ring_ib_execute = &r100_ring_ib_execute,
|
||||
// .irq_set = &r100_irq_set,
|
||||
// .irq_process = &r100_irq_process,
|
||||
// .get_vblank_counter = &r100_get_vblank_counter,
|
||||
.fence_ring_emit = &r100_fence_ring_emit,
|
||||
// .cs_parse = &r100_cs_parse,
|
||||
// .copy_blit = &r100_copy_blit,
|
||||
// .copy_dma = NULL,
|
||||
// .copy = &r100_copy_blit,
|
||||
.get_engine_clock = &radeon_legacy_get_engine_clock,
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
@ -138,11 +184,8 @@ extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t
|
||||
extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
|
||||
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
|
||||
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
|
||||
extern int r300_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset,
|
||||
uint64_t dst_offset,
|
||||
unsigned num_pages,
|
||||
struct radeon_fence *fence);
|
||||
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
|
||||
|
||||
static struct radeon_asic r300_asic = {
|
||||
.init = &r300_init,
|
||||
// .fini = &r300_fini,
|
||||
@ -163,6 +206,45 @@ static struct radeon_asic r300_asic = {
|
||||
// .cs_parse = &r300_cs_parse,
|
||||
// .copy_blit = &r100_copy_blit,
|
||||
// .copy_dma = &r300_copy_dma,
|
||||
// .copy = &r100_copy_blit,
|
||||
.get_engine_clock = &radeon_legacy_get_engine_clock,
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
.get_memory_clock = &radeon_legacy_get_memory_clock,
|
||||
.set_memory_clock = NULL,
|
||||
.get_pcie_lanes = &rv370_get_pcie_lanes,
|
||||
.set_pcie_lanes = &rv370_set_pcie_lanes,
|
||||
.set_clock_gating = &radeon_legacy_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
.clear_surface_reg = r100_clear_surface_reg,
|
||||
.bandwidth_update = &r100_bandwidth_update,
|
||||
.hpd_init = &r100_hpd_init,
|
||||
.hpd_fini = &r100_hpd_fini,
|
||||
.hpd_sense = &r100_hpd_sense,
|
||||
.hpd_set_polarity = &r100_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
|
||||
static struct radeon_asic r300_asic_pcie = {
|
||||
.init = &r300_init,
|
||||
// .fini = &r300_fini,
|
||||
// .suspend = &r300_suspend,
|
||||
// .resume = &r300_resume,
|
||||
// .vga_set_state = &r100_vga_set_state,
|
||||
.gpu_reset = &r300_gpu_reset,
|
||||
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
|
||||
.gart_set_page = &rv370_pcie_gart_set_page,
|
||||
.cp_commit = &r100_cp_commit,
|
||||
.ring_start = &r300_ring_start,
|
||||
.ring_test = &r100_ring_test,
|
||||
// .ring_ib_execute = &r100_ring_ib_execute,
|
||||
// .irq_set = &r100_irq_set,
|
||||
// .irq_process = &r100_irq_process,
|
||||
// .get_vblank_counter = &r100_get_vblank_counter,
|
||||
.fence_ring_emit = &r300_fence_ring_emit,
|
||||
// .cs_parse = &r300_cs_parse,
|
||||
// .copy_blit = &r100_copy_blit,
|
||||
// .copy_dma = &r300_copy_dma,
|
||||
// .copy = &r100_copy_blit,
|
||||
.get_engine_clock = &radeon_legacy_get_engine_clock,
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
@ -212,6 +294,7 @@ static struct radeon_asic r420_asic = {
|
||||
.set_engine_clock = &radeon_atom_set_engine_clock,
|
||||
.get_memory_clock = &radeon_atom_get_memory_clock,
|
||||
.set_memory_clock = &radeon_atom_set_memory_clock,
|
||||
.get_pcie_lanes = &rv370_get_pcie_lanes,
|
||||
.set_pcie_lanes = &rv370_set_pcie_lanes,
|
||||
.set_clock_gating = &radeon_atom_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
@ -261,6 +344,7 @@ static struct radeon_asic rs400_asic = {
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
.get_memory_clock = &radeon_legacy_get_memory_clock,
|
||||
.set_memory_clock = NULL,
|
||||
.get_pcie_lanes = NULL,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = &radeon_legacy_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
@ -320,8 +404,11 @@ static struct radeon_asic rs600_asic = {
|
||||
.set_engine_clock = &radeon_atom_set_engine_clock,
|
||||
.get_memory_clock = &radeon_atom_get_memory_clock,
|
||||
.set_memory_clock = &radeon_atom_set_memory_clock,
|
||||
.get_pcie_lanes = NULL,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = &radeon_atom_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
.clear_surface_reg = r100_clear_surface_reg,
|
||||
.bandwidth_update = &rs600_bandwidth_update,
|
||||
.hpd_init = &rs600_hpd_init,
|
||||
.hpd_fini = &rs600_hpd_fini,
|
||||
@ -366,6 +453,7 @@ static struct radeon_asic rs690_asic = {
|
||||
.set_engine_clock = &radeon_atom_set_engine_clock,
|
||||
.get_memory_clock = &radeon_atom_get_memory_clock,
|
||||
.set_memory_clock = &radeon_atom_set_memory_clock,
|
||||
.get_pcie_lanes = NULL,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = &radeon_atom_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
@ -418,6 +506,7 @@ static struct radeon_asic rv515_asic = {
|
||||
.set_engine_clock = &radeon_atom_set_engine_clock,
|
||||
.get_memory_clock = &radeon_atom_get_memory_clock,
|
||||
.set_memory_clock = &radeon_atom_set_memory_clock,
|
||||
.get_pcie_lanes = &rv370_get_pcie_lanes,
|
||||
.set_pcie_lanes = &rv370_set_pcie_lanes,
|
||||
.set_clock_gating = &radeon_atom_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
@ -461,6 +550,7 @@ static struct radeon_asic r520_asic = {
|
||||
.set_engine_clock = &radeon_atom_set_engine_clock,
|
||||
.get_memory_clock = &radeon_atom_get_memory_clock,
|
||||
.set_memory_clock = &radeon_atom_set_memory_clock,
|
||||
.get_pcie_lanes = &rv370_get_pcie_lanes,
|
||||
.set_pcie_lanes = &rv370_set_pcie_lanes,
|
||||
.set_clock_gating = &radeon_atom_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
@ -537,8 +627,9 @@ static struct radeon_asic r600_asic = {
|
||||
.set_engine_clock = &radeon_atom_set_engine_clock,
|
||||
.get_memory_clock = &radeon_atom_get_memory_clock,
|
||||
.set_memory_clock = &radeon_atom_set_memory_clock,
|
||||
.get_pcie_lanes = &rv370_get_pcie_lanes,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = &radeon_atom_set_clock_gating,
|
||||
.set_clock_gating = NULL,
|
||||
.set_surface_reg = r600_set_surface_reg,
|
||||
.clear_surface_reg = r600_clear_surface_reg,
|
||||
.bandwidth_update = &rv515_bandwidth_update,
|
||||
@ -581,6 +672,7 @@ static struct radeon_asic rv770_asic = {
|
||||
.set_engine_clock = &radeon_atom_set_engine_clock,
|
||||
.get_memory_clock = &radeon_atom_get_memory_clock,
|
||||
.set_memory_clock = &radeon_atom_set_memory_clock,
|
||||
.get_pcie_lanes = &rv370_get_pcie_lanes,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = &radeon_atom_set_clock_gating,
|
||||
.set_surface_reg = r600_set_surface_reg,
|
||||
@ -590,7 +682,55 @@ static struct radeon_asic rv770_asic = {
|
||||
.hpd_fini = &r600_hpd_fini,
|
||||
.hpd_sense = &r600_hpd_sense,
|
||||
.hpd_set_polarity = &r600_hpd_set_polarity,
|
||||
// .ioctl_wait_idle = r600_ioctl_wait_idle,
|
||||
};

/*
* evergreen
*/
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
int evergreen_gpu_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);

static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = NULL,
.gpu_reset = &evergreen_gpu_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = NULL,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
// .copy_dma = &r600_copy_blit,
// .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
};

#endif

@ -159,8 +159,15 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
struct radeon_gpio_rec *gpio)
{
struct radeon_hpd hpd;
u32 reg;

if (ASIC_IS_DCE4(rdev))
reg = EVERGREEN_DC_GPIO_HPD_A;
else
reg = AVIVO_DC_GPIO_HPD_A;

hpd.gpio = *gpio;
if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
if (gpio->reg == reg) {
switch(gpio->mask) {
case (1 << 0):
hpd.hpd = RADEON_HPD_1;
@ -574,6 +581,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ddc_bus.valid = false;
}

/* needed for aux chan transactions */
ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;

conn_id = le16_to_cpu(path->usConnObjectId);

if (!radeon_atom_apply_quirks
@ -838,6 +848,7 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V1_2 info_12;
ATOM_FIRMWARE_INFO_V1_3 info_13;
ATOM_FIRMWARE_INFO_V1_4 info_14;
ATOM_FIRMWARE_INFO_V2_1 info_21;
};

bool radeon_atom_get_clock_info(struct drm_device *dev)
@ -849,6 +860,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
uint8_t frev, crev;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
struct radeon_pll *dcpll = &rdev->clock.dcpll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
uint16_t data_offset;
@ -951,8 +963,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->clock.default_mclk =
le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);

if (ASIC_IS_DCE4(rdev)) {
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
if (rdev->clock.default_dispclk == 0)
rdev->clock.default_dispclk = 60000; /* 600 Mhz */
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
}
*dcpll = *p1pll;

return true;
}

return false;
}

@ -1091,6 +1114,30 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
return ss;
}

static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
struct radeon_encoder_atom_dig *lvds)
{

/* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1179) &&
(dev->pdev->subsystem_device == 0xff50)) {
if ((lvds->native_mode.hdisplay == 1280) &&
(lvds->native_mode.vdisplay == 800))
lvds->pll_algo = PLL_ALGO_LEGACY;
}

/* Dell Studio 15 laptop panel doesn't like new pll divider algo */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1028) &&
(dev->pdev->subsystem_device == 0x029f)) {
if ((lvds->native_mode.hdisplay == 1280) &&
(lvds->native_mode.vdisplay == 800))
lvds->pll_algo = PLL_ALGO_LEGACY;
}

}

union lvds_info {
struct _ATOM_LVDS_INFO info;
struct _ATOM_LVDS_INFO_V12 info_12;
@ -1161,6 +1208,21 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct

lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);

if (ASIC_IS_AVIVO(rdev)) {
if (radeon_new_pll == 0)
lvds->pll_algo = PLL_ALGO_LEGACY;
else
lvds->pll_algo = PLL_ALGO_NEW;
} else {
if (radeon_new_pll == 1)
lvds->pll_algo = PLL_ALGO_NEW;
else
lvds->pll_algo = PLL_ALGO_LEGACY;
}

/* LVDS quirks */
radeon_atom_apply_lvds_quirks(dev, lvds);

encoder->native_mode = lvds->native_mode;
}
return lvds;
@ -1385,6 +1447,371 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
|
||||
return tv_dac;
|
||||
}
|
||||
|
||||
union power_info {
|
||||
struct _ATOM_POWERPLAY_INFO info;
|
||||
struct _ATOM_POWERPLAY_INFO_V2 info_2;
|
||||
struct _ATOM_POWERPLAY_INFO_V3 info_3;
|
||||
struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
|
||||
};
|
||||
|
||||
void radeon_atombios_get_power_modes(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_mode_info *mode_info = &rdev->mode_info;
|
||||
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
|
||||
u16 data_offset;
|
||||
u8 frev, crev;
|
||||
u32 misc, misc2 = 0, sclk, mclk;
|
||||
union power_info *power_info;
|
||||
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
|
||||
struct _ATOM_PPLIB_STATE *power_state;
|
||||
int num_modes = 0, i, j;
|
||||
int state_index = 0, mode_index = 0;
|
||||
|
||||
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
|
||||
|
||||
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
|
||||
|
||||
rdev->pm.default_power_state = NULL;
|
||||
|
||||
if (power_info) {
|
||||
if (frev < 4) {
|
||||
num_modes = power_info->info.ucNumOfPowerModeEntries;
|
||||
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
|
||||
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
|
||||
for (i = 0; i < num_modes; i++) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
|
||||
switch (frev) {
|
||||
case 1:
|
||||
rdev->pm.power_state[state_index].num_clock_modes = 1;
|
||||
rdev->pm.power_state[state_index].clock_info[0].mclk =
|
||||
le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
|
||||
rdev->pm.power_state[state_index].clock_info[0].sclk =
|
||||
le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
|
||||
/* skip invalid modes */
|
||||
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
|
||||
continue;
|
||||
/* skip overclock modes for now */
|
||||
if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
|
||||
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[0].sclk >
|
||||
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
|
||||
continue;
|
||||
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
|
||||
power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
|
||||
misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
|
||||
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
|
||||
VOLTAGE_GPIO;
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
|
||||
radeon_lookup_gpio(rdev,
|
||||
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
|
||||
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
|
||||
true;
|
||||
else
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
|
||||
false;
|
||||
} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
|
||||
VOLTAGE_VDDC;
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
|
||||
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
|
||||
}
|
||||
/* order matters! */
|
||||
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_POWERSAVE;
|
||||
if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BATTERY;
|
||||
if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BATTERY;
|
||||
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BALANCED;
|
||||
if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_PERFORMANCE;
|
||||
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_DEFAULT;
|
||||
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
|
||||
rdev->pm.power_state[state_index].default_clock_mode =
|
||||
&rdev->pm.power_state[state_index].clock_info[0];
|
||||
}
|
||||
state_index++;
|
||||
break;
|
||||
case 2:
|
||||
rdev->pm.power_state[state_index].num_clock_modes = 1;
|
||||
rdev->pm.power_state[state_index].clock_info[0].mclk =
|
||||
le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
|
||||
rdev->pm.power_state[state_index].clock_info[0].sclk =
|
||||
le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
|
||||
/* skip invalid modes */
|
||||
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
|
||||
continue;
|
||||
/* skip overclock modes for now */
|
||||
if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
|
||||
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[0].sclk >
|
||||
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
|
||||
continue;
|
||||
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
|
||||
power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
|
||||
misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
|
||||
misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
|
||||
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
|
||||
VOLTAGE_GPIO;
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
|
||||
radeon_lookup_gpio(rdev,
|
||||
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
|
||||
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
|
||||
true;
|
||||
else
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
|
||||
false;
|
||||
} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
|
||||
VOLTAGE_VDDC;
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
|
||||
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
|
||||
}
|
||||
/* order matters! */
|
||||
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_POWERSAVE;
|
||||
if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BATTERY;
|
||||
if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BATTERY;
|
||||
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BALANCED;
|
||||
if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_PERFORMANCE;
|
||||
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BALANCED;
|
||||
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_DEFAULT;
|
||||
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
|
||||
rdev->pm.power_state[state_index].default_clock_mode =
|
||||
&rdev->pm.power_state[state_index].clock_info[0];
|
||||
}
|
||||
state_index++;
|
||||
break;
|
||||
case 3:
|
||||
rdev->pm.power_state[state_index].num_clock_modes = 1;
|
||||
rdev->pm.power_state[state_index].clock_info[0].mclk =
|
||||
le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
|
||||
rdev->pm.power_state[state_index].clock_info[0].sclk =
|
||||
le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
|
||||
/* skip invalid modes */
|
||||
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
|
||||
continue;
|
||||
/* skip overclock modes for now */
|
||||
if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
|
||||
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[0].sclk >
|
||||
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
|
||||
continue;
|
||||
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
|
||||
power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
|
||||
misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
|
||||
misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
|
||||
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
|
||||
VOLTAGE_GPIO;
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
|
||||
radeon_lookup_gpio(rdev,
|
||||
power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
|
||||
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
|
||||
true;
|
||||
else
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
|
||||
false;
|
||||
} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
|
||||
VOLTAGE_VDDC;
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
|
||||
power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
|
||||
if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
|
||||
true;
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
|
||||
power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
|
||||
}
|
||||
}
|
||||
/* order matters! */
|
||||
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_POWERSAVE;
|
||||
if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BATTERY;
|
||||
if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BATTERY;
|
||||
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BALANCED;
|
||||
if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_PERFORMANCE;
|
||||
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BALANCED;
|
||||
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_DEFAULT;
|
||||
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
|
||||
rdev->pm.power_state[state_index].default_clock_mode =
|
||||
&rdev->pm.power_state[state_index].clock_info[0];
|
||||
}
|
||||
state_index++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else if (frev == 4) {
|
||||
for (i = 0; i < power_info->info_4.ucNumStates; i++) {
|
||||
mode_index = 0;
|
||||
power_state = (struct _ATOM_PPLIB_STATE *)
|
||||
(mode_info->atom_context->bios +
|
||||
data_offset +
|
||||
le16_to_cpu(power_info->info_4.usStateArrayOffset) +
|
||||
i * power_info->info_4.ucStateEntrySize);
|
||||
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
|
||||
(mode_info->atom_context->bios +
|
||||
data_offset +
|
||||
le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
|
||||
(power_state->ucNonClockStateIndex *
|
||||
power_info->info_4.ucNonClockSize));
|
||||
for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
|
||||
(struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
|
||||
(mode_info->atom_context->bios +
|
||||
data_offset +
|
||||
le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
|
||||
(power_state->ucClockStateIndices[j] *
|
||||
power_info->info_4.ucClockInfoSize));
|
||||
sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
|
||||
sclk |= clock_info->ucLowEngineClockHigh << 16;
|
||||
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
|
||||
/* skip invalid modes */
|
||||
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
|
||||
continue;
|
||||
/* skip overclock modes for now */
|
||||
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
|
||||
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
|
||||
continue;
|
||||
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
|
||||
VOLTAGE_SW;
|
||||
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
|
||||
clock_info->usVDDC;
|
||||
mode_index++;
|
||||
} else {
|
||||
struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
|
||||
(struct _ATOM_PPLIB_R600_CLOCK_INFO *)
|
||||
(mode_info->atom_context->bios +
|
||||
data_offset +
|
||||
le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
|
||||
(power_state->ucClockStateIndices[j] *
|
||||
power_info->info_4.ucClockInfoSize));
|
||||
sclk = le16_to_cpu(clock_info->usEngineClockLow);
|
||||
sclk |= clock_info->ucEngineClockHigh << 16;
|
||||
mclk = le16_to_cpu(clock_info->usMemoryClockLow);
|
||||
mclk |= clock_info->ucMemoryClockHigh << 16;
|
||||
rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
|
||||
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
|
||||
/* skip invalid modes */
|
||||
if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
|
||||
continue;
|
||||
/* skip overclock modes for now */
|
||||
if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
|
||||
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
|
||||
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
|
||||
continue;
|
||||
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
|
||||
VOLTAGE_SW;
|
||||
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
|
||||
clock_info->usVDDC;
|
||||
mode_index++;
|
||||
}
|
||||
}
|
||||
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
|
||||
if (mode_index) {
|
||||
misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
|
||||
misc2 = le16_to_cpu(non_clock_info->usClassification);
|
||||
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
|
||||
((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
|
||||
ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
|
||||
switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
|
||||
case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BATTERY;
|
||||
break;
|
||||
case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BALANCED;
|
||||
break;
|
||||
case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_PERFORMANCE;
|
||||
break;
|
||||
}
|
||||
if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_DEFAULT;
|
||||
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
|
||||
rdev->pm.power_state[state_index].default_clock_mode =
|
||||
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
|
||||
}
|
||||
state_index++;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* XXX figure out some good default low power mode for cards w/out power tables */
|
||||
}
|
||||
|
||||
if (rdev->pm.default_power_state == NULL) {
|
||||
/* add the default mode */
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_DEFAULT;
|
||||
rdev->pm.power_state[state_index].num_clock_modes = 1;
|
||||
rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
|
||||
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
|
||||
rdev->pm.power_state[state_index].default_clock_mode =
|
||||
&rdev->pm.power_state[state_index].clock_info[0];
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
|
||||
if (rdev->asic->get_pcie_lanes)
|
||||
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
|
||||
else
|
||||
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
|
||||
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
|
||||
state_index++;
|
||||
}
|
||||
rdev->pm.num_power_states = state_index;
|
||||
|
||||
rdev->pm.current_power_state = rdev->pm.default_power_state;
|
||||
rdev->pm.current_clock_mode =
|
||||
rdev->pm.default_power_state->default_clock_mode;
|
||||
}
|
||||
|
||||
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
|
||||
{
|
||||
DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
|
||||
@ -1395,16 +1822,6 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
|
||||
{
|
||||
ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);
|
||||
|
||||
args.ucEnable = enable;
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
|
||||
{
|
||||
GET_ENGINE_CLOCK_PS_ALLOCATION args;
|
||||
|
@ -93,6 +93,38 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return true;
}

/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.
*/
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
int ret;
int size = 64 * 1024;
int i;

if (!radeon_atrm_supported(rdev->pdev))
return false;

rdev->bios = kmalloc(size, GFP_KERNEL);
if (!rdev->bios) {
DRM_ERROR("Unable to allocate bios\n");
return false;
}

for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
ret = radeon_atrm_get_bios_chunk(rdev->bios,
(i * ATRM_BIOS_PAGE),
ATRM_BIOS_PAGE);
if (ret <= 0)
break;
}

if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
kfree(rdev->bios);
return false;
}
return true;
}
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@ -388,17 +420,17 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
return legacy_read_disabled_bios(rdev);
}


bool radeon_get_bios(struct radeon_device *rdev)
{
bool r;
uint16_t tmp;

if (rdev->flags & RADEON_IS_IGP) {
r = radeon_atrm_get_bios(rdev);
if (r == false)
r = igp_read_bios_from_vram(rdev);
if (r == false)
r = radeon_read_bios(rdev);
} else
r = radeon_read_bios(rdev);
if (r == false) {
r = radeon_read_disabled_bios(rdev);
}
@ -408,6 +440,13 @@ bool radeon_get_bios(struct radeon_device *rdev)
return false;
}
if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
goto free_bios;
}

tmp = RBIOS16(0x18);
if (RBIOS8(tmp + 0x14) != 0x0) {
DRM_INFO("Not an x86 BIOS ROM, not using.\n");
goto free_bios;
}

@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev)
struct radeon_device *rdev = dev->dev_private;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
struct radeon_pll *dcpll = &rdev->clock.dcpll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
int ret;
@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev)
p2pll->max_frac_feedback_div = 0;
}

/* dcpll is DCE4 only */
dcpll->min_post_div = 2;
dcpll->max_post_div = 0x7f;
dcpll->min_frac_feedback_div = 0;
dcpll->max_frac_feedback_div = 9;
dcpll->min_ref_div = 2;
dcpll->max_ref_div = 0x3ff;
dcpll->min_feedback_div = 4;
dcpll->max_feedback_div = 0xfff;
dcpll->best_vco = 0;

p1pll->min_ref_div = 2;
p1pll->max_ref_div = 0x3ff;
p1pll->min_feedback_div = 4;
@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev)
/* XXX make sure engine is idle */

if (radeon_dynclks != -1) {
if (radeon_dynclks)
if (radeon_dynclks) {
if (rdev->asic->set_clock_gating)
radeon_set_clock_gating(rdev, 1);
}
}
radeon_apply_clock_quirks(rdev);
return 0;

@ -150,6 +150,9 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
int rev;
uint16_t offset = 0, check_offset;

if (!rdev->bios)
return 0;

switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
@ -443,6 +446,39 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,

}

bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
int edid_info;
struct edid *edid;
edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
if (!edid_info)
return false;

edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
GFP_KERNEL);
if (edid == NULL)
return false;

memcpy((unsigned char *)edid,
(unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);

if (!drm_edid_is_valid(edid)) {
kfree(edid);
return false;
}

rdev->mode_info.bios_hardcoded_edid = edid;
return true;
}

struct edid *
radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
{
if (rdev->mode_info.bios_hardcoded_edid)
return rdev->mode_info.bios_hardcoded_edid;
return NULL;
}

static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
int ddc_line)
{
@ -486,9 +522,65 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.y_data_reg = ddc_line;
|
||||
}
|
||||
|
||||
if (rdev->family < CHIP_R200)
|
||||
switch (rdev->family) {
|
||||
case CHIP_R100:
|
||||
case CHIP_RV100:
|
||||
case CHIP_RS100:
|
||||
case CHIP_RV200:
|
||||
case CHIP_RS200:
|
||||
case CHIP_RS300:
|
||||
switch (ddc_line) {
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
/* in theory this should be hw capable,
|
||||
* but it doesn't seem to work
|
||||
*/
|
||||
i2c.hw_capable = false;
|
||||
break;
|
||||
default:
|
||||
i2c.hw_capable = false;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case CHIP_R200:
|
||||
switch (ddc_line) {
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
case RADEON_GPIO_MONID:
|
||||
i2c.hw_capable = true;
|
||||
break;
|
||||
default:
|
||||
i2c.hw_capable = false;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case CHIP_RV250:
|
||||
case CHIP_RV280:
|
||||
switch (ddc_line) {
|
||||
case RADEON_GPIO_VGA_DDC:
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
case RADEON_GPIO_CRT2_DDC:
|
||||
i2c.hw_capable = true;
|
||||
break;
|
||||
default:
|
||||
i2c.hw_capable = false;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case CHIP_R300:
|
||||
case CHIP_R350:
|
||||
switch (ddc_line) {
|
||||
case RADEON_GPIO_VGA_DDC:
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
i2c.hw_capable = true;
|
||||
break;
|
||||
default:
|
||||
i2c.hw_capable = false;
|
||||
else {
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case CHIP_RV350:
|
||||
case CHIP_RV380:
|
||||
case CHIP_RS400:
|
||||
case CHIP_RS480:
|
||||
switch (ddc_line) {
|
||||
case RADEON_GPIO_VGA_DDC:
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
@ -504,9 +596,14 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
|
||||
i2c.hw_capable = false;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
i2c.hw_capable = false;
|
||||
break;
|
||||
}
|
||||
i2c.mm_i2c = false;
|
||||
i2c.i2c_id = 0;
|
||||
i2c.hpd_id = 0;
|
||||
|
||||
if (ddc_line)
|
||||
i2c.valid = true;
|
||||
@ -527,9 +624,6 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
|
||||
int8_t rev;
|
||||
uint16_t sclk, mclk;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
return false;
|
||||
|
||||
pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
|
||||
if (pll_info) {
|
||||
rev = RBIOS8(pll_info);
|
||||
@ -654,9 +748,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
|
||||
if (!p_dac)
|
||||
return NULL;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
goto out;
|
||||
|
||||
/* check CRT table */
|
||||
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
|
||||
if (dac_info) {
|
||||
@ -673,7 +764,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
|
||||
found = 1;
|
||||
}
|
||||
|
||||
out:
|
||||
if (!found) /* fallback to defaults */
|
||||
radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
|
||||
|
||||
@ -687,9 +777,6 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
|
||||
uint16_t tv_info;
|
||||
enum radeon_tv_std tv_std = TV_STD_NTSC;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
return tv_std;
|
||||
|
||||
tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
|
||||
if (tv_info) {
|
||||
if (RBIOS8(tv_info + 6) == 'T') {
|
||||
@ -793,9 +880,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
|
||||
if (!tv_dac)
|
||||
return NULL;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
goto out;
|
||||
|
||||
/* first check TV table */
|
||||
dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
|
||||
if (dac_info) {
|
||||
@ -857,7 +941,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
if (!found) /* fallback to defaults */
|
||||
radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
|
||||
|
||||
@ -945,11 +1028,6 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
|
||||
int tmp, i;
|
||||
struct radeon_encoder_lvds *lvds = NULL;
|
||||
|
||||
if (rdev->bios == NULL) {
|
||||
lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
|
||||
|
||||
if (lcd_info) {
|
||||
@ -1050,7 +1128,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
|
||||
DRM_INFO("No panel info found in BIOS\n");
|
||||
lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
|
||||
}
|
||||
out:
|
||||
|
||||
if (lvds)
|
||||
encoder->native_mode = lvds->native_mode;
|
||||
return lvds;
|
||||
@ -1102,9 +1180,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
|
||||
int i, n;
|
||||
uint8_t ver;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
return false;
|
||||
|
||||
tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
|
||||
|
||||
if (tmds_info) {
|
||||
@ -1184,9 +1259,6 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
|
||||
enum radeon_combios_ddc gpio;
|
||||
struct radeon_i2c_bus_rec i2c_bus;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
return false;
|
||||
|
||||
tmds->i2c_bus = NULL;
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
|
||||
@ -1253,7 +1325,10 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
|
||||
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
|
||||
break;
|
||||
case DDC_LCD: /* MM i2c */
|
||||
DRM_ERROR("MM i2c requires hw i2c engine\n");
|
||||
i2c_bus.valid = true;
|
||||
i2c_bus.hw_capable = true;
|
||||
i2c_bus.mm_i2c = true;
|
||||
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unsupported gpio %d\n", gpio);
|
||||
@ -1279,47 +1354,47 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
|
||||
rdev->mode_info.connector_table = radeon_connector_table;
|
||||
if (rdev->mode_info.connector_table == CT_NONE) {
|
||||
#ifdef CONFIG_PPC_PMAC
|
||||
if (machine_is_compatible("PowerBook3,3")) {
|
||||
if (of_machine_is_compatible("PowerBook3,3")) {
|
||||
/* powerbook with VGA */
|
||||
rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
|
||||
} else if (machine_is_compatible("PowerBook3,4") ||
|
||||
machine_is_compatible("PowerBook3,5")) {
|
||||
} else if (of_machine_is_compatible("PowerBook3,4") ||
|
||||
of_machine_is_compatible("PowerBook3,5")) {
|
||||
/* powerbook with internal tmds */
|
||||
rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
|
||||
} else if (machine_is_compatible("PowerBook5,1") ||
|
||||
machine_is_compatible("PowerBook5,2") ||
|
||||
machine_is_compatible("PowerBook5,3") ||
|
||||
machine_is_compatible("PowerBook5,4") ||
|
||||
machine_is_compatible("PowerBook5,5")) {
|
||||
} else if (of_machine_is_compatible("PowerBook5,1") ||
|
||||
of_machine_is_compatible("PowerBook5,2") ||
|
||||
of_machine_is_compatible("PowerBook5,3") ||
|
||||
of_machine_is_compatible("PowerBook5,4") ||
|
||||
of_machine_is_compatible("PowerBook5,5")) {
|
||||
/* powerbook with external single link tmds (sil164) */
|
||||
rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
|
||||
} else if (machine_is_compatible("PowerBook5,6")) {
|
||||
} else if (of_machine_is_compatible("PowerBook5,6")) {
|
||||
/* powerbook with external dual or single link tmds */
|
||||
rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
|
||||
} else if (machine_is_compatible("PowerBook5,7") ||
|
||||
machine_is_compatible("PowerBook5,8") ||
|
||||
machine_is_compatible("PowerBook5,9")) {
|
||||
} else if (of_machine_is_compatible("PowerBook5,7") ||
|
||||
of_machine_is_compatible("PowerBook5,8") ||
|
||||
of_machine_is_compatible("PowerBook5,9")) {
|
||||
/* PowerBook6,2 ? */
|
||||
/* powerbook with external dual link tmds (sil1178?) */
|
||||
rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
|
||||
} else if (machine_is_compatible("PowerBook4,1") ||
|
||||
machine_is_compatible("PowerBook4,2") ||
|
||||
machine_is_compatible("PowerBook4,3") ||
|
||||
machine_is_compatible("PowerBook6,3") ||
|
||||
machine_is_compatible("PowerBook6,5") ||
|
||||
machine_is_compatible("PowerBook6,7")) {
|
||||
} else if (of_machine_is_compatible("PowerBook4,1") ||
|
||||
of_machine_is_compatible("PowerBook4,2") ||
|
||||
of_machine_is_compatible("PowerBook4,3") ||
|
||||
of_machine_is_compatible("PowerBook6,3") ||
|
||||
of_machine_is_compatible("PowerBook6,5") ||
|
||||
of_machine_is_compatible("PowerBook6,7")) {
|
||||
/* ibook */
|
||||
rdev->mode_info.connector_table = CT_IBOOK;
|
||||
} else if (machine_is_compatible("PowerMac4,4")) {
|
||||
} else if (of_machine_is_compatible("PowerMac4,4")) {
|
||||
/* emac */
|
||||
rdev->mode_info.connector_table = CT_EMAC;
|
||||
} else if (machine_is_compatible("PowerMac10,1")) {
|
||||
} else if (of_machine_is_compatible("PowerMac10,1")) {
|
||||
/* mini with internal tmds */
|
||||
rdev->mode_info.connector_table = CT_MINI_INTERNAL;
|
||||
} else if (machine_is_compatible("PowerMac10,2")) {
|
||||
} else if (of_machine_is_compatible("PowerMac10,2")) {
|
||||
/* mini with external tmds */
|
||||
rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
|
||||
} else if (machine_is_compatible("PowerMac12,1")) {
|
||||
} else if (of_machine_is_compatible("PowerMac12,1")) {
|
||||
/* PowerMac8,1 ? */
|
||||
/* imac g5 isight */
|
||||
rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
|
||||
@ -1909,9 +1984,6 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
|
||||
struct radeon_i2c_bus_rec ddc_i2c;
|
||||
struct radeon_hpd hpd;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
return false;
|
||||
|
||||
conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
|
||||
if (conn_info) {
|
||||
for (i = 0; i < 4; i++) {
|
||||
@ -2278,6 +2350,115 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
|
||||
return true;
|
||||
}
|
||||
|
||||
void radeon_combios_get_power_modes(struct radeon_device *rdev)
|
||||
{
|
||||
struct drm_device *dev = rdev->ddev;
|
||||
u16 offset, misc, misc2 = 0;
|
||||
u8 rev, blocks, tmp;
|
||||
int state_index = 0;
|
||||
|
||||
rdev->pm.default_power_state = NULL;
|
||||
|
||||
if (rdev->flags & RADEON_IS_MOBILITY) {
|
||||
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
|
||||
if (offset) {
|
||||
rev = RBIOS8(offset);
|
||||
blocks = RBIOS8(offset + 0x2);
|
||||
/* power mode 0 tends to be the only valid one */
|
||||
rdev->pm.power_state[state_index].num_clock_modes = 1;
|
||||
rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
|
||||
rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
|
||||
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
|
||||
goto default_mode;
|
||||
/* skip overclock modes for now */
|
||||
if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
|
||||
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
|
||||
(rdev->pm.power_state[state_index].clock_info[0].sclk >
|
||||
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
|
||||
goto default_mode;
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_BATTERY;
|
||||
misc = RBIOS16(offset + 0x5 + 0x0);
|
||||
if (rev > 4)
|
||||
misc2 = RBIOS16(offset + 0x5 + 0xe);
|
||||
if (misc & 0x4) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
|
||||
if (misc & 0x8)
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
|
||||
true;
|
||||
else
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
|
||||
false;
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
|
||||
if (rev < 6) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
|
||||
RBIOS16(offset + 0x5 + 0xb) * 4;
|
||||
tmp = RBIOS8(offset + 0x5 + 0xd);
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
|
||||
} else {
|
||||
u8 entries = RBIOS8(offset + 0x5 + 0xb);
|
||||
u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
|
||||
if (entries && voltage_table_offset) {
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
|
||||
RBIOS16(voltage_table_offset) * 4;
|
||||
tmp = RBIOS8(voltage_table_offset + 0x2);
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
|
||||
} else
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
|
||||
}
|
||||
switch ((misc2 & 0x700) >> 8) {
|
||||
case 0:
|
||||
default:
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
|
||||
break;
|
||||
case 1:
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
|
||||
break;
|
||||
case 2:
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
|
||||
break;
|
||||
case 3:
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
|
||||
break;
|
||||
case 4:
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
|
||||
break;
|
||||
}
|
||||
} else
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
|
||||
if (rev > 6)
|
||||
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
|
||||
RBIOS8(offset + 0x5 + 0x10);
|
||||
state_index++;
|
||||
} else {
|
||||
/* XXX figure out some good default low power mode for mobility cards w/out power tables */
|
||||
}
|
||||
} else {
|
||||
/* XXX figure out some good default low power mode for desktop cards */
|
||||
}
|
||||
|
||||
default_mode:
|
||||
/* add the default mode */
|
||||
rdev->pm.power_state[state_index].type =
|
||||
POWER_STATE_TYPE_DEFAULT;
|
||||
rdev->pm.power_state[state_index].num_clock_modes = 1;
|
||||
rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
|
||||
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
|
||||
rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
|
||||
if (rdev->asic->get_pcie_lanes)
|
||||
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
|
||||
else
|
||||
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
|
||||
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
|
||||
rdev->pm.num_power_states = state_index + 1;
|
||||
|
||||
rdev->pm.current_power_state = rdev->pm.default_power_state;
|
||||
rdev->pm.current_clock_mode =
|
||||
rdev->pm.default_power_state->default_clock_mode;
|
||||
}
|
||||
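/*
 * Editor's sketch (not part of the commit): the switch on (misc2 & 0x700) >> 8
 * above maps the BIOS field to a voltage switching delay in steps of 33.  A
 * hypothetical helper with the same result, for illustration only:
 */
static u8 example_combios_voltage_delay(u16 misc2)
{
    u8 sel = (misc2 & 0x700) >> 8;

    /* selectors 0..4 give 0, 33, 66, 99, 132; anything else falls back to 0 */
    return (sel <= 4) ? sel * 33 : 0;
}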
|
||||
void radeon_external_tmds_setup(struct drm_encoder *encoder)
|
||||
{
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
@ -2289,23 +2470,21 @@ void radeon_external_tmds_setup(struct drm_encoder *encoder)
|
||||
switch (tmds->dvo_chip) {
|
||||
case DVO_SIL164:
|
||||
/* sil 164 */
|
||||
radeon_i2c_do_lock(tmds->i2c_bus, 1);
|
||||
radeon_i2c_sw_put_byte(tmds->i2c_bus,
|
||||
radeon_i2c_put_byte(tmds->i2c_bus,
|
||||
tmds->slave_addr,
|
||||
0x08, 0x30);
|
||||
radeon_i2c_sw_put_byte(tmds->i2c_bus,
|
||||
radeon_i2c_put_byte(tmds->i2c_bus,
|
||||
tmds->slave_addr,
|
||||
0x09, 0x00);
|
||||
radeon_i2c_sw_put_byte(tmds->i2c_bus,
|
||||
radeon_i2c_put_byte(tmds->i2c_bus,
|
||||
tmds->slave_addr,
|
||||
0x0a, 0x90);
|
||||
radeon_i2c_sw_put_byte(tmds->i2c_bus,
|
||||
radeon_i2c_put_byte(tmds->i2c_bus,
|
||||
tmds->slave_addr,
|
||||
0x0c, 0x89);
|
||||
radeon_i2c_sw_put_byte(tmds->i2c_bus,
|
||||
radeon_i2c_put_byte(tmds->i2c_bus,
|
||||
tmds->slave_addr,
|
||||
0x08, 0x3b);
|
||||
radeon_i2c_do_lock(tmds->i2c_bus, 0);
|
||||
break;
|
||||
case DVO_SIL1178:
|
||||
/* sil 1178 - untested */
|
||||
@ -2338,9 +2517,6 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
|
||||
uint32_t reg, val, and_mask, or_mask;
|
||||
struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
|
||||
|
||||
if (rdev->bios == NULL)
|
||||
return false;
|
||||
|
||||
if (!tmds)
|
||||
return false;
|
||||
|
||||
@ -2390,11 +2566,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
|
||||
index++;
|
||||
val = RBIOS8(index);
|
||||
index++;
|
||||
radeon_i2c_do_lock(tmds->i2c_bus, 1);
|
||||
radeon_i2c_sw_put_byte(tmds->i2c_bus,
|
||||
radeon_i2c_put_byte(tmds->i2c_bus,
|
||||
slave_addr,
|
||||
reg, val);
|
||||
radeon_i2c_do_lock(tmds->i2c_bus, 0);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown id %d\n", id >> 13);
|
||||
@ -2447,11 +2621,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
|
||||
reg = id & 0x1fff;
|
||||
val = RBIOS8(index);
|
||||
index += 1;
|
||||
radeon_i2c_do_lock(tmds->i2c_bus, 1);
|
||||
radeon_i2c_sw_put_byte(tmds->i2c_bus,
|
||||
radeon_i2c_put_byte(tmds->i2c_bus,
|
||||
tmds->slave_addr,
|
||||
reg, val);
|
||||
radeon_i2c_do_lock(tmds->i2c_bus, 0);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown id %d\n", id >> 13);
|
||||
|
@ -479,10 +479,8 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
|
||||
ret = connector_status_connected;
|
||||
else {
|
||||
if (radeon_connector->ddc_bus) {
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
|
||||
&radeon_connector->ddc_bus->adapter);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
if (radeon_connector->edid)
|
||||
ret = connector_status_connected;
|
||||
}
|
||||
@ -587,19 +585,14 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
|
||||
if (!encoder)
|
||||
ret = connector_status_disconnected;
|
||||
|
||||
if (radeon_connector->ddc_bus) {
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
if (radeon_connector->ddc_bus)
|
||||
dret = radeon_ddc_probe(radeon_connector);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
}
|
||||
if (dret) {
|
||||
if (radeon_connector->edid) {
|
||||
kfree(radeon_connector->edid);
|
||||
radeon_connector->edid = NULL;
|
||||
}
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
|
||||
if (!radeon_connector->edid) {
|
||||
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
|
||||
@ -744,19 +737,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
|
||||
enum drm_connector_status ret = connector_status_disconnected;
|
||||
bool dret = false;
|
||||
|
||||
if (radeon_connector->ddc_bus) {
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
if (radeon_connector->ddc_bus)
|
||||
dret = radeon_ddc_probe(radeon_connector);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
}
|
||||
if (dret) {
|
||||
if (radeon_connector->edid) {
|
||||
kfree(radeon_connector->edid);
|
||||
radeon_connector->edid = NULL;
|
||||
}
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
|
||||
if (!radeon_connector->edid) {
|
||||
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
|
||||
@ -952,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
|
||||
if (radeon_connector->edid)
|
||||
kfree(radeon_connector->edid);
|
||||
if (radeon_dig_connector->dp_i2c_bus)
|
||||
radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
|
||||
radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
|
||||
kfree(radeon_connector->con_priv);
|
||||
drm_sysfs_connector_remove(connector);
|
||||
drm_connector_cleanup(connector);
|
||||
@ -988,12 +976,10 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
|
||||
ret = connector_status_connected;
|
||||
}
|
||||
} else {
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
if (radeon_ddc_probe(radeon_connector)) {
|
||||
radeon_dig_connector->dp_sink_type = sink_type;
|
||||
ret = connector_status_connected;
|
||||
}
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -39,17 +39,21 @@
|
||||
#include <drm/drm_pciids.h>
|
||||
|
||||
|
||||
int radeon_dynclks = -1;
|
||||
int radeon_r4xx_atom = 0;
|
||||
int radeon_agpmode = -1;
|
||||
int radeon_gart_size = 512; /* default gart size */
|
||||
int radeon_benchmarking = 0;
|
||||
int radeon_connector_table = 0;
|
||||
int radeon_tv = 0;
|
||||
int radeon_modeset = 1;
|
||||
int radeon_new_pll = 1;
|
||||
int radeon_vram_limit = 0;
|
||||
int radeon_audio = 0;
|
||||
int radeon_no_wb;
|
||||
int radeon_modeset = -1;
|
||||
int radeon_dynclks = -1;
|
||||
int radeon_r4xx_atom = 0;
|
||||
int radeon_agpmode = 0;
|
||||
int radeon_vram_limit = 0;
|
||||
int radeon_gart_size = 512; /* default gart size */
|
||||
int radeon_benchmarking = 0;
|
||||
int radeon_testing = 0;
|
||||
int radeon_connector_table = 0;
|
||||
int radeon_tv = 1;
|
||||
int radeon_new_pll = -1;
|
||||
int radeon_dynpm = -1;
|
||||
int radeon_audio = 1;
|
||||
|
||||
|
||||
extern display_t *rdisplay;
|
||||
|
||||
@ -135,79 +139,102 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * MC common functions
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * This function will try to place VRAM at the base address provided
 * as a parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the CPU-invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails; the worst case is limiting VRAM.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if the VRAM size is smaller than the aperture
 * size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu ones).
 *
 * Note: the IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align the new size on a power of 2.
 */
|
||||
int radeon_mc_setup(struct radeon_device *rdev)
|
||||
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
|
||||
{
|
||||
uint32_t tmp;
|
||||
|
||||
/* Some chips have an "issue" with the memory controller, the
 * location must be aligned to the size. We just align it down,
 * too bad if we walk over the top of system memory; we don't
 * use DMA without a remap anyway.
 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP.
 */
/* FGLRX seems to set it up like this: VRAM at 0, then GART.
 */
/*
 * Note: from R6xx the address space is 40 bits, but here we only
 * use 32 bits (still have to see a card which would exhaust the 4G
 * address space).
 */
|
||||
if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
|
||||
/* vram location was already setup try to put gtt after
|
||||
* if it fits */
|
||||
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
|
||||
tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
|
||||
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
|
||||
rdev->mc.gtt_location = tmp;
|
||||
} else {
|
||||
if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
|
||||
printk(KERN_ERR "[drm] GTT too big to fit "
|
||||
"before or after vram location.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
rdev->mc.gtt_location = 0;
|
||||
}
|
||||
} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
|
||||
/* gtt location was already setup try to put vram before
|
||||
* if it fits */
|
||||
if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
|
||||
rdev->mc.vram_location = 0;
|
||||
} else {
|
||||
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
|
||||
tmp += (rdev->mc.mc_vram_size - 1);
|
||||
tmp &= ~(rdev->mc.mc_vram_size - 1);
|
||||
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
|
||||
rdev->mc.vram_location = tmp;
|
||||
} else {
|
||||
printk(KERN_ERR "[drm] vram too big to fit "
|
||||
"before or after GTT location.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
rdev->mc.vram_location = 0;
|
||||
tmp = rdev->mc.mc_vram_size;
|
||||
tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
|
||||
rdev->mc.gtt_location = tmp;
|
||||
mc->vram_start = base;
|
||||
if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
|
||||
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
|
||||
mc->real_vram_size = mc->aper_size;
|
||||
mc->mc_vram_size = mc->aper_size;
|
||||
}
|
||||
rdev->mc.vram_start = rdev->mc.vram_location;
|
||||
rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
|
||||
rdev->mc.gtt_start = rdev->mc.gtt_location;
|
||||
rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
|
||||
DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
|
||||
DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
|
||||
(unsigned)rdev->mc.vram_location,
|
||||
(unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
|
||||
DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
|
||||
DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
|
||||
(unsigned)rdev->mc.gtt_location,
|
||||
(unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
|
||||
return 0;
|
||||
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
|
||||
if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
|
||||
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
|
||||
mc->real_vram_size = mc->aper_size;
|
||||
mc->mc_vram_size = mc->aper_size;
|
||||
}
|
||||
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
|
||||
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
|
||||
mc->mc_vram_size >> 20, mc->vram_start,
|
||||
mc->vram_end, mc->real_vram_size >> 20);
|
||||
}
|
||||
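/*
 * Editor's sketch (not part of the commit): the placement rule implemented by
 * radeon_vram_location() above, reduced to its core.  "example_mc" is a
 * hypothetical stand-in for the few struct radeon_mc fields involved.
 */
struct example_mc {
    u64 vram_start, vram_end;
    u64 mc_vram_size, real_vram_size, aper_size;
};

static void example_vram_location(struct example_mc *mc, u64 base)
{
    mc->vram_start = base;
    /* if the MC-visible VRAM cannot fit below 4GB, clamp it to the aperture */
    if (mc->mc_vram_size > (0xFFFFFFFFULL - base + 1)) {
        mc->real_vram_size = mc->aper_size;
        mc->mc_vram_size = mc->aper_size;
    }
    mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
}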
|
||||
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function will try to place the GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function never fails.
 *
 * FIXME: when reducing GTT size align the new size on a power of 2.
 */
|
||||
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
|
||||
{
|
||||
u64 size_af, size_bf;
|
||||
|
||||
size_af = 0xFFFFFFFF - mc->vram_end;
|
||||
size_bf = mc->vram_start;
|
||||
if (size_bf > size_af) {
|
||||
if (mc->gtt_size > size_bf) {
|
||||
dev_warn(rdev->dev, "limiting GTT\n");
|
||||
mc->gtt_size = size_bf;
|
||||
}
|
||||
mc->gtt_start = mc->vram_start - mc->gtt_size;
|
||||
} else {
|
||||
if (mc->gtt_size > size_af) {
|
||||
dev_warn(rdev->dev, "limiting GTT\n");
|
||||
mc->gtt_size = size_af;
|
||||
}
|
||||
mc->gtt_start = mc->vram_end + 1;
|
||||
}
|
||||
mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
|
||||
dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
|
||||
mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
|
||||
}
|
||||
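/*
 * Editor's sketch (not part of the commit): a worked example of the rule in
 * radeon_gtt_location() above.  With VRAM at [0x00000000, 0x0FFFFFFF] (256MB)
 * and a requested 512MB GTT, size_bf = 0 and size_af = 0xF0000000, so the GTT
 * lands after VRAM: gtt_start = 0x10000000, gtt_end = 0x2FFFFFFF.  The same
 * decision, written as a hypothetical helper:
 */
static u64 example_gtt_start(u64 vram_start, u64 vram_end, u64 *gtt_size)
{
    u64 size_af = 0xFFFFFFFFULL - vram_end; /* space after VRAM */
    u64 size_bf = vram_start;               /* space before VRAM */

    if (size_bf > size_af) {
        if (*gtt_size > size_bf)
            *gtt_size = size_bf;            /* shrink to fit before VRAM */
        return vram_start - *gtt_size;
    }
    if (*gtt_size > size_af)
        *gtt_size = size_af;                /* shrink to fit after VRAM */
    return vram_end + 1;
}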
|
||||
/*
|
||||
* GPU helpers function.
|
||||
@ -217,7 +244,16 @@ bool radeon_card_posted(struct radeon_device *rdev)
|
||||
uint32_t reg;
|
||||
|
||||
/* first check CRTCs */
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
|
||||
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
|
||||
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
|
||||
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
|
||||
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
|
||||
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
|
||||
if (reg & EVERGREEN_CRTC_MASTER_EN)
|
||||
return true;
|
||||
} else if (ASIC_IS_AVIVO(rdev)) {
|
||||
reg = RREG32(AVIVO_D1CRTC_CONTROL) |
|
||||
RREG32(AVIVO_D2CRTC_CONTROL);
|
||||
if (reg & AVIVO_CRTC_EN) {
|
||||
@ -264,6 +300,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
|
||||
|
||||
int radeon_dummy_page_init(struct radeon_device *rdev)
|
||||
{
|
||||
if (rdev->dummy_page.page)
|
||||
return 0;
|
||||
rdev->dummy_page.page = AllocPage();
|
||||
if (rdev->dummy_page.page == NULL)
|
||||
return -ENOMEM;
|
||||
@ -342,7 +380,7 @@ void radeon_register_accessor_init(struct radeon_device *rdev)
|
||||
rdev->mc_rreg = &rs600_mc_rreg;
|
||||
rdev->mc_wreg = &rs600_mc_wreg;
|
||||
}
|
||||
if (rdev->family >= CHIP_R600) {
|
||||
if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
|
||||
rdev->pciep_rreg = &r600_pciep_rreg;
|
||||
rdev->pciep_wreg = &r600_pciep_wreg;
|
||||
}
|
||||
@ -361,21 +399,22 @@ int radeon_asic_init(struct radeon_device *rdev)
|
||||
case CHIP_RS100:
|
||||
case CHIP_RV200:
|
||||
case CHIP_RS200:
|
||||
rdev->asic = &r100_asic;
|
||||
break;
|
||||
case CHIP_R200:
|
||||
case CHIP_RV250:
|
||||
case CHIP_RS300:
|
||||
case CHIP_RV280:
|
||||
rdev->asic = &r100_asic;
|
||||
rdev->asic = &r200_asic;
|
||||
break;
|
||||
case CHIP_R300:
|
||||
case CHIP_R350:
|
||||
case CHIP_RV350:
|
||||
case CHIP_RV380:
|
||||
if (rdev->flags & RADEON_IS_PCIE)
|
||||
rdev->asic = &r300_asic_pcie;
|
||||
else
|
||||
rdev->asic = &r300_asic;
|
||||
if (rdev->flags & RADEON_IS_PCIE) {
|
||||
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
|
||||
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
|
||||
}
|
||||
break;
|
||||
case CHIP_R420:
|
||||
case CHIP_R423:
|
||||
@ -419,6 +458,13 @@ int radeon_asic_init(struct radeon_device *rdev)
|
||||
case CHIP_RV740:
|
||||
rdev->asic = &rv770_asic;
|
||||
break;
|
||||
case CHIP_CEDAR:
|
||||
case CHIP_REDWOOD:
|
||||
case CHIP_JUNIPER:
|
||||
case CHIP_CYPRESS:
|
||||
case CHIP_HEMLOCK:
|
||||
rdev->asic = &evergreen_asic;
|
||||
break;
|
||||
default:
|
||||
/* FIXME: not supported yet */
|
||||
return -EINVAL;
|
||||
@ -714,6 +760,8 @@ int radeon_device_init(struct radeon_device *rdev,
|
||||
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
|
||||
|
||||
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
|
||||
/* this will fail for cards that aren't VGA class devices, just
|
||||
* ignore it */
|
||||
// r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
|
||||
// if (r) {
|
||||
// return -EINVAL;
|
||||
@ -986,7 +1034,7 @@ u32_t drvEntry(int action, char *cmdline)
|
||||
return 0;
|
||||
};
|
||||
}
|
||||
dbgprintf("Radeon RC9 cmdline %s\n", cmdline);
|
||||
dbgprintf("Radeon RC10 cmdline %s\n", cmdline);
|
||||
|
||||
enum_pci_devices();
|
||||
|
||||
@ -1008,7 +1056,7 @@ u32_t drvEntry(int action, char *cmdline)
|
||||
if( (rdev->asic == &r600_asic) ||
|
||||
(rdev->asic == &rv770_asic))
|
||||
r600_2D_test(rdev);
|
||||
else
|
||||
else if (rdev->asic != &evergreen_asic)
|
||||
r100_2D_test(rdev);
|
||||
|
||||
err = RegService("DISPLAY", display_handler);
|
||||
@ -1018,3 +1066,10 @@ u32_t drvEntry(int action, char *cmdline)
|
||||
|
||||
return err;
|
||||
};
|
||||
|
||||
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
|
||||
{};
|
||||
|
||||
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
|
||||
{};
|
||||
|
||||
|
@ -68,6 +68,36 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
|
||||
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
|
||||
}
|
||||
|
||||
static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
int i;
|
||||
|
||||
DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
|
||||
WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
|
||||
|
||||
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
|
||||
|
||||
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
|
||||
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
|
||||
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
|
||||
|
||||
WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
|
||||
WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
|
||||
|
||||
WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
|
||||
for (i = 0; i < 256; i++) {
|
||||
WREG32(EVERGREEN_DC_LUT_30_COLOR,
|
||||
(radeon_crtc->lut_r[i] << 20) |
|
||||
(radeon_crtc->lut_g[i] << 10) |
|
||||
(radeon_crtc->lut_b[i] << 0));
|
||||
}
|
||||
}
|
||||
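/*
 * Editor's sketch (not part of the commit): the value written to
 * EVERGREEN_DC_LUT_30_COLOR above is a packed 10:10:10 entry.  Assuming the
 * lut_r/g/b tables already hold 10-bit components, one entry looks like this:
 */
static u32 example_lut_30_color(u16 r10, u16 g10, u16 b10)
{
    return ((u32)(r10 & 0x3ff) << 20) |
           ((u32)(g10 & 0x3ff) << 10) |
            (u32)(b10 & 0x3ff);
}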
|
||||
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
@ -100,7 +130,9 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
|
||||
if (!crtc->enabled)
|
||||
return;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
evergreen_crtc_load_lut(crtc);
|
||||
else if (ASIC_IS_AVIVO(rdev))
|
||||
avivo_crtc_load_lut(crtc);
|
||||
else
|
||||
legacy_crtc_load_lut(crtc);
|
||||
@ -361,6 +393,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
|
||||
|
||||
int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
|
||||
{
|
||||
struct drm_device *dev = radeon_connector->base.dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
int ret = 0;
|
||||
|
||||
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
|
||||
@ -373,11 +407,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
|
||||
if (!radeon_connector->ddc_bus)
|
||||
return -1;
|
||||
if (!radeon_connector->edid) {
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
}
|
||||
|
||||
/* some servers provide a hardcoded edid in rom for KVMs */
|
||||
if (!radeon_connector->edid)
|
||||
radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
|
||||
if (radeon_connector->edid) {
|
||||
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
|
||||
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
|
||||
@ -395,9 +429,7 @@ static int radeon_ddc_dump(struct drm_connector *connector)
|
||||
|
||||
if (!radeon_connector->ddc_bus)
|
||||
return -1;
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
if (edid) {
|
||||
kfree(edid);
|
||||
}
|
||||
@ -414,7 +446,7 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
|
||||
return n;
|
||||
}
|
||||
|
||||
void radeon_compute_pll(struct radeon_pll *pll,
|
||||
static void radeon_compute_pll_legacy(struct radeon_pll *pll,
|
||||
uint64_t freq,
|
||||
uint32_t *dot_clock_p,
|
||||
uint32_t *fb_div_p,
|
||||
@ -580,7 +612,99 @@ void radeon_compute_pll(struct radeon_pll *pll,
|
||||
*post_div_p = best_post_div;
|
||||
}
|
||||
|
||||
void radeon_compute_pll_avivo(struct radeon_pll *pll,
|
||||
static bool
|
||||
calc_fb_div(struct radeon_pll *pll,
|
||||
uint32_t freq,
|
||||
uint32_t post_div,
|
||||
uint32_t ref_div,
|
||||
uint32_t *fb_div,
|
||||
uint32_t *fb_div_frac)
|
||||
{
|
||||
fixed20_12 feedback_divider, a, b;
|
||||
u32 vco_freq;
|
||||
|
||||
vco_freq = freq * post_div;
|
||||
/* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
|
||||
a.full = rfixed_const(pll->reference_freq);
|
||||
feedback_divider.full = rfixed_const(vco_freq);
|
||||
feedback_divider.full = rfixed_div(feedback_divider, a);
|
||||
a.full = rfixed_const(ref_div);
|
||||
feedback_divider.full = rfixed_mul(feedback_divider, a);
|
||||
|
||||
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
|
||||
/* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
|
||||
a.full = rfixed_const(10);
|
||||
feedback_divider.full = rfixed_mul(feedback_divider, a);
|
||||
feedback_divider.full += rfixed_const_half(0);
|
||||
feedback_divider.full = rfixed_floor(feedback_divider);
|
||||
feedback_divider.full = rfixed_div(feedback_divider, a);
|
||||
|
||||
/* *fb_div = floor(feedback_divider); */
|
||||
a.full = rfixed_floor(feedback_divider);
|
||||
*fb_div = rfixed_trunc(a);
|
||||
/* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
|
||||
a.full = rfixed_const(10);
|
||||
b.full = rfixed_mul(feedback_divider, a);
|
||||
|
||||
feedback_divider.full = rfixed_floor(feedback_divider);
|
||||
feedback_divider.full = rfixed_mul(feedback_divider, a);
|
||||
feedback_divider.full = b.full - feedback_divider.full;
|
||||
*fb_div_frac = rfixed_trunc(feedback_divider);
|
||||
} else {
|
||||
/* *fb_div = floor(feedback_divider + 0.5); */
|
||||
feedback_divider.full += rfixed_const_half(0);
|
||||
feedback_divider.full = rfixed_floor(feedback_divider);
|
||||
|
||||
*fb_div = rfixed_trunc(feedback_divider);
|
||||
*fb_div_frac = 0;
|
||||
}
|
||||
|
||||
if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
|
||||
return false;
|
||||
else
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool
|
||||
calc_fb_ref_div(struct radeon_pll *pll,
|
||||
uint32_t freq,
|
||||
uint32_t post_div,
|
||||
uint32_t *fb_div,
|
||||
uint32_t *fb_div_frac,
|
||||
uint32_t *ref_div)
|
||||
{
|
||||
fixed20_12 ffreq, max_error, error, pll_out, a;
|
||||
u32 vco;
|
||||
|
||||
ffreq.full = rfixed_const(freq);
|
||||
/* max_error = ffreq * 0.0025; */
|
||||
a.full = rfixed_const(400);
|
||||
max_error.full = rfixed_div(ffreq, a);
|
||||
|
||||
for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
|
||||
if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
|
||||
vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
|
||||
vco = vco / ((*ref_div) * 10);
|
||||
|
||||
if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
|
||||
continue;
|
||||
|
||||
/* pll_out = vco / post_div; */
|
||||
a.full = rfixed_const(post_div);
|
||||
pll_out.full = rfixed_const(vco);
|
||||
pll_out.full = rfixed_div(pll_out, a);
|
||||
|
||||
if (pll_out.full >= ffreq.full) {
|
||||
error.full = pll_out.full - ffreq.full;
|
||||
if (error.full <= max_error.full)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void radeon_compute_pll_new(struct radeon_pll *pll,
|
||||
uint64_t freq,
|
||||
uint32_t *dot_clock_p,
|
||||
uint32_t *fb_div_p,
|
||||
@ -588,87 +712,94 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
|
||||
uint32_t *ref_div_p,
|
||||
uint32_t *post_div_p)
|
||||
{
|
||||
fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
|
||||
fixed20_12 pll_out_max, pll_out_min;
|
||||
fixed20_12 pll_in_max, pll_in_min;
|
||||
fixed20_12 reference_freq;
|
||||
fixed20_12 error, ffreq, a, b;
|
||||
u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
|
||||
u32 best_freq = 0, vco_frequency;
|
||||
|
||||
pll_out_max.full = rfixed_const(pll->pll_out_max);
|
||||
pll_out_min.full = rfixed_const(pll->pll_out_min);
|
||||
pll_in_max.full = rfixed_const(pll->pll_in_max);
|
||||
pll_in_min.full = rfixed_const(pll->pll_in_min);
|
||||
reference_freq.full = rfixed_const(pll->reference_freq);
|
||||
/* freq = freq / 10; */
|
||||
do_div(freq, 10);
|
||||
ffreq.full = rfixed_const(freq);
|
||||
error.full = rfixed_const(100 * 100);
|
||||
|
||||
/* max p */
|
||||
p.full = rfixed_div(pll_out_max, ffreq);
|
||||
p.full = rfixed_floor(p);
|
||||
if (pll->flags & RADEON_PLL_USE_POST_DIV) {
|
||||
post_div = pll->post_div;
|
||||
if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
|
||||
goto done;
|
||||
|
||||
/* min m */
|
||||
m.full = rfixed_div(reference_freq, pll_in_max);
|
||||
m.full = rfixed_ceil(m);
|
||||
vco_frequency = freq * post_div;
|
||||
if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
|
||||
goto done;
|
||||
|
||||
while (1) {
|
||||
n.full = rfixed_div(ffreq, reference_freq);
|
||||
n.full = rfixed_mul(n, m);
|
||||
n.full = rfixed_mul(n, p);
|
||||
if (pll->flags & RADEON_PLL_USE_REF_DIV) {
|
||||
ref_div = pll->reference_div;
|
||||
if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
|
||||
goto done;
|
||||
if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
|
||||
goto done;
|
||||
}
|
||||
} else {
|
||||
for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
|
||||
if (pll->flags & RADEON_PLL_LEGACY) {
|
||||
if ((post_div == 5) ||
|
||||
(post_div == 7) ||
|
||||
(post_div == 9) ||
|
||||
(post_div == 10) ||
|
||||
(post_div == 11))
|
||||
continue;
|
||||
}
|
||||
|
||||
f_vco.full = rfixed_div(n, m);
|
||||
f_vco.full = rfixed_mul(f_vco, reference_freq);
|
||||
|
||||
f_pclk.full = rfixed_div(f_vco, p);
|
||||
|
||||
if (f_pclk.full > ffreq.full)
|
||||
error.full = f_pclk.full - ffreq.full;
|
||||
else
|
||||
error.full = ffreq.full - f_pclk.full;
|
||||
error.full = rfixed_div(error, f_pclk);
|
||||
a.full = rfixed_const(100 * 100);
|
||||
error.full = rfixed_mul(error, a);
|
||||
|
||||
a.full = rfixed_mul(m, p);
|
||||
a.full = rfixed_div(n, a);
|
||||
best_freq.full = rfixed_mul(reference_freq, a);
|
||||
|
||||
if (rfixed_trunc(error) < 25)
|
||||
break;
|
||||
|
||||
a.full = rfixed_const(1);
|
||||
m.full = m.full + a.full;
|
||||
a.full = rfixed_div(reference_freq, m);
|
||||
if (a.full >= pll_in_min.full)
|
||||
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
|
||||
continue;
|
||||
|
||||
m.full = rfixed_div(reference_freq, pll_in_max);
|
||||
m.full = rfixed_ceil(m);
|
||||
a.full= rfixed_const(1);
|
||||
p.full = p.full - a.full;
|
||||
a.full = rfixed_mul(p, ffreq);
|
||||
if (a.full >= pll_out_min.full)
|
||||
vco_frequency = freq * post_div;
|
||||
if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
|
||||
continue;
|
||||
else {
|
||||
DRM_ERROR("Unable to find pll dividers\n");
|
||||
if (pll->flags & RADEON_PLL_USE_REF_DIV) {
|
||||
ref_div = pll->reference_div;
|
||||
if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
|
||||
goto done;
|
||||
if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
|
||||
break;
|
||||
} else {
|
||||
if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
a.full = rfixed_const(10);
|
||||
b.full = rfixed_mul(n, a);
|
||||
best_freq = pll->reference_freq * 10 * fb_div;
|
||||
best_freq += pll->reference_freq * fb_div_frac;
|
||||
best_freq = best_freq / (ref_div * post_div);
|
||||
|
||||
frac_n.full = rfixed_floor(n);
|
||||
frac_n.full = rfixed_mul(frac_n, a);
|
||||
frac_n.full = b.full - frac_n.full;
|
||||
done:
|
||||
if (best_freq == 0)
|
||||
DRM_ERROR("Couldn't find valid PLL dividers\n");
|
||||
|
||||
*dot_clock_p = rfixed_trunc(best_freq);
|
||||
*fb_div_p = rfixed_trunc(n);
|
||||
*frac_fb_div_p = rfixed_trunc(frac_n);
|
||||
*ref_div_p = rfixed_trunc(m);
|
||||
*post_div_p = rfixed_trunc(p);
|
||||
*dot_clock_p = best_freq / 10;
|
||||
*fb_div_p = fb_div;
|
||||
*frac_fb_div_p = fb_div_frac;
|
||||
*ref_div_p = ref_div;
|
||||
*post_div_p = post_div;
|
||||
|
||||
DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
|
||||
DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
|
||||
}
|
||||
|
||||
void radeon_compute_pll(struct radeon_pll *pll,
|
||||
uint64_t freq,
|
||||
uint32_t *dot_clock_p,
|
||||
uint32_t *fb_div_p,
|
||||
uint32_t *frac_fb_div_p,
|
||||
uint32_t *ref_div_p,
|
||||
uint32_t *post_div_p)
|
||||
{
|
||||
switch (pll->algo) {
|
||||
case PLL_ALGO_NEW:
|
||||
radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
|
||||
frac_fb_div_p, ref_div_p, post_div_p);
|
||||
break;
|
||||
case PLL_ALGO_LEGACY:
|
||||
default:
|
||||
radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
|
||||
frac_fb_div_p, ref_div_p, post_div_p);
|
||||
break;
|
||||
}
|
||||
}
|
||||
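/*
 * Editor's sketch (not part of the commit): the divider relation that the
 * values returned by radeon_compute_pll() are meant to satisfy, following the
 * integer math of radeon_compute_pll_new() above, where fb_div_frac is a
 * decimal tenth (effective feedback divider = fb_div + fb_div_frac / 10):
 *
 *   dot_clock = reference_freq * (fb_div + fb_div_frac/10) / (ref_div * post_div)
 *
 * A hypothetical helper mirroring that computation:
 */
static u32 example_pll_dot_clock(u32 reference_freq, u32 fb_div,
                                 u32 fb_div_frac, u32 ref_div, u32 post_div)
{
    u32 num = reference_freq * (fb_div * 10 + fb_div_frac);

    return num / (ref_div * post_div * 10);
}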
|
||||
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
|
||||
@ -819,7 +950,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
|
||||
|
||||
int radeon_modeset_init(struct radeon_device *rdev)
|
||||
{
|
||||
int num_crtc = 2, i;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
drm_mode_config_init(rdev->ddev);
|
||||
@ -842,11 +973,23 @@ int radeon_modeset_init(struct radeon_device *rdev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* check combios for a valid hardcoded EDID - Sun servers */
|
||||
if (!rdev->is_atom_bios) {
|
||||
/* check for hardcoded EDID in BIOS */
|
||||
radeon_combios_check_hardcoded_edid(rdev);
|
||||
}
|
||||
|
||||
if (rdev->flags & RADEON_SINGLE_CRTC)
|
||||
num_crtc = 1;
|
||||
rdev->num_crtc = 1;
|
||||
else {
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
rdev->num_crtc = 6;
|
||||
else
|
||||
rdev->num_crtc = 2;
|
||||
}
|
||||
|
||||
/* allocate crtcs */
|
||||
for (i = 0; i < num_crtc; i++) {
|
||||
for (i = 0; i < rdev->num_crtc; i++) {
|
||||
radeon_crtc_init(rdev->ddev, i);
|
||||
}
|
||||
|
||||
@ -863,6 +1006,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
|
||||
|
||||
void radeon_modeset_fini(struct radeon_device *rdev)
|
||||
{
|
||||
kfree(rdev->mode_info.bios_hardcoded_edid);
|
||||
|
||||
if (rdev->mode_info.mode_config_initialized) {
|
||||
radeon_hpd_fini(rdev);
|
||||
drm_mode_config_cleanup(rdev->ddev);
|
||||
|
@ -228,6 +228,32 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct radeon_connector_atom_dig *
|
||||
radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector;
|
||||
struct radeon_connector_atom_dig *dig_connector;
|
||||
|
||||
if (!rdev->is_atom_bios)
|
||||
return NULL;
|
||||
|
||||
connector = radeon_get_connector_for_encoder(encoder);
|
||||
if (!connector)
|
||||
return NULL;
|
||||
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
|
||||
if (!radeon_connector->con_priv)
|
||||
return NULL;
|
||||
|
||||
dig_connector = radeon_connector->con_priv;
|
||||
|
||||
return dig_connector;
|
||||
}
|
||||
|
||||
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
@ -236,6 +262,9 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
/* adjust pm to upcoming mode change */
|
||||
radeon_pm_compute_clocks(rdev);
|
||||
|
||||
/* set the active encoder to connector routing */
|
||||
radeon_encoder_set_active_device(encoder);
|
||||
drm_mode_set_crtcinfo(adjusted_mode, 0);
|
||||
@ -458,34 +487,20 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
struct radeon_connector_atom_dig *dig_connector =
|
||||
radeon_get_atom_connector_priv_from_encoder(encoder);
|
||||
union lvds_encoder_control args;
|
||||
int index = 0;
|
||||
int hdmi_detected = 0;
|
||||
uint8_t frev, crev;
|
||||
struct radeon_encoder_atom_dig *dig;
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector;
|
||||
struct radeon_connector_atom_dig *dig_connector;
|
||||
|
||||
connector = radeon_get_connector_for_encoder(encoder);
|
||||
if (!connector)
|
||||
if (!dig || !dig_connector)
|
||||
return;
|
||||
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
|
||||
if (!radeon_encoder->enc_priv)
|
||||
return;
|
||||
|
||||
dig = radeon_encoder->enc_priv;
|
||||
|
||||
if (!radeon_connector->con_priv)
|
||||
return;
|
||||
|
||||
if (drm_detect_hdmi_monitor(radeon_connector->edid))
|
||||
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
|
||||
hdmi_detected = 1;
|
||||
|
||||
dig_connector = radeon_connector->con_priv;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
@ -586,7 +601,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector;
|
||||
struct radeon_connector_atom_dig *radeon_dig_connector;
|
||||
struct radeon_connector_atom_dig *dig_connector;
|
||||
|
||||
connector = radeon_get_connector_for_encoder(encoder);
|
||||
if (!connector)
|
||||
@ -617,9 +632,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
|
||||
break;
|
||||
case DRM_MODE_CONNECTOR_DisplayPort:
|
||||
case DRM_MODE_CONNECTOR_eDP:
|
||||
radeon_dig_connector = radeon_connector->con_priv;
|
||||
if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
|
||||
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
|
||||
dig_connector = radeon_connector->con_priv;
|
||||
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
|
||||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
|
||||
return ATOM_ENCODER_MODE_DP;
|
||||
else if (drm_detect_hdmi_monitor(radeon_connector->edid))
|
||||
return ATOM_ENCODER_MODE_HDMI;
|
||||
@ -656,6 +671,18 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
|
||||
 * - 2 DIG encoder blocks.
 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
 *
 * DCE 4.0
 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
 * Supports up to 6 digital outputs
 * - 6 DIG encoder blocks.
 * - DIG to PHY mapping is hardcoded
 * DIG1 drives UNIPHY0 link A, A+B
 * DIG2 drives UNIPHY0 link B
 * DIG3 drives UNIPHY1 link A, A+B
 * DIG4 drives UNIPHY1 link B
 * DIG5 drives UNIPHY2 link A, A+B
 * DIG6 drives UNIPHY2 link B
 *
 * Routing
 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
 * Examples:
@ -664,87 +691,77 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
 */
|
||||
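/*
 * Editor's sketch (not part of the commit): the hardcoded DCE 4.0 DIG-to-PHY
 * mapping listed above, expressed as a hypothetical helper.  It matches the
 * DCE4 branch added to radeon_atom_pick_dig_encoder() later in this diff
 * (UNIPHY0/1/2 link A -> DIG1/3/5, link B -> DIG2/4/6, returned 0-based):
 */
static int example_dce4_pick_dig_encoder(int uniphy, bool linkb)
{
    return uniphy * 2 + (linkb ? 1 : 0);
}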
static void
|
||||
|
||||
union dig_encoder_control {
|
||||
DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
|
||||
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
|
||||
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
|
||||
};
|
||||
|
||||
void
|
||||
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
DIG_ENCODER_CONTROL_PS_ALLOCATION args;
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
struct radeon_connector_atom_dig *dig_connector =
|
||||
radeon_get_atom_connector_priv_from_encoder(encoder);
|
||||
union dig_encoder_control args;
|
||||
int index = 0, num = 0;
|
||||
uint8_t frev, crev;
|
||||
struct radeon_encoder_atom_dig *dig;
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector;
|
||||
struct radeon_connector_atom_dig *dig_connector;
|
||||
|
||||
connector = radeon_get_connector_for_encoder(encoder);
|
||||
if (!connector)
|
||||
if (!dig || !dig_connector)
|
||||
return;
|
||||
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
|
||||
if (!radeon_connector->con_priv)
|
||||
return;
|
||||
|
||||
dig_connector = radeon_connector->con_priv;
|
||||
|
||||
if (!radeon_encoder->enc_priv)
|
||||
return;
|
||||
|
||||
dig = radeon_encoder->enc_priv;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
|
||||
else {
|
||||
if (dig->dig_encoder)
|
||||
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
|
||||
else
|
||||
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
|
||||
}
|
||||
num = dig->dig_encoder + 1;
|
||||
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
|
||||
|
||||
args.ucAction = action;
|
||||
args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
|
||||
args.v1.ucAction = action;
|
||||
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
|
||||
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
|
||||
|
||||
if (ASIC_IS_DCE32(rdev)) {
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
|
||||
break;
|
||||
}
|
||||
if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
|
||||
if (dig_connector->dp_clock == 270000)
|
||||
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
|
||||
args.v1.ucLaneNum = dig_connector->dp_lane_count;
|
||||
} else if (radeon_encoder->pixel_clock > 165000)
|
||||
args.v1.ucLaneNum = 8;
|
||||
else
|
||||
args.v1.ucLaneNum = 4;
|
||||
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
args.v3.acConfig.ucDigSel = dig->dig_encoder;
|
||||
args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
|
||||
} else {
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
|
||||
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
|
||||
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
args.ucEncoderMode = atombios_get_encoder_mode(encoder);
|
||||
|
||||
if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
|
||||
if (dig_connector->dp_clock == 270000)
|
||||
args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
|
||||
args.ucLaneNum = dig_connector->dp_lane_count;
|
||||
} else if (radeon_encoder->pixel_clock > 165000)
|
||||
args.ucLaneNum = 8;
|
||||
else
|
||||
args.ucLaneNum = 4;
|
||||
|
||||
if (dig_connector->linkb)
|
||||
args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
|
||||
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
|
||||
else
|
||||
args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
|
||||
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
|
||||
}
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
|
||||
@ -753,6 +770,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
|
||||
union dig_transmitter_control {
|
||||
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
|
||||
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
|
||||
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
|
||||
};
|
||||
|
||||
void
|
||||
@ -761,37 +779,29 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
struct radeon_connector_atom_dig *dig_connector =
|
||||
radeon_get_atom_connector_priv_from_encoder(encoder);
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector;
|
||||
union dig_transmitter_control args;
|
||||
int index = 0, num = 0;
|
||||
uint8_t frev, crev;
|
||||
struct radeon_encoder_atom_dig *dig;
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector;
|
||||
struct radeon_connector_atom_dig *dig_connector;
|
||||
bool is_dp = false;
|
||||
int pll_id = 0;
|
||||
|
||||
if (!dig || !dig_connector)
|
||||
return;
|
||||
|
||||
connector = radeon_get_connector_for_encoder(encoder);
|
||||
if (!connector)
|
||||
return;
|
||||
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
|
||||
if (!radeon_encoder->enc_priv)
|
||||
return;
|
||||
|
||||
dig = radeon_encoder->enc_priv;
|
||||
|
||||
if (!radeon_connector->con_priv)
|
||||
return;
|
||||
|
||||
dig_connector = radeon_connector->con_priv;
|
||||
|
||||
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
|
||||
is_dp = true;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
if (ASIC_IS_DCE32(rdev))
|
||||
if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
|
||||
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
|
||||
else {
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
@ -821,7 +831,54 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
|
||||
else
|
||||
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
|
||||
}
|
||||
if (ASIC_IS_DCE32(rdev)) {
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
if (is_dp)
|
||||
args.v3.ucLaneNum = dig_connector->dp_lane_count;
|
||||
else if (radeon_encoder->pixel_clock > 165000)
|
||||
args.v3.ucLaneNum = 8;
|
||||
else
|
||||
args.v3.ucLaneNum = 4;
|
||||
|
||||
if (dig_connector->linkb) {
|
||||
args.v3.acConfig.ucLinkSel = 1;
|
||||
args.v3.acConfig.ucEncoderSel = 1;
|
||||
}
|
||||
|
||||
/* Select the PLL for the PHY
|
||||
* DP PHY should be clocked from external src if there is
|
||||
* one.
|
||||
*/
|
||||
if (encoder->crtc) {
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
|
||||
pll_id = radeon_crtc->pll_id;
|
||||
}
|
||||
if (is_dp && rdev->clock.dp_extclk)
|
||||
args.v3.acConfig.ucRefClkSource = 2; /* external src */
|
||||
else
|
||||
args.v3.acConfig.ucRefClkSource = pll_id;
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
args.v3.acConfig.ucTransmitterSel = 0;
|
||||
num = 0;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
args.v3.acConfig.ucTransmitterSel = 1;
|
||||
num = 1;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
args.v3.acConfig.ucTransmitterSel = 2;
|
||||
num = 2;
|
||||
break;
|
||||
}
|
||||
|
||||
if (is_dp)
|
||||
args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
|
||||
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
|
||||
if (dig->coherent_mode)
|
||||
args.v3.acConfig.fCoherentMode = 1;
|
||||
}
|
||||
} else if (ASIC_IS_DCE32(rdev)) {
|
||||
if (dig->dig_encoder == 1)
|
||||
args.v2.acConfig.ucEncoderSel = 1;
|
||||
if (dig_connector->linkb)
|
||||
@ -849,7 +906,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
|
||||
args.v2.acConfig.fCoherentMode = 1;
|
||||
}
|
||||
} else {
|
||||
|
||||
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
|
||||
|
||||
if (dig->dig_encoder)
|
||||
@ -1024,9 +1080,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
|
||||
|
||||
/* adjust pm to dpms change */
|
||||
radeon_pm_compute_clocks(rdev);
|
||||
}
|
||||
|
||||
union crtc_sourc_param {
|
||||
union crtc_source_param {
|
||||
SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
|
||||
SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
|
||||
};
|
||||
@ -1038,7 +1097,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
|
||||
union crtc_sourc_param args;
|
||||
union crtc_source_param args;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
|
||||
uint8_t frev, crev;
|
||||
struct radeon_encoder_atom_dig *dig;
|
||||
@ -1107,10 +1166,26 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
dig = radeon_encoder->enc_priv;
|
||||
if (dig->dig_encoder)
|
||||
switch (dig->dig_encoder) {
|
||||
case 0:
|
||||
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
|
||||
break;
|
||||
case 1:
|
||||
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
|
||||
else
|
||||
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
|
||||
break;
|
||||
case 2:
|
||||
args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
|
||||
break;
|
||||
case 3:
|
||||
args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
|
||||
break;
|
||||
case 4:
|
||||
args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
|
||||
break;
|
||||
case 5:
|
||||
args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
|
||||
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
|
||||
@ -1167,6 +1242,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
|
||||
}
|
||||
|
||||
/* set scaler clears this on some chips */
|
||||
/* XXX check DCE4 */
|
||||
if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
|
||||
if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
|
||||
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
|
||||
@ -1183,6 +1259,33 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
|
||||
struct drm_encoder *test_encoder;
|
||||
struct radeon_encoder_atom_dig *dig;
|
||||
uint32_t dig_enc_in_use = 0;
|
||||
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
struct radeon_connector_atom_dig *dig_connector =
|
||||
radeon_get_atom_connector_priv_from_encoder(encoder);
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
if (dig_connector->linkb)
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
if (dig_connector->linkb)
|
||||
return 3;
|
||||
else
|
||||
return 2;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
if (dig_connector->linkb)
|
||||
return 5;
|
||||
else
|
||||
return 4;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* on DCE32 an encoder can drive any block so just use the crtc id */
|
||||
if (ASIC_IS_DCE32(rdev)) {
|
||||
return radeon_crtc->crtc_id;
|
||||
@ -1254,6 +1357,16 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
/* disable the transmitter */
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
|
||||
/* setup and enable the encoder */
|
||||
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP);
|
||||
|
||||
/* init and enable the transmitter */
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
|
||||
} else {
|
||||
/* disable the encoder and transmitter */
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
|
||||
atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
|
||||
@ -1263,6 +1376,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
|
||||
}
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_DDI:
|
||||
atombios_ddia_setup(encoder, ATOM_ENABLE);
|
||||
@ -1282,6 +1396,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
|
||||
}
|
||||
atombios_apply_encoder_quirks(encoder, adjusted_mode);
|
||||
|
||||
/* XXX */
|
||||
if (!ASIC_IS_DCE4(rdev))
|
||||
r600_hdmi_setmode(encoder, adjusted_mode);
|
||||
}
|
||||
|
||||
@ -1480,10 +1596,18 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
|
||||
return;
|
||||
|
||||
encoder = &radeon_encoder->base;
|
||||
if (rdev->flags & RADEON_SINGLE_CRTC)
|
||||
switch (rdev->num_crtc) {
|
||||
case 1:
|
||||
encoder->possible_crtcs = 0x1;
|
||||
else
|
||||
break;
|
||||
case 2:
|
||||
default:
|
||||
encoder->possible_crtcs = 0x3;
|
||||
break;
|
||||
case 6:
|
||||
encoder->possible_crtcs = 0x3f;
|
||||
break;
|
||||
}
|
||||
|
||||
radeon_encoder->enc_priv = NULL;
|
||||
|
||||
|
@ -75,6 +75,11 @@ enum radeon_family {
|
||||
CHIP_RV730,
|
||||
CHIP_RV710,
|
||||
CHIP_RV740,
|
||||
CHIP_CEDAR,
|
||||
CHIP_REDWOOD,
|
||||
CHIP_JUNIPER,
|
||||
CHIP_CYPRESS,
|
||||
CHIP_HEMLOCK,
|
||||
CHIP_LAST,
|
||||
};
|
||||
|
||||
|
@ -147,14 +147,13 @@ int radeonfb_create(struct drm_device *dev,
|
||||
struct drm_mode_fb_cmd mode_cmd;
|
||||
struct drm_gem_object *gobj = NULL;
|
||||
struct radeon_bo *rbo = NULL;
|
||||
// struct device *device = &rdev->pdev->dev;
|
||||
struct device *device = &rdev->pdev->dev;
|
||||
int size, aligned_size, ret;
|
||||
u64 fb_gpuaddr;
|
||||
void *fbptr = NULL;
|
||||
unsigned long tmp;
|
||||
bool fb_tiled = false; /* useful for testing */
|
||||
u32 tiling_flags = 0;
|
||||
int crtc_count;
|
||||
|
||||
mode_cmd.width = surface_width;
|
||||
mode_cmd.height = surface_height;
|
||||
@ -235,7 +234,7 @@ int radeonfb_create(struct drm_device *dev,
|
||||
rdev->fbdev_rfb = rfb;
|
||||
rdev->fbdev_rbo = rbo;
|
||||
|
||||
info = framebuffer_alloc(sizeof(struct radeon_fb_device), NULL);
|
||||
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
|
||||
if (info == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
@ -245,11 +244,7 @@ int radeonfb_create(struct drm_device *dev,
|
||||
rfbdev = info->par;
|
||||
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
|
||||
rfbdev->helper.dev = dev;
|
||||
if (rdev->flags & RADEON_SINGLE_CRTC)
|
||||
crtc_count = 1;
|
||||
else
|
||||
crtc_count = 2;
|
||||
ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
|
||||
ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
|
||||
RADEONFB_CONN_LIMIT);
|
||||
if (ret)
|
||||
goto out_unref;
|
||||
@ -262,7 +257,7 @@ int radeonfb_create(struct drm_device *dev,
|
||||
info->flags = FBINFO_DEFAULT;
|
||||
info->fbops = &radeonfb_ops;
|
||||
|
||||
tmp = fb_gpuaddr - rdev->mc.vram_location;
|
||||
tmp = fb_gpuaddr - rdev->mc.vram_start;
|
||||
info->fix.smem_start = rdev->mc.aper_base + tmp;
|
||||
info->fix.smem_len = size;
|
||||
info->screen_base = fbptr;
|
||||
@ -362,6 +357,4 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(radeonfb_remove);
|
||||
|
||||
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
|
||||
unsigned t;
|
||||
unsigned p;
|
||||
int i, j;
|
||||
u64 page_base;
|
||||
|
||||
if (!rdev->gart.ready) {
|
||||
WARN(1, "trying to unbind memory from an uninitialized GART!\n");
|
||||
@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
|
||||
// pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
|
||||
// PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
rdev->gart.pages[p] = NULL;
|
||||
rdev->gart.pages_addr[p] = 0;
|
||||
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
|
||||
page_base = rdev->gart.pages_addr[p];
|
||||
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
|
||||
radeon_gart_set_page(rdev, t, 0);
|
||||
radeon_gart_set_page(rdev, t, page_base);
|
||||
page_base += RADEON_GPU_PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -187,11 +190,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
|
||||
|
||||
rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
|
||||
|
||||
//if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
|
||||
// /* FIXME: failed to map page (return -ENOMEM?) */
|
||||
// radeon_gart_unbind(rdev, offset, pages);
|
||||
// return -ENOMEM;
|
||||
//}
|
||||
|
||||
rdev->gart.pages[p] = pagelist[i];
|
||||
page_base = rdev->gart.pages_addr[p];
|
||||
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
|
||||
@ -204,8 +203,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_gart_restore(struct radeon_device *rdev)
|
||||
{
|
||||
int i, j, t;
|
||||
u64 page_base;
|
||||
|
||||
for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
|
||||
page_base = rdev->gart.pages_addr[i];
|
||||
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
|
||||
radeon_gart_set_page(rdev, t, page_base);
|
||||
page_base += RADEON_GPU_PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
mb();
|
||||
radeon_gart_tlb_flush(rdev);
|
||||
}
|
||||
|
||||
int radeon_gart_init(struct radeon_device *rdev)
|
||||
{
|
||||
int r, i;
|
||||
|
||||
if (rdev->gart.pages) {
|
||||
return 0;
|
||||
}
|
||||
@ -214,6 +231,9 @@ int radeon_gart_init(struct radeon_device *rdev)
|
||||
DRM_ERROR("Page size is smaller than GPU page size!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
r = radeon_dummy_page_init(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
/* Compute table size */
|
||||
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
|
||||
rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
|
||||
@ -232,6 +252,10 @@ int radeon_gart_init(struct radeon_device *rdev)
|
||||
radeon_gart_fini(rdev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* set GART entry to point to the dummy page by default */
|
||||
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
|
||||
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "drmP.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon.h"
|
||||
#include "atom.h"
|
||||
|
||||
/**
|
||||
* radeon_ddc_probe
|
||||
@ -59,7 +60,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
|
||||
}
|
||||
|
||||
|
||||
void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
|
||||
static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
|
||||
{
|
||||
struct radeon_device *rdev = i2c->dev->dev_private;
|
||||
struct radeon_i2c_bus_rec *rec = &i2c->rec;
|
||||
@ -71,7 +72,18 @@ void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
|
||||
*/
|
||||
if (rec->hw_capable) {
|
||||
if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
|
||||
if (rec->a_clk_reg == RADEON_GPIO_MONID) {
|
||||
u32 reg;
|
||||
|
||||
if (rdev->family >= CHIP_RV350)
|
||||
reg = RADEON_GPIO_MONID;
|
||||
else if ((rdev->family == CHIP_R300) ||
|
||||
(rdev->family == CHIP_R350))
|
||||
reg = RADEON_GPIO_DVI_DDC;
|
||||
else
|
||||
reg = RADEON_GPIO_CRT2_DDC;
|
||||
|
||||
mutex_lock(&rdev->dc_hw_i2c_mutex);
|
||||
if (rec->a_clk_reg == reg) {
|
||||
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
|
||||
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
|
||||
} else {
|
||||
@ -168,6 +180,692 @@ static void set_data(void *i2c_priv, int data)
|
||||
WREG32(rec->en_data_reg, val);
|
||||
}
|
||||
|
||||
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_pll *spll = &rdev->clock.spll;
|
||||
u32 sclk = radeon_get_engine_clock(rdev);
|
||||
u32 prescale = 0;
|
||||
u32 n, m;
|
||||
u8 loop;
|
||||
int i2c_clock;
|
||||
|
||||
switch (rdev->family) {
|
||||
case CHIP_R100:
|
||||
case CHIP_RV100:
|
||||
case CHIP_RS100:
|
||||
case CHIP_RV200:
|
||||
case CHIP_RS200:
|
||||
case CHIP_R200:
|
||||
case CHIP_RV250:
|
||||
case CHIP_RS300:
|
||||
case CHIP_RV280:
|
||||
case CHIP_R300:
|
||||
case CHIP_R350:
|
||||
case CHIP_RV350:
|
||||
n = (spll->reference_freq) / (4 * 6);
|
||||
for (loop = 1; loop < 255; loop++) {
|
||||
if ((loop * (loop - 1)) > n)
|
||||
break;
|
||||
}
|
||||
m = loop - 1;
|
||||
prescale = m | (loop << 8);
|
||||
break;
|
||||
case CHIP_RV380:
|
||||
case CHIP_RS400:
|
||||
case CHIP_RS480:
|
||||
case CHIP_R420:
|
||||
case CHIP_R423:
|
||||
case CHIP_RV410:
|
||||
sclk = radeon_get_engine_clock(rdev);
|
||||
prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
|
||||
break;
|
||||
case CHIP_RS600:
|
||||
case CHIP_RS690:
|
||||
case CHIP_RS740:
|
||||
/* todo */
|
||||
break;
|
||||
case CHIP_RV515:
|
||||
case CHIP_R520:
|
||||
case CHIP_RV530:
|
||||
case CHIP_RV560:
|
||||
case CHIP_RV570:
|
||||
case CHIP_R580:
|
||||
i2c_clock = 50;
|
||||
sclk = radeon_get_engine_clock(rdev);
|
||||
if (rdev->family == CHIP_R520)
|
||||
prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
|
||||
else
|
||||
prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
|
||||
break;
|
||||
case CHIP_R600:
|
||||
case CHIP_RV610:
|
||||
case CHIP_RV630:
|
||||
case CHIP_RV670:
|
||||
/* todo */
|
||||
break;
|
||||
case CHIP_RV620:
|
||||
case CHIP_RV635:
|
||||
case CHIP_RS780:
|
||||
case CHIP_RS880:
|
||||
case CHIP_RV770:
|
||||
case CHIP_RV730:
|
||||
case CHIP_RV710:
|
||||
case CHIP_RV740:
|
||||
/* todo */
|
||||
break;
|
||||
case CHIP_CEDAR:
|
||||
case CHIP_REDWOOD:
|
||||
case CHIP_JUNIPER:
|
||||
case CHIP_CYPRESS:
|
||||
case CHIP_HEMLOCK:
|
||||
/* todo */
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("i2c: unhandled radeon chip\n");
|
||||
break;
|
||||
}
|
||||
return prescale;
|
||||
}
|
||||
|
||||
|
||||
/* hw i2c engine for r1xx-4xx hardware
|
||||
* hw can buffer up to 15 bytes
|
||||
*/
|
||||
static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg *msgs, int num)
|
||||
{
|
||||
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
|
||||
struct radeon_device *rdev = i2c->dev->dev_private;
|
||||
struct radeon_i2c_bus_rec *rec = &i2c->rec;
|
||||
struct i2c_msg *p;
|
||||
int i, j, k, ret = num;
|
||||
u32 prescale;
|
||||
u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
|
||||
u32 tmp, reg;
|
||||
|
||||
mutex_lock(&rdev->dc_hw_i2c_mutex);
|
||||
/* take the pm lock since we need a constant sclk */
|
||||
mutex_lock(&rdev->pm.mutex);
|
||||
|
||||
prescale = radeon_get_i2c_prescale(rdev);
|
||||
|
||||
reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
|
||||
RADEON_I2C_START |
|
||||
RADEON_I2C_STOP |
|
||||
RADEON_I2C_GO);
|
||||
|
||||
if (rdev->is_atom_bios) {
|
||||
tmp = RREG32(RADEON_BIOS_6_SCRATCH);
|
||||
WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
|
||||
}
|
||||
|
||||
if (rec->mm_i2c) {
|
||||
i2c_cntl_0 = RADEON_I2C_CNTL_0;
|
||||
i2c_cntl_1 = RADEON_I2C_CNTL_1;
|
||||
i2c_data = RADEON_I2C_DATA;
|
||||
} else {
|
||||
i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
|
||||
i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
|
||||
i2c_data = RADEON_DVI_I2C_DATA;
|
||||
|
||||
switch (rdev->family) {
|
||||
case CHIP_R100:
|
||||
case CHIP_RV100:
|
||||
case CHIP_RS100:
|
||||
case CHIP_RV200:
|
||||
case CHIP_RS200:
|
||||
case CHIP_RS300:
|
||||
switch (rec->mask_clk_reg) {
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
/* no gpio select bit */
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("gpio not supported with hw i2c\n");
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
case CHIP_R200:
|
||||
/* only bit 4 on r200 */
|
||||
switch (rec->mask_clk_reg) {
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
|
||||
break;
|
||||
case RADEON_GPIO_MONID:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("gpio not supported with hw i2c\n");
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
case CHIP_RV250:
|
||||
case CHIP_RV280:
|
||||
/* bits 3 and 4 */
|
||||
switch (rec->mask_clk_reg) {
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
|
||||
break;
|
||||
case RADEON_GPIO_VGA_DDC:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
|
||||
break;
|
||||
case RADEON_GPIO_CRT2_DDC:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("gpio not supported with hw i2c\n");
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
case CHIP_R300:
|
||||
case CHIP_R350:
|
||||
/* only bit 4 on r300/r350 */
|
||||
switch (rec->mask_clk_reg) {
|
||||
case RADEON_GPIO_VGA_DDC:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
|
||||
break;
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("gpio not supported with hw i2c\n");
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
case CHIP_RV350:
|
||||
case CHIP_RV380:
|
||||
case CHIP_R420:
|
||||
case CHIP_R423:
|
||||
case CHIP_RV410:
|
||||
case CHIP_RS400:
|
||||
case CHIP_RS480:
|
||||
/* bits 3 and 4 */
|
||||
switch (rec->mask_clk_reg) {
|
||||
case RADEON_GPIO_VGA_DDC:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
|
||||
break;
|
||||
case RADEON_GPIO_DVI_DDC:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
|
||||
break;
|
||||
case RADEON_GPIO_MONID:
|
||||
reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("gpio not supported with hw i2c\n");
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("unsupported asic\n");
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* check for bus probe */
|
||||
p = &msgs[0];
|
||||
if ((num == 1) && (p->len == 0)) {
|
||||
WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
|
||||
RADEON_I2C_NACK |
|
||||
RADEON_I2C_HALT |
|
||||
RADEON_I2C_SOFT_RST));
|
||||
WREG32(i2c_data, (p->addr << 1) & 0xff);
|
||||
WREG32(i2c_data, 0);
|
||||
WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
|
||||
(1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
|
||||
RADEON_I2C_EN |
|
||||
(48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
|
||||
WREG32(i2c_cntl_0, reg);
|
||||
for (k = 0; k < 32; k++) {
|
||||
udelay(10);
|
||||
tmp = RREG32(i2c_cntl_0);
|
||||
if (tmp & RADEON_I2C_GO)
|
||||
continue;
|
||||
tmp = RREG32(i2c_cntl_0);
|
||||
if (tmp & RADEON_I2C_DONE)
|
||||
break;
|
||||
else {
|
||||
DRM_DEBUG("i2c write error 0x%08x\n", tmp);
|
||||
WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
|
||||
ret = -EIO;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
p = &msgs[i];
|
||||
for (j = 0; j < p->len; j++) {
|
||||
if (p->flags & I2C_M_RD) {
|
||||
WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
|
||||
RADEON_I2C_NACK |
|
||||
RADEON_I2C_HALT |
|
||||
RADEON_I2C_SOFT_RST));
|
||||
WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
|
||||
WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
|
||||
(1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
|
||||
RADEON_I2C_EN |
|
||||
(48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
|
||||
WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
|
||||
for (k = 0; k < 32; k++) {
|
||||
udelay(10);
|
||||
tmp = RREG32(i2c_cntl_0);
|
||||
if (tmp & RADEON_I2C_GO)
|
||||
continue;
|
||||
tmp = RREG32(i2c_cntl_0);
|
||||
if (tmp & RADEON_I2C_DONE)
|
||||
break;
|
||||
else {
|
||||
DRM_DEBUG("i2c read error 0x%08x\n", tmp);
|
||||
WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
|
||||
ret = -EIO;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
p->buf[j] = RREG32(i2c_data) & 0xff;
|
||||
} else {
|
||||
WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
|
||||
RADEON_I2C_NACK |
|
||||
RADEON_I2C_HALT |
|
||||
RADEON_I2C_SOFT_RST));
|
||||
WREG32(i2c_data, (p->addr << 1) & 0xff);
|
||||
WREG32(i2c_data, p->buf[j]);
|
||||
WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
|
||||
(1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
|
||||
RADEON_I2C_EN |
|
||||
(48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
|
||||
WREG32(i2c_cntl_0, reg);
|
||||
for (k = 0; k < 32; k++) {
|
||||
udelay(10);
|
||||
tmp = RREG32(i2c_cntl_0);
|
||||
if (tmp & RADEON_I2C_GO)
|
||||
continue;
|
||||
tmp = RREG32(i2c_cntl_0);
|
||||
if (tmp & RADEON_I2C_DONE)
|
||||
break;
|
||||
else {
|
||||
DRM_DEBUG("i2c write error 0x%08x\n", tmp);
|
||||
WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
|
||||
ret = -EIO;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
WREG32(i2c_cntl_0, 0);
|
||||
WREG32(i2c_cntl_1, 0);
|
||||
WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
|
||||
RADEON_I2C_NACK |
|
||||
RADEON_I2C_HALT |
|
||||
RADEON_I2C_SOFT_RST));
|
||||
|
||||
if (rdev->is_atom_bios) {
|
||||
tmp = RREG32(RADEON_BIOS_6_SCRATCH);
|
||||
tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
|
||||
WREG32(RADEON_BIOS_6_SCRATCH, tmp);
|
||||
}
|
||||
|
||||
mutex_unlock(&rdev->pm.mutex);
|
||||
mutex_unlock(&rdev->dc_hw_i2c_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* hw i2c engine for r5xx hardware
|
||||
* hw can buffer up to 15 bytes
|
||||
*/
|
||||
static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg *msgs, int num)
|
||||
{
|
||||
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
|
||||
struct radeon_device *rdev = i2c->dev->dev_private;
|
||||
struct radeon_i2c_bus_rec *rec = &i2c->rec;
|
||||
struct i2c_msg *p;
|
||||
int i, j, remaining, current_count, buffer_offset, ret = num;
|
||||
u32 prescale;
|
||||
u32 tmp, reg;
|
||||
u32 saved1, saved2;
|
||||
|
||||
mutex_lock(&rdev->dc_hw_i2c_mutex);
|
||||
/* take the pm lock since we need a constant sclk */
|
||||
mutex_lock(&rdev->pm.mutex);
|
||||
|
||||
prescale = radeon_get_i2c_prescale(rdev);
|
||||
|
||||
/* clear gpio mask bits */
|
||||
tmp = RREG32(rec->mask_clk_reg);
|
||||
tmp &= ~rec->mask_clk_mask;
|
||||
WREG32(rec->mask_clk_reg, tmp);
|
||||
tmp = RREG32(rec->mask_clk_reg);
|
||||
|
||||
tmp = RREG32(rec->mask_data_reg);
|
||||
tmp &= ~rec->mask_data_mask;
|
||||
WREG32(rec->mask_data_reg, tmp);
|
||||
tmp = RREG32(rec->mask_data_reg);
|
||||
|
||||
/* clear pin values */
|
||||
tmp = RREG32(rec->a_clk_reg);
|
||||
tmp &= ~rec->a_clk_mask;
|
||||
WREG32(rec->a_clk_reg, tmp);
|
||||
tmp = RREG32(rec->a_clk_reg);
|
||||
|
||||
tmp = RREG32(rec->a_data_reg);
|
||||
tmp &= ~rec->a_data_mask;
|
||||
WREG32(rec->a_data_reg, tmp);
|
||||
tmp = RREG32(rec->a_data_reg);
|
||||
|
||||
/* set the pins to input */
|
||||
tmp = RREG32(rec->en_clk_reg);
|
||||
tmp &= ~rec->en_clk_mask;
|
||||
WREG32(rec->en_clk_reg, tmp);
|
||||
tmp = RREG32(rec->en_clk_reg);
|
||||
|
||||
tmp = RREG32(rec->en_data_reg);
|
||||
tmp &= ~rec->en_data_mask;
|
||||
WREG32(rec->en_data_reg, tmp);
|
||||
tmp = RREG32(rec->en_data_reg);
|
||||
|
||||
/* */
|
||||
tmp = RREG32(RADEON_BIOS_6_SCRATCH);
|
||||
WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
|
||||
saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
|
||||
saved2 = RREG32(0x494);
|
||||
WREG32(0x494, saved2 | 0x1);
|
||||
|
||||
WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
|
||||
for (i = 0; i < 50; i++) {
|
||||
udelay(1);
|
||||
if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
|
||||
break;
|
||||
}
|
||||
if (i == 50) {
|
||||
DRM_ERROR("failed to get i2c bus\n");
|
||||
ret = -EBUSY;
|
||||
goto done;
|
||||
}
|
||||
|
||||
reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
|
||||
switch (rec->mask_clk_reg) {
|
||||
case AVIVO_DC_GPIO_DDC1_MASK:
|
||||
reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
|
||||
break;
|
||||
case AVIVO_DC_GPIO_DDC2_MASK:
|
||||
reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
|
||||
break;
|
||||
case AVIVO_DC_GPIO_DDC3_MASK:
|
||||
reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("gpio not supported with hw i2c\n");
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* check for bus probe */
|
||||
p = &msgs[0];
|
||||
if ((num == 1) && (p->len == 0)) {
|
||||
WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
|
||||
AVIVO_DC_I2C_NACK |
|
||||
AVIVO_DC_I2C_HALT));
|
||||
WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
|
||||
udelay(1);
|
||||
WREG32(AVIVO_DC_I2C_RESET, 0);
|
||||
|
||||
WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
|
||||
WREG32(AVIVO_DC_I2C_DATA, 0);
|
||||
|
||||
WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
|
||||
WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
|
||||
AVIVO_DC_I2C_DATA_COUNT(1) |
|
||||
(prescale << 16)));
|
||||
WREG32(AVIVO_DC_I2C_CONTROL1, reg);
|
||||
WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
|
||||
for (j = 0; j < 200; j++) {
|
||||
udelay(50);
|
||||
tmp = RREG32(AVIVO_DC_I2C_STATUS1);
|
||||
if (tmp & AVIVO_DC_I2C_GO)
|
||||
continue;
|
||||
tmp = RREG32(AVIVO_DC_I2C_STATUS1);
|
||||
if (tmp & AVIVO_DC_I2C_DONE)
|
||||
break;
|
||||
else {
|
||||
DRM_DEBUG("i2c write error 0x%08x\n", tmp);
|
||||
WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
|
||||
ret = -EIO;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
p = &msgs[i];
|
||||
remaining = p->len;
|
||||
buffer_offset = 0;
|
||||
if (p->flags & I2C_M_RD) {
|
||||
while (remaining) {
|
||||
if (remaining > 15)
|
||||
current_count = 15;
|
||||
else
|
||||
current_count = remaining;
|
||||
WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
|
||||
AVIVO_DC_I2C_NACK |
|
||||
AVIVO_DC_I2C_HALT));
|
||||
WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
|
||||
udelay(1);
|
||||
WREG32(AVIVO_DC_I2C_RESET, 0);
|
||||
|
||||
WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
|
||||
WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
|
||||
WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
|
||||
AVIVO_DC_I2C_DATA_COUNT(current_count) |
|
||||
(prescale << 16)));
|
||||
WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
|
||||
WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
|
||||
for (j = 0; j < 200; j++) {
|
||||
udelay(50);
|
||||
tmp = RREG32(AVIVO_DC_I2C_STATUS1);
|
||||
if (tmp & AVIVO_DC_I2C_GO)
|
||||
continue;
|
||||
tmp = RREG32(AVIVO_DC_I2C_STATUS1);
|
||||
if (tmp & AVIVO_DC_I2C_DONE)
|
||||
break;
|
||||
else {
|
||||
DRM_DEBUG("i2c read error 0x%08x\n", tmp);
|
||||
WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
|
||||
ret = -EIO;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
for (j = 0; j < current_count; j++)
|
||||
p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
|
||||
remaining -= current_count;
|
||||
buffer_offset += current_count;
|
||||
}
|
||||
} else {
|
||||
while (remaining) {
|
||||
if (remaining > 15)
|
||||
current_count = 15;
|
||||
else
|
||||
current_count = remaining;
|
||||
WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
|
||||
AVIVO_DC_I2C_NACK |
|
||||
AVIVO_DC_I2C_HALT));
|
||||
WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
|
||||
udelay(1);
|
||||
WREG32(AVIVO_DC_I2C_RESET, 0);
|
||||
|
||||
WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
|
||||
for (j = 0; j < current_count; j++)
|
||||
WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
|
||||
|
||||
WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
|
||||
WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
|
||||
AVIVO_DC_I2C_DATA_COUNT(current_count) |
|
||||
(prescale << 16)));
|
||||
WREG32(AVIVO_DC_I2C_CONTROL1, reg);
|
||||
WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
|
||||
for (j = 0; j < 200; j++) {
|
||||
udelay(50);
|
||||
tmp = RREG32(AVIVO_DC_I2C_STATUS1);
|
||||
if (tmp & AVIVO_DC_I2C_GO)
|
||||
continue;
|
||||
tmp = RREG32(AVIVO_DC_I2C_STATUS1);
|
||||
if (tmp & AVIVO_DC_I2C_DONE)
|
||||
break;
|
||||
else {
|
||||
DRM_DEBUG("i2c write error 0x%08x\n", tmp);
|
||||
WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
|
||||
ret = -EIO;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
remaining -= current_count;
|
||||
buffer_offset += current_count;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
|
||||
AVIVO_DC_I2C_NACK |
|
||||
AVIVO_DC_I2C_HALT));
|
||||
WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
|
||||
udelay(1);
|
||||
WREG32(AVIVO_DC_I2C_RESET, 0);
|
||||
|
||||
WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
|
||||
WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
|
||||
WREG32(0x494, saved2);
|
||||
tmp = RREG32(RADEON_BIOS_6_SCRATCH);
|
||||
tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
|
||||
WREG32(RADEON_BIOS_6_SCRATCH, tmp);
|
||||
|
||||
mutex_unlock(&rdev->pm.mutex);
|
||||
mutex_unlock(&rdev->dc_hw_i2c_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg *msgs, int num)
|
||||
{
|
||||
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
|
||||
int ret;
|
||||
|
||||
radeon_i2c_do_lock(i2c, 1);
|
||||
ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
|
||||
radeon_i2c_do_lock(i2c, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg *msgs, int num)
|
||||
{
|
||||
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
|
||||
struct radeon_device *rdev = i2c->dev->dev_private;
|
||||
struct radeon_i2c_bus_rec *rec = &i2c->rec;
|
||||
int ret;
|
||||
|
||||
switch (rdev->family) {
|
||||
case CHIP_R100:
|
||||
case CHIP_RV100:
|
||||
case CHIP_RS100:
|
||||
case CHIP_RV200:
|
||||
case CHIP_RS200:
|
||||
case CHIP_R200:
|
||||
case CHIP_RV250:
|
||||
case CHIP_RS300:
|
||||
case CHIP_RV280:
|
||||
case CHIP_R300:
|
||||
case CHIP_R350:
|
||||
case CHIP_RV350:
|
||||
case CHIP_RV380:
|
||||
case CHIP_R420:
|
||||
case CHIP_R423:
|
||||
case CHIP_RV410:
|
||||
case CHIP_RS400:
|
||||
case CHIP_RS480:
|
||||
if (rec->hw_capable)
|
||||
ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
|
||||
else
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_RS600:
|
||||
case CHIP_RS690:
|
||||
case CHIP_RS740:
|
||||
/* XXX fill in hw i2c implementation */
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_RV515:
|
||||
case CHIP_R520:
|
||||
case CHIP_RV530:
|
||||
case CHIP_RV560:
|
||||
case CHIP_RV570:
|
||||
case CHIP_R580:
|
||||
if (rec->hw_capable) {
|
||||
if (rec->mm_i2c)
|
||||
ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
|
||||
else
|
||||
ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
|
||||
} else
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_R600:
|
||||
case CHIP_RV610:
|
||||
case CHIP_RV630:
|
||||
case CHIP_RV670:
|
||||
/* XXX fill in hw i2c implementation */
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_RV620:
|
||||
case CHIP_RV635:
|
||||
case CHIP_RS780:
|
||||
case CHIP_RS880:
|
||||
case CHIP_RV770:
|
||||
case CHIP_RV730:
|
||||
case CHIP_RV710:
|
||||
case CHIP_RV740:
|
||||
/* XXX fill in hw i2c implementation */
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_CEDAR:
|
||||
case CHIP_REDWOOD:
|
||||
case CHIP_JUNIPER:
|
||||
case CHIP_CYPRESS:
|
||||
case CHIP_HEMLOCK:
|
||||
/* XXX fill in hw i2c implementation */
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("i2c: unhandled radeon chip\n");
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32 radeon_i2c_func(struct i2c_adapter *adap)
|
||||
{
|
||||
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
|
||||
}
|
||||
|
||||
static const struct i2c_algorithm radeon_i2c_algo = {
|
||||
.master_xfer = radeon_i2c_xfer,
|
||||
.functionality = radeon_i2c_func,
|
||||
};
|
||||
|
||||
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
|
||||
struct radeon_i2c_bus_rec *rec,
|
||||
const char *name)
|
||||
@ -179,23 +877,33 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
|
||||
if (i2c == NULL)
|
||||
return NULL;
|
||||
|
||||
i2c->dev = dev;
|
||||
i2c->adapter.algo_data = &i2c->algo.bit;
|
||||
i2c->algo.bit.setsda = set_data;
|
||||
i2c->algo.bit.setscl = set_clock;
|
||||
i2c->algo.bit.getsda = get_data;
|
||||
i2c->algo.bit.getscl = get_clock;
|
||||
i2c->algo.bit.udelay = 20;
|
||||
/* set the internal bit adapter */
|
||||
// i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
|
||||
i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
|
||||
// sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
|
||||
i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
|
||||
i2c->algo.radeon.bit_data.setsda = set_data;
|
||||
i2c->algo.radeon.bit_data.setscl = set_clock;
|
||||
i2c->algo.radeon.bit_data.getsda = get_data;
|
||||
i2c->algo.radeon.bit_data.getscl = get_clock;
|
||||
i2c->algo.radeon.bit_data.udelay = 20;
|
||||
/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
|
||||
* make this, 2 jiffies is a lot more reliable */
|
||||
i2c->algo.bit.timeout = 2;
|
||||
i2c->algo.bit.data = i2c;
|
||||
i2c->rec = *rec;
|
||||
ret = i2c_bit_add_bus(&i2c->adapter);
|
||||
i2c->algo.radeon.bit_data.timeout = 2;
|
||||
i2c->algo.radeon.bit_data.data = i2c;
|
||||
ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
|
||||
if (ret) {
|
||||
DRM_INFO("Failed to register i2c %s\n", name);
|
||||
DRM_ERROR("Failed to register internal bit i2c %s\n", name);
|
||||
goto out_free;
|
||||
}
|
||||
/* set the radeon i2c adapter */
|
||||
i2c->dev = dev;
|
||||
i2c->rec = *rec;
|
||||
// i2c->adapter.owner = THIS_MODULE;
|
||||
i2c_set_adapdata(&i2c->adapter, i2c);
|
||||
// sprintf(i2c->adapter.name, "Radeon i2c %s", name);
|
||||
i2c->adapter.algo_data = &i2c->algo.radeon;
|
||||
i2c->adapter.algo = &radeon_i2c_algo;
|
||||
|
||||
return i2c;
|
||||
out_free:
|
||||
@ -218,6 +926,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
|
||||
i2c->rec = *rec;
|
||||
// i2c->adapter.owner = THIS_MODULE;
|
||||
i2c->dev = dev;
|
||||
i2c_set_adapdata(&i2c->adapter, i2c);
|
||||
i2c->adapter.algo_data = &i2c->algo.dp;
|
||||
i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
|
||||
i2c->algo.dp.address = 0;
|
||||
@ -234,8 +943,14 @@ out_free:
|
||||
|
||||
}
|
||||
|
||||
|
||||
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
|
||||
{
|
||||
if (!i2c)
|
||||
return;
|
||||
kfree(i2c);
|
||||
}
|
||||
|
||||
void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
|
||||
{
|
||||
if (!i2c)
|
||||
return;
|
||||
@ -248,7 +963,7 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
|
||||
void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
|
||||
u8 slave_addr,
|
||||
u8 addr,
|
||||
u8 *val)
|
||||
@ -282,7 +997,7 @@ void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
|
||||
}
|
||||
}
|
||||
|
||||
void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
|
||||
void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
|
||||
u8 slave_addr,
|
||||
u8 addr,
|
||||
u8 val)
|
||||
|
@@ -321,11 +321,13 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
}
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
@@ -401,7 +403,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,

/* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */
radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;

base -= radeon_crtc->legacy_display_base_addr;

@@ -580,29 +582,6 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC_V_SYNC_POL
: 0));

/* TODO -> Dell Server */
if (0) {
uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);

dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;

/* For CRT on DAC2, don't turn it on if BIOS didn't
enable it, even it's detected.
*/
disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));

WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
WREG32(RADEON_DAC_CNTL2, dac2_cntl);
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
}

if (radeon_crtc->crtc_id) {
uint32_t crtc2_gen_cntl;
uint32_t disp2_merge_cntl;
@@ -724,6 +703,10 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
pll = &rdev->clock.p1pll;

pll->flags = RADEON_PLL_LEGACY;
if (radeon_new_pll == 1)
pll->algo = PLL_ALGO_NEW;
else
pll->algo = PLL_ALGO_LEGACY;

if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;

@@ -115,6 +115,9 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);

/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}

static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -214,6 +217,11 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;

/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);

/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
@@ -285,6 +293,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);

/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}

static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -470,6 +481,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);

/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}

static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -635,6 +649,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);

/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}

static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -842,6 +859,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);

/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}

static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)

@@ -83,6 +83,8 @@ struct radeon_i2c_bus_rec {
bool valid;
/* id used by atom */
uint8_t i2c_id;
/* id used by atom */
uint8_t hpd_id;
/* can be used with hw i2c engine */
bool hw_capable;
/* uses multi-media i2c engine */
@@ -113,6 +115,7 @@ struct radeon_tmds_pll {

#define RADEON_MAX_BIOS_CONNECTOR 16

/* pll flags */
#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
#define RADEON_PLL_USE_REF_DIV (1 << 2)
@@ -127,6 +130,12 @@ struct radeon_tmds_pll {
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define RADEON_PLL_USE_POST_DIV (1 << 12)

/* pll algo */
enum radeon_pll_algo {
PLL_ALGO_LEGACY,
PLL_ALGO_NEW
};

struct radeon_pll {
/* reference frequency */
uint32_t reference_freq;
@@ -157,6 +166,13 @@ struct radeon_pll {

/* pll id */
uint32_t id;
/* pll algo */
enum radeon_pll_algo algo;
};

struct i2c_algo_radeon_data {
struct i2c_adapter bit_adapter;
struct i2c_algo_bit_data bit_data;
};

struct radeon_i2c_chan {
@@ -164,7 +180,7 @@ struct radeon_i2c_chan {
struct drm_device *dev;
union {
struct i2c_algo_dp_aux_data dp;
struct i2c_algo_bit_data bit;
struct i2c_algo_radeon_data radeon;
} algo;
struct radeon_i2c_bus_rec rec;
};
@@ -193,7 +209,7 @@ struct radeon_mode_info {
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[2];
struct radeon_crtc *crtcs[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -202,7 +218,8 @@ struct radeon_mode_info {
struct drm_property *tv_std_property;
/* legacy TMDS PLL detect */
struct drm_property *tmds_pll_property;

/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
};

#define MAX_H_CODE_TIMING_LEN 32
@@ -237,6 +254,7 @@ struct radeon_crtc {
fixed20_12 vsc;
fixed20_12 hsc;
struct drm_display_mode native_mode;
int pll_id;
};

struct radeon_encoder_primary_dac {
@@ -303,6 +321,7 @@ struct radeon_encoder_atom_dig {
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
enum radeon_pll_algo pll_algo;
struct radeon_atom_ss *ss;
/* panel mode */
struct drm_display_mode native_mode;
@@ -398,6 +417,7 @@ extern void dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
@@ -411,11 +431,12 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 *val);
extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 slave_addr,
u8 addr,
u8 val);
@@ -432,14 +453,6 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *ref_div_p,
uint32_t *post_div_p);

extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p);

extern void radeon_setup_encoder_clones(struct drm_device *dev);

struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
@@ -473,6 +486,9 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);

extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
@@ -531,7 +547,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);

void radeon_get_clock_info(struct drm_device *dev);

@@ -179,7 +179,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)

if(bo->domain & RADEON_GEM_DOMAIN_VRAM)
{
bo->tbo.offset += (u64)bo->rdev->mc.vram_location;
bo->tbo.offset += (u64)bo->rdev->mc.vram_start;
}
else if (bo->domain & RADEON_GEM_DOMAIN_GTT)
{
@@ -191,7 +191,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
dbgprintf("pagelist %x\n", pagelist);
radeon_gart_bind(bo->rdev, bo->tbo.offset,
bo->tbo.vm_node->size, pagelist);
bo->tbo.offset += (u64)bo->rdev->mc.gtt_location;
bo->tbo.offset += (u64)bo->rdev->mc.gtt_start;
}
else
{
@@ -378,7 +378,7 @@ int radeon_fb_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,

bo->tbo.vm_node = vm_node;
bo->tbo.offset = bo->tbo.vm_node->start << PAGE_SHIFT;
bo->tbo.offset += (u64)bo->rdev->mc.vram_location;
bo->tbo.offset += (u64)bo->rdev->mc.vram_start;
bo->kptr = (void*)0xFE000000;
bo->pin_count = 1;

@ -18,21 +18,407 @@
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Rafał Miłecki <zajec5@gmail.com>
|
||||
* Alex Deucher <alexdeucher@gmail.com>
|
||||
*/
|
||||
#include "drmP.h"
|
||||
#include "radeon.h"
|
||||
#include "avivod.h"
|
||||
|
||||
int radeon_debugfs_pm_init(struct radeon_device *rdev);
|
||||
#define RADEON_IDLE_LOOP_MS 100
|
||||
#define RADEON_RECLOCK_DELAY_MS 200
|
||||
#define RADEON_WAIT_VBLANK_TIMEOUT 200
|
||||
|
||||
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
|
||||
static void radeon_pm_set_clocks(struct radeon_device *rdev);
|
||||
static void radeon_pm_idle_work_handler(struct work_struct *work);
|
||||
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
|
||||
|
||||
static const char *pm_state_names[4] = {
|
||||
"PM_STATE_DISABLED",
|
||||
"PM_STATE_MINIMUM",
|
||||
"PM_STATE_PAUSED",
|
||||
"PM_STATE_ACTIVE"
|
||||
};
|
||||
|
||||
static const char *pm_state_types[5] = {
|
||||
"Default",
|
||||
"Powersave",
|
||||
"Battery",
|
||||
"Balanced",
|
||||
"Performance",
|
||||
};
|
||||
|
||||
static void radeon_print_power_mode_info(struct radeon_device *rdev)
|
||||
{
|
||||
int i, j;
|
||||
bool is_default;
|
||||
|
||||
DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
|
||||
for (i = 0; i < rdev->pm.num_power_states; i++) {
|
||||
if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
|
||||
is_default = true;
|
||||
else
|
||||
is_default = false;
|
||||
DRM_INFO("State %d %s %s\n", i,
|
||||
pm_state_types[rdev->pm.power_state[i].type],
|
||||
is_default ? "(default)" : "");
|
||||
if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
|
||||
DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
|
||||
DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
|
||||
for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
|
||||
if (rdev->flags & RADEON_IS_IGP)
|
||||
DRM_INFO("\t\t%d engine: %d\n",
|
||||
j,
|
||||
rdev->pm.power_state[i].clock_info[j].sclk * 10);
|
||||
else
|
||||
DRM_INFO("\t\t%d engine/memory: %d/%d\n",
|
||||
j,
|
||||
rdev->pm.power_state[i].clock_info[j].sclk * 10,
|
||||
rdev->pm.power_state[i].clock_info[j].mclk * 10);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
|
||||
enum radeon_pm_state_type type)
|
||||
{
|
||||
int i, j;
|
||||
enum radeon_pm_state_type wanted_types[2];
|
||||
int wanted_count;
|
||||
|
||||
switch (type) {
|
||||
case POWER_STATE_TYPE_DEFAULT:
|
||||
default:
|
||||
return rdev->pm.default_power_state;
|
||||
case POWER_STATE_TYPE_POWERSAVE:
|
||||
if (rdev->flags & RADEON_IS_MOBILITY) {
|
||||
wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
|
||||
wanted_types[1] = POWER_STATE_TYPE_BATTERY;
|
||||
wanted_count = 2;
|
||||
} else {
|
||||
wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
|
||||
wanted_count = 1;
|
||||
}
|
||||
break;
|
||||
case POWER_STATE_TYPE_BATTERY:
|
||||
if (rdev->flags & RADEON_IS_MOBILITY) {
|
||||
wanted_types[0] = POWER_STATE_TYPE_BATTERY;
|
||||
wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
|
||||
wanted_count = 2;
|
||||
} else {
|
||||
wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
|
||||
wanted_count = 1;
|
||||
}
|
||||
break;
|
||||
case POWER_STATE_TYPE_BALANCED:
|
||||
case POWER_STATE_TYPE_PERFORMANCE:
|
||||
wanted_types[0] = type;
|
||||
wanted_count = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < wanted_count; i++) {
|
||||
for (j = 0; j < rdev->pm.num_power_states; j++) {
|
||||
if (rdev->pm.power_state[j].type == wanted_types[i])
|
||||
return &rdev->pm.power_state[j];
|
||||
}
|
||||
}
|
||||
|
||||
return rdev->pm.default_power_state;
|
||||
}
|
||||
|
||||
static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
|
||||
struct radeon_power_state *power_state,
|
||||
enum radeon_pm_clock_mode_type type)
|
||||
{
|
||||
switch (type) {
|
||||
case POWER_MODE_TYPE_DEFAULT:
|
||||
default:
|
||||
return power_state->default_clock_mode;
|
||||
case POWER_MODE_TYPE_LOW:
|
||||
return &power_state->clock_info[0];
|
||||
case POWER_MODE_TYPE_MID:
|
||||
if (power_state->num_clock_modes > 2)
|
||||
return &power_state->clock_info[1];
|
||||
else
|
||||
return &power_state->clock_info[0];
|
||||
break;
|
||||
case POWER_MODE_TYPE_HIGH:
|
||||
return &power_state->clock_info[power_state->num_clock_modes - 1];
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void radeon_get_power_state(struct radeon_device *rdev,
|
||||
enum radeon_pm_action action)
|
||||
{
|
||||
switch (action) {
|
||||
case PM_ACTION_MINIMUM:
|
||||
rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
|
||||
rdev->pm.requested_clock_mode =
|
||||
radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
|
||||
break;
|
||||
case PM_ACTION_DOWNCLOCK:
|
||||
rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
|
||||
rdev->pm.requested_clock_mode =
|
||||
radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
|
||||
break;
|
||||
case PM_ACTION_UPCLOCK:
|
||||
rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
|
||||
rdev->pm.requested_clock_mode =
|
||||
radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
|
||||
break;
|
||||
case PM_ACTION_NONE:
|
||||
default:
|
||||
DRM_ERROR("Requested mode for not defined action\n");
|
||||
return;
|
||||
}
|
||||
DRM_INFO("Requested: e: %d m: %d p: %d\n",
|
||||
rdev->pm.requested_clock_mode->sclk,
|
||||
rdev->pm.requested_clock_mode->mclk,
|
||||
rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
|
||||
}
|
||||
|
||||
static void radeon_set_power_state(struct radeon_device *rdev)
|
||||
{
|
||||
/* if *_clock_mode are the same, *_power_state are as well */
|
||||
if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
|
||||
return;
|
||||
|
||||
DRM_INFO("Setting: e: %d m: %d p: %d\n",
|
||||
rdev->pm.requested_clock_mode->sclk,
|
||||
rdev->pm.requested_clock_mode->mclk,
|
||||
rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
|
||||
/* set pcie lanes */
|
||||
/* set voltage */
|
||||
/* set engine clock */
|
||||
radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
|
||||
/* set memory clock */
|
||||
|
||||
rdev->pm.current_power_state = rdev->pm.requested_power_state;
|
||||
rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
|
||||
}
|
||||
|
||||
int radeon_pm_init(struct radeon_device *rdev)
|
||||
{
|
||||
rdev->pm.state = PM_STATE_DISABLED;
|
||||
rdev->pm.planned_action = PM_ACTION_NONE;
|
||||
rdev->pm.downclocked = false;
|
||||
|
||||
if (rdev->bios) {
|
||||
if (rdev->is_atom_bios)
|
||||
radeon_atombios_get_power_modes(rdev);
|
||||
else
|
||||
radeon_combios_get_power_modes(rdev);
|
||||
radeon_print_power_mode_info(rdev);
|
||||
}
|
||||
|
||||
if (radeon_debugfs_pm_init(rdev)) {
|
||||
DRM_ERROR("Failed to register debugfs file for PM!\n");
|
||||
}
|
||||
|
||||
// INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
|
||||
|
||||
if (radeon_dynpm != -1 && radeon_dynpm) {
|
||||
rdev->pm.state = PM_STATE_PAUSED;
|
||||
DRM_INFO("radeon: dynamic power management enabled\n");
|
||||
}
|
||||
|
||||
DRM_INFO("radeon: power management initialized\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_pm_compute_clocks(struct radeon_device *rdev)
|
||||
{
|
||||
struct drm_device *ddev = rdev->ddev;
|
||||
struct drm_connector *connector;
|
||||
struct radeon_crtc *radeon_crtc;
|
||||
int count = 0;
|
||||
|
||||
if (rdev->pm.state == PM_STATE_DISABLED)
|
||||
return;
|
||||
|
||||
mutex_lock(&rdev->pm.mutex);
|
||||
|
||||
rdev->pm.active_crtcs = 0;
|
||||
list_for_each_entry(connector,
|
||||
&ddev->mode_config.connector_list, head) {
|
||||
if (connector->encoder &&
|
||||
connector->dpms != DRM_MODE_DPMS_OFF) {
|
||||
radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
|
||||
rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
|
||||
++count;
|
||||
}
|
||||
}
|
||||
|
||||
if (count > 1) {
|
||||
if (rdev->pm.state == PM_STATE_ACTIVE) {
|
||||
|
||||
rdev->pm.state = PM_STATE_PAUSED;
|
||||
rdev->pm.planned_action = PM_ACTION_UPCLOCK;
|
||||
if (rdev->pm.downclocked)
|
||||
radeon_pm_set_clocks(rdev);
|
||||
|
||||
DRM_DEBUG("radeon: dynamic power management deactivated\n");
|
||||
}
|
||||
} else if (count == 1) {
|
||||
/* TODO: Increase clocks if needed for current mode */
|
||||
|
||||
if (rdev->pm.state == PM_STATE_MINIMUM) {
|
||||
rdev->pm.state = PM_STATE_ACTIVE;
|
||||
rdev->pm.planned_action = PM_ACTION_UPCLOCK;
|
||||
radeon_pm_set_clocks(rdev);
|
||||
}
|
||||
else if (rdev->pm.state == PM_STATE_PAUSED) {
|
||||
rdev->pm.state = PM_STATE_ACTIVE;
|
||||
DRM_DEBUG("radeon: dynamic power management activated\n");
|
||||
}
|
||||
}
|
||||
else { /* count == 0 */
|
||||
if (rdev->pm.state != PM_STATE_MINIMUM) {
|
||||
rdev->pm.state = PM_STATE_MINIMUM;
|
||||
rdev->pm.planned_action = PM_ACTION_MINIMUM;
|
||||
radeon_pm_set_clocks(rdev);
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&rdev->pm.mutex);
|
||||
}
|
||||
|
||||
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
|
||||
{
|
||||
u32 stat_crtc1 = 0, stat_crtc2 = 0;
|
||||
bool in_vbl = true;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (rdev->pm.active_crtcs & (1 << 0)) {
|
||||
stat_crtc1 = RREG32(D1CRTC_STATUS);
|
||||
if (!(stat_crtc1 & 1))
|
||||
in_vbl = false;
|
||||
}
|
||||
if (rdev->pm.active_crtcs & (1 << 1)) {
|
||||
stat_crtc2 = RREG32(D2CRTC_STATUS);
|
||||
if (!(stat_crtc2 & 1))
|
||||
in_vbl = false;
|
||||
}
|
||||
}
|
||||
if (in_vbl == false)
|
||||
DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
|
||||
stat_crtc2, finish ? "exit" : "entry");
|
||||
return in_vbl;
|
||||
}
|
||||
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
|
||||
{
|
||||
/*radeon_fence_wait_last(rdev);*/
|
||||
switch (rdev->pm.planned_action) {
|
||||
case PM_ACTION_UPCLOCK:
|
||||
rdev->pm.downclocked = false;
|
||||
break;
|
||||
case PM_ACTION_DOWNCLOCK:
|
||||
rdev->pm.downclocked = true;
|
||||
break;
|
||||
case PM_ACTION_MINIMUM:
|
||||
break;
|
||||
case PM_ACTION_NONE:
|
||||
DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
|
||||
break;
|
||||
}
|
||||
|
||||
/* check if we are in vblank */
|
||||
radeon_pm_debug_check_in_vbl(rdev, false);
|
||||
radeon_set_power_state(rdev);
|
||||
radeon_pm_debug_check_in_vbl(rdev, true);
|
||||
rdev->pm.planned_action = PM_ACTION_NONE;
|
||||
}
|
||||
|
||||
static void radeon_pm_set_clocks(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_get_power_state(rdev, rdev->pm.planned_action);
|
||||
mutex_lock(&rdev->cp.mutex);
|
||||
|
||||
if (rdev->pm.active_crtcs & (1 << 0)) {
|
||||
rdev->pm.req_vblank |= (1 << 0);
|
||||
// drm_vblank_get(rdev->ddev, 0);
|
||||
}
|
||||
if (rdev->pm.active_crtcs & (1 << 1)) {
|
||||
rdev->pm.req_vblank |= (1 << 1);
|
||||
// drm_vblank_get(rdev->ddev, 1);
|
||||
}
|
||||
if (rdev->pm.active_crtcs)
|
||||
// wait_event_interruptible_timeout(
|
||||
// rdev->irq.vblank_queue, 0,
|
||||
// msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
|
||||
if (rdev->pm.req_vblank & (1 << 0)) {
|
||||
rdev->pm.req_vblank &= ~(1 << 0);
|
||||
// drm_vblank_put(rdev->ddev, 0);
|
||||
}
|
||||
if (rdev->pm.req_vblank & (1 << 1)) {
|
||||
rdev->pm.req_vblank &= ~(1 << 1);
|
||||
// drm_vblank_put(rdev->ddev, 1);
|
||||
}
|
||||
|
||||
radeon_pm_set_clocks_locked(rdev);
|
||||
mutex_unlock(&rdev->cp.mutex);
|
||||
}
|
||||
|
||||
#if 0
|
||||
static void radeon_pm_idle_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct radeon_device *rdev;
|
||||
rdev = container_of(work, struct radeon_device,
|
||||
pm.idle_work.work);
|
||||
|
||||
mutex_lock(&rdev->pm.mutex);
|
||||
if (rdev->pm.state == PM_STATE_ACTIVE) {
|
||||
unsigned long irq_flags;
|
||||
int not_processed = 0;
|
||||
|
||||
read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
|
||||
if (!list_empty(&rdev->fence_drv.emited)) {
|
||||
struct list_head *ptr;
|
||||
list_for_each(ptr, &rdev->fence_drv.emited) {
|
||||
/* count up to 3, that's enought info */
|
||||
if (++not_processed >= 3)
|
||||
break;
|
||||
}
|
||||
}
|
||||
read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
|
||||
if (not_processed >= 3) { /* should upclock */
|
||||
if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
|
||||
rdev->pm.planned_action = PM_ACTION_NONE;
|
||||
} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
|
||||
rdev->pm.downclocked) {
|
||||
rdev->pm.planned_action =
|
||||
PM_ACTION_UPCLOCK;
|
||||
rdev->pm.action_timeout = jiffies +
|
||||
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
|
||||
}
|
||||
} else if (not_processed == 0) { /* should downclock */
|
||||
if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
|
||||
rdev->pm.planned_action = PM_ACTION_NONE;
|
||||
} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
|
||||
!rdev->pm.downclocked) {
|
||||
rdev->pm.planned_action =
|
||||
PM_ACTION_DOWNCLOCK;
|
||||
rdev->pm.action_timeout = jiffies +
|
||||
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
|
||||
}
|
||||
}
|
||||
|
||||
if (rdev->pm.planned_action != PM_ACTION_NONE &&
|
||||
jiffies > rdev->pm.action_timeout) {
|
||||
radeon_pm_set_clocks(rdev);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&rdev->pm.mutex);
|
||||
|
||||
queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
|
||||
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Debugfs info
|
||||
*/
|
||||
@ -44,11 +430,14 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
|
||||
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
|
||||
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
|
||||
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
|
||||
if (rdev->asic->get_memory_clock)
|
||||
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
|
||||
if (rdev->asic->get_pcie_lanes)
|
||||
seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -58,7 +447,7 @@ static struct drm_info_list radeon_pm_info_list[] = {
|
||||
};
|
||||
#endif
|
||||
|
||||
int radeon_debugfs_pm_init(struct radeon_device *rdev)
|
||||
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
|
||||
{
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
|
||||
|
@@ -54,7 +54,7 @@
#include "r300_reg.h"
#include "r500_reg.h"
#include "r600_reg.h"

#include "evergreen_reg.h"

#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -1060,32 +1060,38 @@

/* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090
#define RADEON_I2C_DONE (1 << 0)
#define RADEON_I2C_NACK (1 << 1)
#define RADEON_I2C_HALT (1 << 2)
#define RADEON_I2C_SOFT_RST (1 << 5)
#define RADEON_I2C_DRIVE_EN (1 << 6)
#define RADEON_I2C_DRIVE_SEL (1 << 7)
#define RADEON_I2C_START (1 << 8)
#define RADEON_I2C_STOP (1 << 9)
#define RADEON_I2C_RECEIVE (1 << 10)
#define RADEON_I2C_ABORT (1 << 11)
#define RADEON_I2C_GO (1 << 12)
#define RADEON_I2C_PRESCALE_SHIFT 16
# define RADEON_I2C_DONE (1 << 0)
# define RADEON_I2C_NACK (1 << 1)
# define RADEON_I2C_HALT (1 << 2)
# define RADEON_I2C_SOFT_RST (1 << 5)
# define RADEON_I2C_DRIVE_EN (1 << 6)
# define RADEON_I2C_DRIVE_SEL (1 << 7)
# define RADEON_I2C_START (1 << 8)
# define RADEON_I2C_STOP (1 << 9)
# define RADEON_I2C_RECEIVE (1 << 10)
# define RADEON_I2C_ABORT (1 << 11)
# define RADEON_I2C_GO (1 << 12)
# define RADEON_I2C_PRESCALE_SHIFT 16
#define RADEON_I2C_CNTL_1 0x0094
#define RADEON_I2C_DATA_COUNT_SHIFT 0
#define RADEON_I2C_ADDR_COUNT_SHIFT 4
#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
#define RADEON_I2C_SEL (1 << 16)
#define RADEON_I2C_EN (1 << 17)
#define RADEON_I2C_TIME_LIMIT_SHIFT 24
# define RADEON_I2C_DATA_COUNT_SHIFT 0
# define RADEON_I2C_ADDR_COUNT_SHIFT 4
# define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
# define RADEON_I2C_SEL (1 << 16)
# define RADEON_I2C_EN (1 << 17)
# define RADEON_I2C_TIME_LIMIT_SHIFT 24
#define RADEON_I2C_DATA 0x0098

#define RADEON_DVI_I2C_CNTL_0 0x02e0
# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
# define R200_SEL_DDC1 0 /* depends on asic */
# define R200_SEL_DDC2 1 /* depends on asic */
# define R200_SEL_DDC3 2 /* depends on asic */
# define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
# define RADEON_SW_CAN_USE_DVI_I2C (1 << 13)
# define RADEON_SW_DONE_USING_DVI_I2C (1 << 14)
# define RADEON_HW_NEEDS_DVI_I2C (1 << 14)
# define RADEON_ABORT_HW_DVI_I2C (1 << 15)
# define RADEON_HW_USING_DVI_I2C (1 << 15)
#define RADEON_DVI_I2C_CNTL_1 0x02e4
#define RADEON_DVI_I2C_DATA 0x02e8

@ -84,7 +84,7 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
return r;
}
}
mutex_lock(&rdev->ib_pool.mutex);
}
radeon_fence_unref(&nib->fence);
@ -146,6 +146,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)

if (rdev->ib_pool.robj)
return 0;
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
@ -278,8 +279,6 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
int r;

ENTER();

rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
@ -310,9 +309,6 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
}
rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;

LEAVE();

return 0;
}

@ -366,7 +362,12 @@ int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
int r;

radeon_debugfs_ib_bogus_info_list[0].data = rdev;
r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
if (r)
return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
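The ptr_mask assignment above assumes ring_size is a power of two: the ring holds ring_size/4 dwords and the write pointer wraps by masking instead of comparing. A small sketch of that invariant (the helper name is made up for illustration):

/* sketch: ring write with power-of-two wrap, mirroring the ptr_mask logic */
static inline void ring_write_sketch(struct radeon_device *rdev, u32 v)
{
	rdev->cp.ring[rdev->cp.wptr] = v;
	rdev->cp.wptr = (rdev->cp.wptr + 1) & rdev->cp.ptr_mask;
	rdev->cp.ring_free_dw--;
}

/* e.g. a 64 KiB ring: 16384 dwords, ptr_mask = 16383 (0x3fff) */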
@ -112,7 +112,7 @@ cursor_t* __stdcall select_cursor(cursor_t *cursor)
if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS, gpu_addr);
else {
WREG32(RADEON_CUR_OFFSET, gpu_addr - rdev->mc.vram_location);
WREG32(RADEON_CUR_OFFSET, gpu_addr - rdev->mc.vram_start);
}

return old;
@ -189,7 +189,7 @@ void __stdcall move_cursor(cursor_t *cursor, int x, int y)

/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET,
(gpu_addr - rdev->mc.vram_location + (yorg * 256)));
(gpu_addr - rdev->mc.vram_start + (yorg * 256)));
}
radeon_lock_cursor(false);
}
@ -296,7 +296,7 @@ int r100_2D_test(struct radeon_device *rdev)
ENTER();

pitch = (1024*4)/64;
offset = rdev->mc.vram_location;
offset = rdev->mc.vram_start;

r = radeon_ring_lock(rdev, 16);
if (r) {
@ -2211,8 +2211,7 @@ int r600_2D_test(struct radeon_device *rdev)
ENTER();

pitch = (1024*4)/64;
offset = rdev->mc.vram_location;

offset = rdev->mc.vram_start;
ps_size = R600_solid_ps(rdev, ps_shader);
vs_size = R600_solid_vs(rdev, vs_shader);

@ -2355,7 +2354,7 @@ int r600_2D_test(struct radeon_device *rdev)

set_render_target(rdev, COLOR_8_8_8_8, 1024, 768, /* FIXME */
rdev->mc.vram_location);
rdev->mc.vram_start);

set_scissors(rdev, 0, 0, 1024, 768);

@ -83,7 +83,7 @@ cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
else {
radeon_crtc->legacy_cursor_offset = gpu_addr - rdev->mc.vram_location;
radeon_crtc->legacy_cursor_offset = gpu_addr - rdev->mc.vram_start;
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
}
@ -175,7 +175,7 @@ void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y)

/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET,
(gpu_addr - rdev->mc.vram_location + (yorg * 256)));
(gpu_addr - rdev->mc.vram_start + (yorg * 256)));
}
radeon_lock_cursor_kms(crtc, false);
}
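The cursor hunks above switch from mc.vram_location to mc.vram_start: on pre-AVIVO parts RADEON_CUR_OFFSET takes an offset relative to the start of the VRAM aperture, so the programmed value is the cursor buffer's GPU address minus vram_start, plus 256 bytes per clipped line. In sketch form, with the clipping reduced to a parameter:

/* sketch: program the legacy cursor offset relative to the VRAM aperture */
static void legacy_cursor_offset_sketch(struct radeon_device *rdev,
					u64 gpu_addr, int yorg)
{
	/* 64-pixel-wide ARGB cursor image: 64 * 4 = 256 bytes per line */
	WREG32(RADEON_CUR_OFFSET,
	       (u32)(gpu_addr - rdev->mc.vram_start) + yorg * 256);
}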
@ -113,6 +113,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
uint32_t size_reg;
uint32_t tmp;

radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@ -150,9 +151,8 @@ int rs400_gart_enable(struct radeon_device *rdev)
WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
WREG32(RS480_AGP_BASE_2, 0);
}
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
@ -251,14 +251,19 @@ void rs400_gpu_init(struct radeon_device *rdev)
}
}

void rs400_vram_info(struct radeon_device *rdev)
void rs400_mc_init(struct radeon_device *rdev)
{
u64 base;

rs400_gart_adjust_size(rdev);
rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;

r100_vram_init_sizes(rdev);
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
radeon_gtt_location(rdev, &rdev->mc);
}

uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@ -362,22 +367,6 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
#endif
}

static int rs400_mc_init(struct radeon_device *rdev)
{
int r;
u32 tmp;

/* Setup GPU memory space */
tmp = RREG32(R_00015C_NB_TOM);
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
if (r)
return r;
return 0;
}

void rs400_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
@ -472,12 +461,8 @@ int rs400_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */
rs400_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = rs400_mc_init(rdev);
if (r)
return r;
/* initialize memory controller */
rs400_mc_init(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
// if (r)
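rs400_mc_init now hands placement over to the common helpers instead of calling radeon_mc_setup: radeon_vram_location() anchors the VRAM range at the base read back from the MC (NB_TOM here) and radeon_gtt_location() slots the GART aperture around it. Conceptually the helpers fill in the start/end pairs roughly as sketched below; this is a simplification that ignores the alignment and AGP cases the real helpers handle:

/* rough idea only: derive the start/end pairs used throughout this commit */
static void mc_layout_sketch(struct radeon_mc *mc, u64 vram_base)
{
	mc->vram_start = vram_base;
	mc->vram_end   = vram_base + mc->mc_vram_size - 1;

	/* place the GTT aperture right after VRAM (simplified) */
	mc->gtt_start = mc->vram_end + 1;
	mc->gtt_end   = mc->gtt_start + mc->gtt_size - 1;
}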
@ -45,23 +45,6 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

int rs600_mc_init(struct radeon_device *rdev)
{
/* read back the MC value from the hw */
int r;
u32 tmp;

/* Setup GPU memory space */
tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xffffffffUL;
r = radeon_mc_setup(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@ -213,6 +196,7 @@ int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
WREG32(R_00004C_BUS_CNTL, tmp);
@ -423,22 +407,22 @@ void rs600_gpu_init(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

void rs600_vram_info(struct radeon_device *rdev)
void rs600_mc_init(struct radeon_device *rdev)
{
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;

rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
u64 base;

rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);

if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
rdev->mc.mc_vram_size = rdev->mc.aper_size;

if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
radeon_gtt_location(rdev, &rdev->mc);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
@ -571,12 +555,8 @@ int rs600_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */
rs600_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = rs600_mc_init(rdev);
if (r)
return r;
/* initialize memory controller */
rs600_mc_init(rdev);
rs600_debugfs(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
@ -129,27 +129,21 @@ void rs690_pm_info(struct radeon_device *rdev)
rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
}

void rs690_vram_info(struct radeon_device *rdev)
void rs690_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u64 base;

rs400_gart_adjust_size(rdev);

rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;

rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;

rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);

if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
rdev->mc.mc_vram_size = rdev->mc.aper_size;

if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;

rdev->mc.visible_vram_size = rdev->mc.aper_size;
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
@ -160,22 +154,9 @@ void rs690_vram_info(struct radeon_device *rdev)
a.full = rfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}

static int rs690_mc_init(struct radeon_device *rdev)
{
int r;
u32 tmp;

/* Setup GPU memory space */
tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
radeon_vram_location(rdev, &rdev->mc, base);
radeon_gtt_location(rdev, &rdev->mc);
}

void rs690_line_buffer_adjust(struct radeon_device *rdev,
@ -686,12 +667,8 @@ int rs690_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */
rs690_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = rs690_mc_init(rdev);
if (r)
return r;
/* initialize memory controller */
rs690_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
@ -55,8 +55,6 @@ void rv515_ring_start(struct radeon_device *rdev)
{
int r;

ENTER();

r = radeon_ring_lock(rdev, 64);
if (r) {
return;
@ -119,9 +117,6 @@ void rv515_ring_start(struct radeon_device *rdev)
radeon_ring_write(rdev, PACKET0(0x20C8, 0));
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);

LEAVE();

}

int rv515_mc_wait_for_idle(struct radeon_device *rdev)
@ -183,8 +178,6 @@ int rv515_ga_reset(struct radeon_device *rdev)
bool reinit_cp;
int i;

ENTER();

reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
for (i = 0; i < rdev->usec_timeout; i++) {
@ -237,8 +230,6 @@ int rv515_gpu_reset(struct radeon_device *rdev)
{
uint32_t status;

ENTER();

/* reset order likely matter */
status = RREG32(RBBM_STATUS);
/* reset HDP */
@ -286,13 +277,15 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
}
}

void rv515_vram_info(struct radeon_device *rdev)
void rv515_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;

rv515_vram_get_type(rdev);

r100_vram_init_sizes(rdev);
radeon_vram_location(rdev, &rdev->mc, 0);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
@ -549,13 +542,15 @@ int rv515_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */
rv515_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev);
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
if (r)
return r;
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
radeon_agp_disable(rdev);
}
}
/* initialize memory controller */
rv515_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
@ -56,6 +56,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@ -273,7 +274,8 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
/*
* Core functions
*/
static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
@ -284,6 +286,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
bool force_no_swizzle;

if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
@ -313,6 +316,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;

switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}

memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
@ -323,32 +338,71 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[1] = 1;
break;
case 3:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
}
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 3;
swizzle_pipe[3] = 1;
}
break;
case 5:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
}
break;
case 7:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
@ -356,8 +410,19 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
@ -366,6 +431,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 7;
swizzle_pipe[7] = 5;
}
break;
}
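The new force_no_swizzle switch keeps the pipe order linear on RV710/RV740 and shuffles it on RV770/RV730. The tail of the function, outside this hunk, then walks the enabled backends and packs a 2-bit backend index per tile pipe at bit position swizzle_pipe[pipe] * 2; the sketch below reconstructs that step from the surrounding code, so treat the exact packing as an assumption:

/* sketch of the packing step that consumes swizzle_pipe[] */
u32 backend_map = 0;
u32 cur_backend = 0;
u32 cur_pipe;

for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
	/* skip render backends disabled in the mask */
	while (((1 << cur_backend) & enabled_backends_mask) == 0)
		cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;

	/* two bits per tile pipe, slot chosen by the swizzle table */
	backend_map |= (u32)((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2));

	cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
}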
@ -385,8 +451,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
u32 ta_aux_cntl;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 db_debug3;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
@ -515,6 +583,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)

switch (rdev->config.rv770.max_tile_pipes) {
case 1:
default:
gb_tiling_config |= PIPE_TILING(0);
break;
case 2:
@ -526,16 +595,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
case 8:
gb_tiling_config |= PIPE_TILING(3);
break;
default:
break;
}
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;

if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);

gb_tiling_config |= GROUP_SIZE(0);
rdev->config.rv770.tiling_group_size = 256;

if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
@ -549,18 +619,27 @@ static void rv770_gpu_init(struct radeon_device *rdev)

gb_tiling_config |= BANK_SWAPS(1);

backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
rdev->config.rv770.max_backends,
(0xff << rdev->config.rv770.max_backends) & 0xff);
gb_tiling_config |= BACKEND_MAP(backend_map);
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);

cc_gc_shader_pipe_config =
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);

cc_rb_backend_disable =
BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
if (rdev->family == CHIP_RV740)
backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(rdev,
rdev->config.rv770.max_tile_pipes,
(R7XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
gb_tiling_config |= BACKEND_MAP(backend_map);

WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
@ -568,16 +647,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)

WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);

WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);

num_qd_pipes =
R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK);
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);

@ -587,10 +663,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)

WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
SYNC_GRADIENT |
SYNC_WALKER |
SYNC_ALIGNER));
ta_aux_cntl = RREG32(TA_CNTL_AUX);
WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);

sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@ -601,14 +675,28 @@ static void rv770_gpu_init(struct radeon_device *rdev)
smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);

if (rdev->family != CHIP_RV740)
WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
GS_FLUSH_CTL(4) |
ACK_FLUSH_CTL(3) |
SYNC_FLUSH_CTL));

if (rdev->family == CHIP_RV770)
WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
else {
db_debug3 = RREG32(DB_DEBUG3);
db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV740:
db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
break;
case CHIP_RV710:
case CHIP_RV730:
default:
db_debug3 |= DB_CLK_OFF_DELAY(2);
break;
}
WREG32(DB_DEBUG3, db_debug3);

if (rdev->family != CHIP_RV770) {
db_debug4 = RREG32(DB_DEBUG4);
db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
WREG32(DB_DEBUG4, db_debug4);
@ -637,10 +725,10 @@ static void rv770_gpu_init(struct radeon_device *rdev)
ALU_UPDATE_FIFO_HIWATER(0x8));
switch (rdev->family) {
case CHIP_RV770:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
break;
case CHIP_RV730:
case CHIP_RV710:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
break;
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
@ -813,45 +901,13 @@ int rv770_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);

if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
rdev->mc.visible_vram_size = rdev->mc.aper_size;
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;

if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;

if (rdev->flags & RADEON_IS_AGP) {
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
/* Try to put vram before or after AGP because we
* we want SYSTEM_APERTURE to cover both VRAM and
* AGP so that GPU can catch out of VRAM/AGP access
*/
if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
/* Enough place before */
rdev->mc.vram_location = rdev->mc.gtt_location -
rdev->mc.mc_vram_size;
} else if (tmp > rdev->mc.mc_vram_size) {
/* Enough place after */
rdev->mc.vram_location = rdev->mc.gtt_location +
rdev->mc.gtt_size;
} else {
/* Try to setup VRAM then AGP might not
* not work on some card
*/
rdev->mc.vram_location = 0x00000000UL;
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
} else {
rdev->mc.vram_location = 0x00000000UL;
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
rdev->mc.vram_start = rdev->mc.vram_location;
rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
rdev->mc.gtt_start = rdev->mc.gtt_location;
rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
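rv770_mc_init drops its open-coded VRAM/GTT placement (the removed block above) in favour of r600_vram_gtt_location(), which makes the same before-or-after-AGP decision for every r600-family asic in one place. The invariant that survives either way is the one the removed lines spelled out: each aperture is a start address plus an inclusive end, for example:

/* the start/end pairs the rest of this commit now relies on */
rdev->mc.vram_end = rdev->mc.vram_start + rdev->mc.mc_vram_size - 1;
rdev->mc.gtt_end  = rdev->mc.gtt_start  + rdev->mc.gtt_size - 1;

/* e.g. 256 MiB of VRAM at 0 ends at 0x0FFFFFFF, and a 512 MiB GTT
 * placed right after it spans 0x10000000..0x2FFFFFFF */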
@ -860,6 +916,7 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
return 0;
}

int rv770_gpu_reset(struct radeon_device *rdev)
{
/* FIXME: implement any rv770 specific bits */
@ -961,6 +1018,7 @@ int rv770_init(struct radeon_device *rdev)
// r = radeon_fence_driver_init(rdev);
// if (r)
// return r;
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
@ -992,16 +1050,17 @@ int rv770_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
if (rdev->accel_working) {
// r = radeon_ib_pool_init(rdev);
// if (r) {
// DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
// rdev->accel_working = false;
// }
// r = r600_ib_test(rdev);
// if (r) {
// DRM_ERROR("radeon: failled testing IB (%d).\n", r);
// rdev->accel_working = false;
// }
// r = radeon_ib_pool_init(rdev);
// if (r) {
// dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
// rdev->accel_working = false;
// } else {
// r = r600_ib_test(rdev);
// if (r) {
// dev_err(rdev->dev, "IB test failed (%d).\n", r);
// rdev->accel_working = false;
// }
// }
}
return 0;
}

@ -343,4 +343,6 @@

#define WAIT_UNTIL 0x8040

#define SRBM_STATUS 0x0E50

#endif