kms: pre rc9 II

git-svn-id: svn://kolibrios.org@1403 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2010-02-11 15:24:47 +00:00
parent 505644f066
commit 371d66a59b
38 changed files with 2769 additions and 547 deletions

View File

@@ -23,6 +23,7 @@
#include <types.h>
#include <list.h>
#include <linux/kernel.h>
#include <syscall.h>
#include <errno.h>
#include <linux/i2c.h>

View File

@@ -26,113 +26,14 @@
 * with the slab allocator.
 */
#include <linux/kernel.h>
#include <linux/idr.h>
//#include <stdlib.h>
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
#define ADDR "=m" (*(volatile long *) addr)
static inline void __set_bit(int nr, volatile void *addr)
{
asm volatile("bts %1,%0"
: ADDR
: "Ir" (nr) : "memory");
}
static inline void __clear_bit(int nr, volatile void *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
static inline int constant_test_bit(int nr, const volatile void *addr)
{
return ((1UL << (nr % 32)) &
(((unsigned long *)addr)[nr / 32])) != 0;
}
static inline int variable_test_bit(int nr, volatile const void *addr)
{
int oldbit;
asm volatile("bt %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
};
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
constant_test_bit((nr),(addr)) : \
variable_test_bit((nr),(addr)))
static inline int fls(int x)
{
int r;
__asm__("bsrl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
return r+1;
}
static inline unsigned long __ffs(unsigned long word)
{
__asm__("bsfl %1,%0"
:"=r" (word)
:"rm" (word));
return word;
}
static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
unsigned x = 0;
while (x < size) {
unsigned long val = *addr++;
if (val)
return __ffs(val) + x;
x += (sizeof(*addr)<<3);
}
return x;
}
int find_next_bit(const unsigned long *addr, int size, int offset)
{
const unsigned long *p = addr + (offset >> 5);
int set = 0, bit = offset & 31, res;
if (bit)
{
/*
* Look for nonzero in the first 32 bits:
*/
__asm__("bsfl %1,%0\n\t"
"jne 1f\n\t"
"movl $32, %0\n"
"1:"
: "=r" (set)
: "r" (*p >> bit));
if (set < (32 - bit))
return set + offset;
set = 32 - bit;
p++;
}
/*
* No set bit yet, search remaining full words for a bit
*/
res = find_first_bit (p, size - 32 * (p - addr));
return (offset + set + res);
}
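These scan helpers are normally combined to walk every set bit of a small word-aligned bitmap. A minimal usage sketch follows (the `dirty` array and the loop are made-up illustration, not part of this file or this commit):

/* Example: visit each set bit of a two-word bitmap with the helpers above. */
static void dump_set_bits(void)
{
    unsigned long dirty[2] = { 0x00000011, 0x80000000 };   /* bits 0, 4 and 63 on 32-bit longs */
    int size = 64;
    int bit = find_first_bit(dirty, size);

    while (bit < size) {
        /* ...do something with 'bit'... */
        bit = find_next_bit(dirty, size, bit + 1);
    }
}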
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

View File

@@ -27,7 +27,7 @@
#define _LINUX_I2C_H
#include <types.h>
#include <list.h>
#define I2C_NAME_SIZE 20

View File

@@ -107,5 +107,18 @@ struct file {};
struct vm_area_struct {};
struct address_space {};
#define preempt_disable() do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)
#define preempt_disable_notrace() do { } while (0)
#define preempt_enable_no_resched_notrace() do { } while (0)
#define preempt_enable_notrace() do { } while (0)
void free (void *ptr);
#endif

View File

@@ -581,8 +581,8 @@ static inline void __hlist_del(struct hlist_node *n)
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = (struct hlist_node*)LIST_POISON1;
n->pprev = (struct hlist_node**)LIST_POISON2;
}
static inline void hlist_del_init(struct hlist_node *n)

View File

@@ -0,0 +1,11 @@
#ifndef _LINUX_LIST_SORT_H
#define _LINUX_LIST_SORT_H
#include <linux/types.h>
struct list_head;
void list_sort(void *priv, struct list_head *head,
int (*cmp)(void *priv, struct list_head *a,
struct list_head *b));
#endif
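The new list_sort.h only declares the interface; a minimal usage sketch might look like the following (the `struct item` type and comparison callback are illustrative, assuming the usual list_head/list_entry helpers from list.h):

#include <list.h>              /* list_head, list_entry() */
#include <linux/list_sort.h>   /* list_sort() declared above */

struct item {
    int              key;
    struct list_head node;
};

/* Compare two entries by key; list_sort() expects a <0 / 0 / >0 result. */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
    struct item *ia = list_entry(a, struct item, node);
    struct item *ib = list_entry(b, struct item, node);
    return ia->key - ib->key;
}

static void sort_items(struct list_head *items)
{
    list_sort(NULL, items, item_cmp);   /* priv is unused in this sketch */
}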

View File

@@ -106,6 +106,8 @@
#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
/* deleted */
@@ -115,6 +117,14 @@
#define ROUTER_OBJECT_ID_NONE 0x00
#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
/****************************************************/
/* Generic Object ID Definition */
/****************************************************/
#define GENERIC_OBJECT_ID_NONE 0x00
#define GENERIC_OBJECT_ID_GLSYNC 0x01
#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
#define GENERIC_OBJECT_ID_MXM_OPM 0x03
/****************************************************/
/* Graphics Object ENUM ID Definition */
/****************************************************/
@@ -124,6 +134,7 @@
#define GRAPH_OBJECT_ENUM_ID4 0x04
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -138,16 +149,16 @@
#define ENUM_ID_SHIFT 0x08
#define OBJECT_TYPE_SHIFT 0x0C
/****************************************************/
/* Graphics Object family definition */
/****************************************************/
#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
/****************************************************/
/* GPU Object ID definition - Shared with BIOS */
/****************************************************/
#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
/****************************************************/
@@ -185,203 +196,167 @@
#define ENCODER_DP_DP501_ENUM_ID1 0x211D
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
*/
#define ENCODER_INTERNAL_LVDS_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_DAC1_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_DAC2_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
#define ENCODER_SIL170B_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
#define ENCODER_CH7303_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
#define ENCODER_CH7301_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_DVO1_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
#define ENCODER_TITFP513_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
#define ENCODER_VT1623_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
#define ENCODER_HDMI_SI1930_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
#define ENCODER_HDMI_INTERNAL_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT
#define ENCODER_SI178_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
#define ENCODER_MVPU_FPGA_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_DDI_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
#define ENCODER_VT1625_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
#define ENCODER_HDMI_SI1932_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
#define ENCODER_DP_DP501_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
#define ENCODER_DP_AN9801_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
@@ -406,167 +381,253 @@
#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
*/
#define CONNECTOR_LVDS_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
#define CONNECTOR_LVDS_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
#define CONNECTOR_eDP_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
#define CONNECTOR_eDP_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
#define CONNECTOR_VGA_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
#define CONNECTOR_VGA_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
#define CONNECTOR_COMPOSITE_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
#define CONNECTOR_COMPOSITE_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
#define CONNECTOR_SVIDEO_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
#define CONNECTOR_SVIDEO_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
#define CONNECTOR_YPbPr_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
#define CONNECTOR_YPbPr_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
#define CONNECTOR_D_CONNECTOR_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
#define CONNECTOR_D_CONNECTOR_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
#define CONNECTOR_9PIN_DIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
#define CONNECTOR_9PIN_DIN_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
#define CONNECTOR_SCART_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
#define CONNECTOR_SCART_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
#define CONNECTOR_7PIN_DIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
#define CONNECTOR_7PIN_DIN_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
#define CONNECTOR_CROSSFIRE_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
#define CONNECTOR_CROSSFIRE_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID3 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID4 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID5 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID6 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
#define CONNECTOR_MXM_ENUM_ID1 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_A
#define CONNECTOR_MXM_ENUM_ID2 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_B
#define CONNECTOR_MXM_ENUM_ID3 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_C
#define CONNECTOR_MXM_ENUM_ID4 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_D
#define CONNECTOR_MXM_ENUM_ID5 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_TXxx
#define CONNECTOR_MXM_ENUM_ID6 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_UXxx
#define CONNECTOR_MXM_ENUM_ID7 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
/****************************************************/
/* Router Object ID definition - Shared with BIOS */
/****************************************************/
#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
/* deleted */
/****************************************************/
/* Generic Object ID definition - Shared with BIOS */
/****************************************************/
#define GENERICOBJECT_GLSYNC_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
#define GENERICOBJECT_MXM_OPM_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
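Every *_ENUM_ID* value above is just the three fields OR-ed together, so a consumer can recover them by reversing the shifts. A small illustrative sketch follows; the helper names and the mask widths are assumptions inferred from the value ranges used above, not something defined in this header:

/* Illustrative decode helpers (mask widths assumed from the ranges above:
 * object ids up to 0x15, enum ids up to 7, a handful of object types). */
static inline unsigned int object_id_of(unsigned int id)
{
    return (id >> OBJECT_ID_SHIFT) & 0xff;     /* e.g. CONNECTOR_OBJECT_ID_MXM */
}
static inline unsigned int enum_id_of(unsigned int id)
{
    return (id >> ENUM_ID_SHIFT) & 0x0f;       /* e.g. GRAPH_OBJECT_ENUM_ID7 */
}
static inline unsigned int object_type_of(unsigned int id)
{
    return (id >> OBJECT_TYPE_SHIFT) & 0x0f;   /* e.g. GRAPH_OBJECT_TYPE_CONNECTOR */
}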
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
@@ -576,3 +637,7 @@
#endif
#endif /*GRAPHICTYPE */

View File

@@ -58,6 +58,7 @@ typedef struct {
} atom_exec_context;
int atom_debug = 0;
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static uint32_t atom_arg_mask[8] =
@@ -245,6 +246,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_WS_ATTRIBUTES:
val = gctx->io_attr;
break;
case ATOM_WS_REGPTR:
val = gctx->reg_block;
break;
default:
val = ctx->ws[idx];
}
@@ -384,6 +388,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}
static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
{
uint32_t val = 0xCDCDCDCD;
switch (align) {
case ATOM_SRC_DWORD:
val = U32(*ptr);
(*ptr) += 4;
break;
case ATOM_SRC_WORD0:
case ATOM_SRC_WORD8:
case ATOM_SRC_WORD16:
val = U16(*ptr);
(*ptr) += 2;
break;
case ATOM_SRC_BYTE0:
case ATOM_SRC_BYTE8:
case ATOM_SRC_BYTE16:
case ATOM_SRC_BYTE24:
val = U8(*ptr);
(*ptr)++;
break;
}
return val;
}
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
int *ptr, uint32_t *saved, int print)
{
@@ -481,6 +511,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
case ATOM_WS_ATTRIBUTES:
gctx->io_attr = val;
break;
case ATOM_WS_REGPTR:
gctx->reg_block = val;
break;
default:
ctx->ws[idx] = val;
}
@@ -573,7 +606,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
}
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -676,7 +709,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src1: ");
src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
SDEBUG(" src2: ");
src2 = atom_get_src(ctx, attr, ptr);
dst &= src1;
@@ -808,6 +841,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}
static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
attr &= 0x38;
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
attr &= 0x38;
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
@@ -817,7 +882,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
SDEBUG(" dst: ");
@@ -833,7 +898,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
SDEBUG(" dst: ");
@@ -936,18 +1001,18 @@ static struct {
atom_op_or, ATOM_ARG_FB}, {
atom_op_or, ATOM_ARG_PLL}, {
atom_op_or, ATOM_ARG_MC}, {
atom_op_shift_left, ATOM_ARG_REG}, {
atom_op_shift_left, ATOM_ARG_PS}, {
atom_op_shift_left, ATOM_ARG_WS}, {
atom_op_shift_left, ATOM_ARG_FB}, {
atom_op_shift_left, ATOM_ARG_PLL}, {
atom_op_shift_left, ATOM_ARG_MC}, {
atom_op_shift_right, ATOM_ARG_REG}, {
atom_op_shift_right, ATOM_ARG_PS}, {
atom_op_shift_right, ATOM_ARG_WS}, {
atom_op_shift_right, ATOM_ARG_FB}, {
atom_op_shift_right, ATOM_ARG_PLL}, {
atom_op_shift_right, ATOM_ARG_MC}, {
atom_op_mul, ATOM_ARG_REG}, {
atom_op_mul, ATOM_ARG_PS}, {
atom_op_mul, ATOM_ARG_WS}, {
@@ -1040,7 +1105,7 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1057,8 +1122,6 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
/* reset reg block */
ctx->reg_block = 0;
ectx.ctx = ctx;
ectx.ps_shift = ps / 4;
ectx.start = base;
@@ -1092,6 +1155,19 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
kfree(ectx.ws);
}
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
mutex_lock(&ctx->mutex);
/* reset reg block */
ctx->reg_block = 0;
/* reset fb window */
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
}
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)

View File

@@ -91,6 +91,7 @@
#define ATOM_WS_AND_MASK 0x45
#define ATOM_WS_FB_WINDOW 0x46
#define ATOM_WS_ATTRIBUTES 0x47
#define ATOM_WS_REGPTR 0x48
#define ATOM_IIO_NOP 0
#define ATOM_IIO_START 1
@@ -120,6 +121,7 @@ struct card_info {
struct atom_context {
struct card_info *card;
// struct mutex mutex;
void *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;

View File

@@ -4690,6 +4690,205 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 {
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
} ATOM_POWERPLAY_INFO_V3;
/* New PPlib */
/**************************************************************************/
typedef struct _ATOM_PPLIB_THERMALCONTROLLER
{
UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
UCHAR ucI2cLine; // as interpreted by DAL I2C
UCHAR ucI2cAddress;
UCHAR ucFanParameters; // Fan Control Parameters.
UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
UCHAR ucReserved; // ----
UCHAR ucFlags; // to be defined
} ATOM_PPLIB_THERMALCONTROLLER;
#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
#define ATOM_PP_THERMALCONTROLLER_NONE 0
#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_LM64 5
#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
#define ATOM_PP_THERMALCONTROLLER_RV770 8
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
typedef struct _ATOM_PPLIB_STATE
{
UCHAR ucNonClockStateIndex;
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
UCHAR ucDataRevision;
UCHAR ucNumStates;
UCHAR ucStateEntrySize;
UCHAR ucClockInfoSize;
UCHAR ucNonClockSize;
// offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
USHORT usStateArrayOffset;
// offset from start of this table to array of ASIC-specific structures,
// currently ATOM_PPLIB_CLOCK_INFO.
USHORT usClockInfoArrayOffset;
// offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
USHORT usNonClockInfoArrayOffset;
USHORT usBackbiasTime; // in microseconds
USHORT usVoltageTime; // in microseconds
USHORT usTableSize; //the size of this structure, or the extended structure
ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
ATOM_PPLIB_THERMALCONTROLLER sThermalController;
USHORT usBootClockInfoOffset;
USHORT usBootNonClockInfoOffset;
} ATOM_PPLIB_POWERPLAYTABLE;
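All the us*Offset fields above are byte offsets from the start of the PowerPlay table itself, so a parser walks the state array roughly as follows. This is only a sketch based on the struct comments; endianness conversion and bounds checking are omitted, and the actual driver-side parsing code is not part of this hunk:

/* Sketch: visit each ATOM_PPLIB_STATE advertised by a PowerPlay table.
 * 'table' points at the ATOM_PPLIB_POWERPLAYTABLE found in the BIOS image. */
static void walk_pplib_states(const ATOM_PPLIB_POWERPLAYTABLE *table)
{
    const UCHAR *base = (const UCHAR *)table;
    int i;

    for (i = 0; i < table->ucNumStates; i++) {
        const ATOM_PPLIB_STATE *state = (const ATOM_PPLIB_STATE *)
            (base + table->usStateArrayOffset + i * table->ucStateEntrySize);

        /* state->ucNonClockStateIndex selects an ATOM_PPLIB_NONCLOCK_INFO,
         * state->ucClockStateIndices[] selects ATOM_PPLIB_CLOCK_INFO entries. */
        (void)state;
    }
}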
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
// 2, 4, 6, 7 are reserved
#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
// remaining 3 bits are reserved
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
// 0 is 2.5Gb/s, 1 is 5Gb/s
#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
// lookup into reduced refresh-rate table
#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
// 2-15 TBD as needed.
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
typedef struct _ATOM_PPLIB_NONCLOCK_INFO
{
USHORT usClassification;
UCHAR ucMinTemperature;
UCHAR ucMaxTemperature;
ULONG ulCapsAndSettings;
UCHAR ucRequiredPower;
UCHAR ucUnused1[3];
} ATOM_PPLIB_NONCLOCK_INFO;
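usClassification keeps the UI label in its low three bits and treats the remaining bits as independent flags, so decoding it is a mask-and-shift exercise. A hedged sketch using only the masks defined above:

/* Sketch: map the UI portion of usClassification to a printable label. */
static const char *pplib_ui_label(USHORT classification)
{
    switch ((classification & ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
            ATOM_PPLIB_CLASSIFICATION_UI_SHIFT) {
    case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:     return "battery";
    case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:    return "balanced";
    case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: return "performance";
    default:                                       return "none";
    }
}
/* Flag bits such as ATOM_PPLIB_CLASSIFICATION_BOOT or
 * ATOM_PPLIB_CLASSIFICATION_THERMAL are simply tested with '&'. */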
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
UCHAR ucEngineClockHigh;
USHORT usMemoryClockLow;
UCHAR ucMemoryClockHigh;
USHORT usVDDC;
USHORT usUnused1;
USHORT usUnused2;
ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
} ATOM_PPLIB_R600_CLOCK_INFO;
// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
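The clock values in ATOM_PPLIB_R600_CLOCK_INFO are split into a 16-bit low word and an 8-bit high byte; reassembling them is a one-liner. A sketch only; byte-order conversion, which a real parser would need, is left out:

/* Sketch: reassemble the split 24-bit clock fields of ATOM_PPLIB_R600_CLOCK_INFO. */
static ULONG r600_engine_clock(const ATOM_PPLIB_R600_CLOCK_INFO *ci)
{
    return ci->usEngineClockLow | ((ULONG)ci->ucEngineClockHigh << 16);
}

static ULONG r600_memory_clock(const ATOM_PPLIB_R600_CLOCK_INFO *ci)
{
    return ci->usMemoryClockLow | ((ULONG)ci->ucMemoryClockHigh << 16);
}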
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
{
USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
UCHAR ucLowEngineClockHigh;
USHORT usHighEngineClockLow; // High Engine clock in MHz.
UCHAR ucHighEngineClockHigh;
USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
UCHAR ucMemoryClockHigh; // Currently unused.
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width may be bigger, depending on display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
/**************************************************************************/
/* Following definitions are for compatibility issue in different SW components. */

View File

@@ -0,0 +1,785 @@
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
#include "drm_dp_helper.h"
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
#define DP_LINK_STATUS_SIZE 6
#define DP_DPCD_SIZE 8
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
"0dB", "3.5dB", "6dB", "9.5dB"
};
static const int dp_clocks[] = {
54000, /* 1 lane, 1.62 GHz */
90000, /* 1 lane, 2.70 GHz */
108000, /* 2 lane, 1.62 GHz */
180000, /* 2 lane, 2.70 GHz */
216000, /* 4 lane, 1.62 GHz */
360000, /* 4 lane, 2.70 GHz */
};
static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int);
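For reference (not part of the patch), each table entry is the highest 24bpp pixel clock in kHz that the corresponding lane count and link rate can carry; a small sketch of the underlying arithmetic, assuming 8b/10b coding and 3 bytes per pixel:
/* Illustrative only: one symbol per lane carries one payload byte after
 * 8b/10b, so at 24bpp (3 bytes/pixel) the limit is lanes * rate / 3,
 * e.g. 4 * 270000 / 3 = 360000, matching the last table entry. */
static int dp_max_pixel_clock_khz(int lanes, int link_rate_khz)
{
    return lanes * link_rate_khz / 3;
}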
/* common helper functions */
static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
{
int i;
u8 max_link_bw;
u8 max_lane_count;
if (!dpcd)
return 0;
max_link_bw = dpcd[DP_MAX_LINK_RATE];
max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
switch (max_link_bw) {
case DP_LINK_BW_1_62:
default:
for (i = 0; i < num_dp_clocks; i++) {
if (i % 2)
continue;
switch (max_lane_count) {
case 1:
if (i > 1)
return 0;
break;
case 2:
if (i > 3)
return 0;
break;
case 4:
default:
break;
}
if (dp_clocks[i] > mode_clock) {
if (i < 2)
return 1;
else if (i < 4)
return 2;
else
return 4;
}
}
break;
case DP_LINK_BW_2_7:
for (i = 0; i < num_dp_clocks; i++) {
switch (max_lane_count) {
case 1:
if (i > 1)
return 0;
break;
case 2:
if (i > 3)
return 0;
break;
case 4:
default:
break;
}
if (dp_clocks[i] > mode_clock) {
if (i < 2)
return 1;
else if (i < 4)
return 2;
else
return 4;
}
}
break;
}
return 0;
}
static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
{
int i;
u8 max_link_bw;
u8 max_lane_count;
if (!dpcd)
return 0;
max_link_bw = dpcd[DP_MAX_LINK_RATE];
max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
switch (max_link_bw) {
case DP_LINK_BW_1_62:
default:
for (i = 0; i < num_dp_clocks; i++) {
if (i % 2)
continue;
switch (max_lane_count) {
case 1:
if (i > 1)
return 0;
break;
case 2:
if (i > 3)
return 0;
break;
case 4:
default:
break;
}
if (dp_clocks[i] > mode_clock)
return 162000;
}
break;
case DP_LINK_BW_2_7:
for (i = 0; i < num_dp_clocks; i++) {
switch (max_lane_count) {
case 1:
if (i > 1)
return 0;
break;
case 2:
if (i > 3)
return 0;
break;
case 4:
default:
break;
}
if (dp_clocks[i] > mode_clock)
return (i % 2) ? 270000 : 162000;
}
}
return 0;
}
int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
{
int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
if ((lanes == 0) || (bw == 0))
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
u8 l = dp_link_status(link_status, i);
return (l >> s) & 0xf;
}
static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
u8 lane_status;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_LANE_CR_DONE) == 0)
return false;
}
return true;
}
static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
u8 lane_status;
int lane;
lane_align = dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
return false;
}
return true;
}
static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
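A worked example of the layout these two helpers decode (illustrative, not from the patch): every ADJUST_REQUEST byte carries two lanes, the even lane in the low nibble and the odd lane in the high nibble, with voltage swing in the low two bits of each nibble and pre-emphasis in the upper two:
/* Illustrative only: decode one raw ADJUST_REQUEST byte for both lanes it
 * covers, e.g. 0x26 -> even lane swing 2 / pre-emph 1, odd lane swing 2 / pre-emph 0. */
static void dp_decode_adjust_byte(u8 b, u8 *swing_even, u8 *pe_even,
                                  u8 *swing_odd, u8 *pe_odd)
{
    *swing_even = b & 0x3;
    *pe_even    = (b >> 2) & 0x3;
    *swing_odd  = (b >> 4) & 0x3;
    *pe_odd     = (b >> 6) & 0x3;
}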
/* XXX fix me -- chip specific */
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
static u8 dp_pre_emphasis_max(u8 voltage_swing)
{
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
}
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count,
u8 train_set[4])
{
u8 v = 0;
u8 p = 0;
int lane;
for (lane = 0; lane < lane_count; lane++) {
u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
lane,
voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
}
if (v >= DP_VOLTAGE_MAX)
v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
if (p >= dp_pre_emphasis_max(v))
p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
for (lane = 0; lane < 4; lane++)
train_set[lane] = v | p;
}
/* radeon aux chan functions */
bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
int num_bytes, u8 *read_byte,
u8 read_buf_len, u8 delay)
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
memset(&args, 0, sizeof(args));
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
memcpy(base, req_bytes, num_bytes);
args.lpAuxRequest = 0;
args.lpDataOut = 16;
args.ucDataOutLen = 0;
args.ucChannelID = chan->rec.i2c_id;
args.ucDelay = delay / 10;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (args.ucReplyStatus) {
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
chan->rec.i2c_id, args.ucReplyStatus);
return false;
}
if (args.ucDataOutLen && read_byte && read_buf_len) {
if (read_buf_len < args.ucDataOutLen) {
DRM_ERROR("Buffer too small for return answer %d %d\n",
read_buf_len, args.ucDataOutLen);
return false;
}
{
int len = min(read_buf_len, args.ucDataOutLen);
memcpy(read_byte, base + 16, len);
}
}
return true;
}
bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
uint8_t send_bytes, uint8_t *send)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[20];
u8 msg_len, dp_msg_len;
bool ret;
dp_msg_len = 4;
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_WRITE << 4;
dp_msg_len += send_bytes;
msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
if (send_bytes > 16)
return false;
memcpy(&msg[4], send, send_bytes);
msg_len = 4 + send_bytes;
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
return ret;
}
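Usage sketch (not part of the patch): a one-byte native write to DP_SET_POWER (0x600), as dp_set_power() further below performs, builds the header as address low, address high, AUX_NATIVE_WRITE << 4, and (dp_msg_len << 4) | (send_bytes - 1):
/* Illustrative only: msg[] becomes { 0x00, 0x06, 0x80, 0x50, power_state }
 * and radeon_process_aux_ch() is handed those 5 bytes. */
u8 power_state = DP_SET_POWER_D0;
radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1, &power_state);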
bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
uint8_t delay, uint8_t expected_bytes,
uint8_t *read_p)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[20];
u8 msg_len, dp_msg_len;
bool ret = false;
msg_len = 4;
dp_msg_len = 4;
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (dp_msg_len) << 4;
msg[3] |= expected_bytes - 1;
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
return ret;
}
/* radeon dp functions */
static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
uint8_t ucconfig, uint8_t lane_num)
{
DP_ENCODER_SERVICE_PARAMETERS args;
int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
memset(&args, 0, sizeof(args));
args.ucLinkClock = dp_clock / 10;
args.ucConfig = ucconfig;
args.ucAction = action;
args.ucLaneNum = lane_num;
args.ucStatus = 0;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
return args.ucStatus;
}
u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[25];
int ret;
ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
if (ret) {
memcpy(dig_connector->dpcd, msg, 8);
{
int i;
DRM_DEBUG("DPCD: ");
for (i = 0; i < 8; i++)
DRM_DEBUG("%02x ", msg[i]);
DRM_DEBUG("\n");
}
return true;
}
dig_connector->dpcd[0] = 0;
return false;
}
void radeon_dp_set_link_config(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
(connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
radeon_connector = to_radeon_connector(connector);
if (!radeon_connector->con_priv)
return;
dig_connector = radeon_connector->con_priv;
dig_connector->dp_clock =
dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
dig_connector->dp_lane_count =
dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
}
int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
struct drm_display_mode *mode)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
return dp_mode_valid(dig_connector->dpcd, mode->clock);
}
static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
u8 link_status[DP_LINK_STATUS_SIZE])
{
int ret;
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
DP_LINK_STATUS_SIZE, link_status);
if (!ret) {
DRM_ERROR("displayport link status failed\n");
return false;
}
DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
return true;
}
bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 link_status[DP_LINK_STATUS_SIZE];
if (!atom_dp_get_link_status(radeon_connector, link_status))
return false;
if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
return false;
return true;
}
static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
if (dig_connector->dpcd[0] >= 0x11) {
radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
&power_state);
}
}
static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
{
radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
&downspread);
}
static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
link_configuration);
}
static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
struct drm_encoder *encoder,
u8 train_set[4])
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
int i;
for (i = 0; i < dig_connector->dp_lane_count; i++)
atombios_dig_transmitter_setup(encoder,
ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
i, train_set[i]);
radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
dig_connector->dp_lane_count, train_set);
}
static void dp_set_training(struct radeon_connector *radeon_connector,
u8 training)
{
radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
1, &training);
}
void dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
int enc_id = 0;
bool clock_recovery, channel_eq;
u8 link_status[DP_LINK_STATUS_SIZE];
u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
u8 tries, voltage;
u8 train_set[4];
int i;
if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
(connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
if (!radeon_encoder->enc_priv)
return;
dig = radeon_encoder->enc_priv;
radeon_connector = to_radeon_connector(connector);
if (!radeon_connector->con_priv)
return;
dig_connector = radeon_connector->con_priv;
if (dig->dig_encoder)
enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
else
enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
if (dig_connector->linkb)
enc_id |= ATOM_DP_CONFIG_LINK_B;
else
enc_id |= ATOM_DP_CONFIG_LINK_A;
memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
if (dig_connector->dp_clock == 270000)
link_configuration[0] = DP_LINK_BW_2_7;
else
link_configuration[0] = DP_LINK_BW_1_62;
link_configuration[1] = dig_connector->dp_lane_count;
if (dig_connector->dpcd[0] >= 0x11)
link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
/* power up the sink */
dp_set_power(radeon_connector, DP_SET_POWER_D0);
/* disable the training pattern on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
/* set link bw and lanes on the sink */
dp_set_link_bw_lanes(radeon_connector, link_configuration);
/* disable downspread on the sink */
dp_set_downspread(radeon_connector, 0);
/* start training on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
dig_connector->dp_clock, enc_id, 0);
/* set training pattern 1 on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dig_connector->dp_clock, enc_id, 0);
/* set initial vs/emph */
memset(train_set, 0, 4);
udelay(400);
/* set training pattern 1 on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
dp_update_dpvs_emph(radeon_connector, encoder, train_set);
/* clock recovery loop */
clock_recovery = false;
tries = 0;
voltage = 0xff;
for (;;) {
udelay(100);
if (!atom_dp_get_link_status(radeon_connector, link_status))
break;
if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
clock_recovery = true;
break;
}
for (i = 0; i < dig_connector->dp_lane_count; i++) {
if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
}
if (i == dig_connector->dp_lane_count) {
DRM_ERROR("clock recovery reached max voltage\n");
break;
}
if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5) {
DRM_ERROR("clock recovery tried 5 times\n");
break;
}
} else
tries = 0;
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by sink */
dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
dp_update_dpvs_emph(radeon_connector, encoder, train_set);
}
if (!clock_recovery)
DRM_ERROR("clock recovery failed\n");
else
DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* set training pattern 2 on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
/* set training pattern 2 on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dig_connector->dp_clock, enc_id, 1);
/* channel equalization loop */
tries = 0;
channel_eq = false;
for (;;) {
udelay(400);
if (!atom_dp_get_link_status(radeon_connector, link_status))
break;
if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
channel_eq = true;
break;
}
/* Try 5 times */
if (tries > 5) {
DRM_ERROR("channel eq failed: 5 tries\n");
break;
}
/* Compute new train_set as requested by sink */
dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
dp_update_dpvs_emph(radeon_connector, encoder, train_set);
tries++;
}
if (!channel_eq)
DRM_ERROR("channel eq failed\n");
else
DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* disable the training pattern on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
}
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
int ret = 0;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
int msg_len, dp_msg_len;
int reply_bytes;
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[2] = AUX_I2C_READ << 4;
else
msg[2] = AUX_I2C_WRITE << 4;
if (!(mode & MODE_I2C_STOP))
msg[2] |= AUX_I2C_MOT << 4;
msg[0] = address;
msg[1] = address >> 8;
reply_bytes = 1;
msg_len = 4;
dp_msg_len = 3;
switch (mode) {
case MODE_I2C_WRITE:
msg[4] = write_byte;
msg_len++;
dp_msg_len += 2;
break;
case MODE_I2C_READ:
dp_msg_len += 1;
break;
default:
break;
}
msg[3] = (dp_msg_len) << 4;
ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
if (ret) {
if (read_byte)
*read_byte = reply[0];
return reply_bytes;
}
return -EREMOTEIO;
}

View File

@ -1,11 +1,12 @@
#include <linux/kernel.h>
#include <pci.h> #include <pci.h>
#include <errno-base.h> #include <errno-base.h>
#include <syscall.h> #include <syscall.h>
static LIST_HEAD(devices); static LIST_HEAD(devices);
static dev_t* pci_scan_device(u32_t bus, int devfn); static pci_dev_t* pci_scan_device(u32_t bus, int devfn);
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
@ -309,9 +310,9 @@ static int pci_setup_device(struct pci_dev *dev)
return 0; return 0;
}; };
static dev_t* pci_scan_device(u32_t bus, int devfn) static pci_dev_t* pci_scan_device(u32_t bus, int devfn)
{ {
dev_t *dev; pci_dev_t *dev;
u32_t id; u32_t id;
u8_t hdr; u8_t hdr;
@ -344,7 +345,7 @@ static dev_t* pci_scan_device(u32_t bus, int devfn)
hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE); hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE);
dev = (dev_t*)kzalloc(sizeof(dev_t), 0); dev = (pci_dev_t*)kzalloc(sizeof(dev_t), 0);
INIT_LIST_HEAD(&dev->link); INIT_LIST_HEAD(&dev->link);
@ -370,7 +371,7 @@ int pci_scan_slot(u32_t bus, int devfn)
for (func = 0; func < 8; func++, devfn++) for (func = 0; func < 8; func++, devfn++)
{ {
dev_t *dev; pci_dev_t *dev;
dev = pci_scan_device(bus, devfn); dev = pci_scan_device(bus, devfn);
if( dev ) if( dev )
@ -416,9 +417,9 @@ void pci_scan_bus(u32_t bus)
int enum_pci_devices() int enum_pci_devices()
{ {
dev_t *dev; pci_dev_t *dev;
u32_t last_bus; u32_t last_bus;
u32_t bus = 0 , devfn = 0; u32_t bus = 0 , devfn = 0;
// list_initialize(&devices); // list_initialize(&devices);
@ -744,14 +745,14 @@ int pci_enable_device(struct pci_dev *dev)
struct pci_device_id* find_pci_device(dev_t* pdev, struct pci_device_id *idlist) struct pci_device_id* find_pci_device(pci_dev_t* pdev, struct pci_device_id *idlist)
{ {
dev_t *dev; pci_dev_t *dev;
struct pci_device_id *ent; struct pci_device_id *ent;
for(dev = (dev_t*)devices.next; for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices; &dev->link != &devices;
dev = (dev_t*)dev->link.next) dev = (pci_dev_t*)dev->link.next)
{ {
if( dev->pci_dev.vendor != idlist->vendor ) if( dev->pci_dev.vendor != idlist->vendor )
continue; continue;

View File

@ -120,16 +120,17 @@ void r100_hpd_init(struct radeon_device *rdev)
struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) { switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1: case RADEON_HPD_1:
rdev->irq.hpd[0] = true; // rdev->irq.hpd[0] = true;
break; break;
case RADEON_HPD_2: case RADEON_HPD_2:
rdev->irq.hpd[1] = true; // rdev->irq.hpd[1] = true;
break; break;
default: default:
break; break;
} }
} }
r100_irq_set(rdev); // if (rdev->irq.installed)
// r100_irq_set(rdev);
} }
void r100_hpd_fini(struct radeon_device *rdev) void r100_hpd_fini(struct radeon_device *rdev)
@ -141,10 +142,10 @@ void r100_hpd_fini(struct radeon_device *rdev)
struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) { switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1: case RADEON_HPD_1:
rdev->irq.hpd[0] = false; // rdev->irq.hpd[0] = false;
break; break;
case RADEON_HPD_2: case RADEON_HPD_2:
rdev->irq.hpd[1] = false; // rdev->irq.hpd[1] = false;
break; break;
default: default:
break; break;
@ -263,6 +264,13 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
} }
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
if (crtc == 0)
return RREG32(RADEON_CRTC_CRNT_FRAME);
else
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
void r100_fence_ring_emit(struct radeon_device *rdev, void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence) struct radeon_fence *fence)
@ -272,6 +280,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */ /* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0)); radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 16) | (1 << 17)); radeon_ring_write(rdev, (1 << 16) | (1 << 17));
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */ /* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq); radeon_ring_write(rdev, fence->seq);
@ -519,19 +532,6 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
return err; return err;
} }
static inline __u32 __swab32(__u32 x)
{
asm("bswapl %0" :
"=&r" (x)
:"r" (x));
return x;
}
static inline __u32 be32_to_cpup(const __be32 *p)
{
return __swab32(*(__u32 *)p);
}
static void r100_cp_load_microcode(struct radeon_device *rdev) static void r100_cp_load_microcode(struct radeon_device *rdev)
{ {
@ -1308,7 +1308,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_TXFORMAT_ARGB4444: case RADEON_TXFORMAT_ARGB4444:
case RADEON_TXFORMAT_VYUY422: case RADEON_TXFORMAT_VYUY422:
case RADEON_TXFORMAT_YVYU422: case RADEON_TXFORMAT_YVYU422:
case RADEON_TXFORMAT_DXT1:
case RADEON_TXFORMAT_SHADOW16: case RADEON_TXFORMAT_SHADOW16:
case RADEON_TXFORMAT_LDUDV655: case RADEON_TXFORMAT_LDUDV655:
case RADEON_TXFORMAT_DUDV88: case RADEON_TXFORMAT_DUDV88:
@ -1316,12 +1315,19 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
break; break;
case RADEON_TXFORMAT_ARGB8888: case RADEON_TXFORMAT_ARGB8888:
case RADEON_TXFORMAT_RGBA8888: case RADEON_TXFORMAT_RGBA8888:
case RADEON_TXFORMAT_DXT23:
case RADEON_TXFORMAT_DXT45:
case RADEON_TXFORMAT_SHADOW32: case RADEON_TXFORMAT_SHADOW32:
case RADEON_TXFORMAT_LDUDUV8888: case RADEON_TXFORMAT_LDUDUV8888:
track->textures[i].cpp = 4; track->textures[i].cpp = 4;
break; break;
case RADEON_TXFORMAT_DXT1:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
break;
case RADEON_TXFORMAT_DXT23:
case RADEON_TXFORMAT_DXT45:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
break;
} }
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
@ -1421,6 +1427,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL; return -EINVAL;
} }
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
track->immd_dwords = pkt->count - 1; track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
@ -1642,14 +1649,6 @@ void r100_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev); r100_hdp_reset(rdev);
} }
void r100_hdp_flush(struct radeon_device *rdev)
{
u32 tmp;
tmp = RREG32(RADEON_HOST_PATH_CNTL);
tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
WREG32(RADEON_HOST_PATH_CNTL, tmp);
}
void r100_hdp_reset(struct radeon_device *rdev) void r100_hdp_reset(struct radeon_device *rdev)
{ {
uint32_t tmp; uint32_t tmp;
@ -2848,9 +2847,7 @@ int r100_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) { if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev); r = radeon_agp_init(rdev);
if (r) { if (r) {
printk(KERN_WARNING "[drm] Disabling AGP\n"); radeon_agp_disable(rdev);
rdev->flags &= ~RADEON_IS_AGP;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
} else { } else {
rdev->mc.gtt_location = rdev->mc.agp_base; rdev->mc.gtt_location = rdev->mc.agp_base;
} }
@ -2901,6 +2898,8 @@ int r100_init(struct radeon_device *rdev)
r100_errata(rdev); r100_errata(rdev);
/* Initialize clocks */ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev); radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */ /* Get vram informations */
r100_vram_info(rdev); r100_vram_info(rdev);
/* Initialize memory controller (also test AGP) */ /* Initialize memory controller (also test AGP) */

View File

@ -372,13 +372,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case 5: case 5:
case 6: case 6:
case 7: case 7:
/* 1D/2D */
track->textures[i].tex_coord_type = 0; track->textures[i].tex_coord_type = 0;
break; break;
case 1: case 1:
track->textures[i].tex_coord_type = 1; /* CUBE */
track->textures[i].tex_coord_type = 2;
break; break;
case 2: case 2:
track->textures[i].tex_coord_type = 2; /* 3D */
track->textures[i].tex_coord_type = 1;
break; break;
} }
break; break;
@ -402,7 +405,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_Y8: case R200_TXFORMAT_Y8:
track->textures[i].cpp = 1; track->textures[i].cpp = 1;
break; break;
case R200_TXFORMAT_DXT1:
case R200_TXFORMAT_AI88: case R200_TXFORMAT_AI88:
case R200_TXFORMAT_ARGB1555: case R200_TXFORMAT_ARGB1555:
case R200_TXFORMAT_RGB565: case R200_TXFORMAT_RGB565:
@ -419,9 +421,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_ABGR8888: case R200_TXFORMAT_ABGR8888:
case R200_TXFORMAT_BGR111110: case R200_TXFORMAT_BGR111110:
case R200_TXFORMAT_LDVDU8888: case R200_TXFORMAT_LDVDU8888:
track->textures[i].cpp = 4;
break;
case R200_TXFORMAT_DXT1:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
break;
case R200_TXFORMAT_DXT23: case R200_TXFORMAT_DXT23:
case R200_TXFORMAT_DXT45: case R200_TXFORMAT_DXT45:
track->textures[i].cpp = 4; track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
break; break;
} }
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);

View File

@ -36,7 +36,15 @@
#include "rv350d.h" #include "rv350d.h"
#include "r300_reg_safe.h" #include "r300_reg_safe.h"
/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */ /* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
*
* GPU Errata:
* - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
* using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
* However, scheduling such a write on the ring seems harmless; I suspect
* the CP read collides with the flush somehow, or maybe the MC, hard to
* tell. (Jerome Glisse)
*/
/* /*
* rv370,rv380 PCIE GART * rv370,rv380 PCIE GART
@ -174,6 +182,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */ /* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0)); radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */ /* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq); radeon_ring_write(rdev, fence->seq);
@ -691,7 +704,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
ib[idx] = tmp;
track->textures[i].robj = reloc->robj; track->textures[i].robj = reloc->robj;
break; break;
/* Tracked registers */ /* Tracked registers */
@ -857,7 +878,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_Z6Y5X5: case R300_TX_FORMAT_Z6Y5X5:
case R300_TX_FORMAT_W4Z4Y4X4: case R300_TX_FORMAT_W4Z4Y4X4:
case R300_TX_FORMAT_W1Z5Y5X5: case R300_TX_FORMAT_W1Z5Y5X5:
case R300_TX_FORMAT_DXT1:
case R300_TX_FORMAT_D3DMFT_CxV8U8: case R300_TX_FORMAT_D3DMFT_CxV8U8:
case R300_TX_FORMAT_B8G8_B8G8: case R300_TX_FORMAT_B8G8_B8G8:
case R300_TX_FORMAT_G8R8_G8B8: case R300_TX_FORMAT_G8R8_G8B8:
@ -871,8 +891,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x17: case 0x17:
case R300_TX_FORMAT_FL_I32: case R300_TX_FORMAT_FL_I32:
case 0x1e: case 0x1e:
case R300_TX_FORMAT_DXT3:
case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 4; track->textures[i].cpp = 4;
break; break;
case R300_TX_FORMAT_W16Z16Y16X16: case R300_TX_FORMAT_W16Z16Y16X16:
@ -883,6 +901,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_FL_R32G32B32A32: case R300_TX_FORMAT_FL_R32G32B32A32:
track->textures[i].cpp = 16; track->textures[i].cpp = 16;
break; break;
case R300_TX_FORMAT_DXT1:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
break;
case R300_TX_FORMAT_ATI2N:
if (p->rdev->family < CHIP_R420) {
DRM_ERROR("Invalid texture format %u\n",
(idx_value & 0x1F));
return -EINVAL;
}
/* The same rules apply as for DXT3/5. */
/* Pass through. */
case R300_TX_FORMAT_DXT3:
case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
break;
default: default:
DRM_ERROR("Invalid texture format %u\n", DRM_ERROR("Invalid texture format %u\n",
(idx_value & 0x1F)); (idx_value & 0x1F));
@ -942,6 +977,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].width_11 = tmp; track->textures[i].width_11 = tmp;
tmp = ((idx_value >> 16) & 1) << 11; tmp = ((idx_value >> 16) & 1) << 11;
track->textures[i].height_11 = tmp; track->textures[i].height_11 = tmp;
/* ATI1N */
if (idx_value & (1 << 14)) {
/* The same rules apply as for DXT1. */
track->textures[i].compress_format =
R100_TRACK_COMP_DXT1;
}
} else if (idx_value & (1 << 14)) {
DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
return -EINVAL;
} }
break; break;
case 0x4480: case 0x4480:
@ -983,6 +1028,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
} }
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case 0x4e0c:
/* RB3D_COLOR_CHANNEL_MASK */
track->color_channel_mask = idx_value;
break;
case 0x4d1c:
/* ZB_BW_CNTL */
track->fastfill = !!(idx_value & (1 << 2));
break;
case 0x4e04:
/* RB3D_BLENDCNTL */
track->blend_read_enable = !!(idx_value & (1 << 2));
break;
case 0x4be8: case 0x4be8:
/* valid register only on RV530 */ /* valid register only on RV530 */
if (p->rdev->family == CHIP_RV530) if (p->rdev->family == CHIP_RV530)
@ -1221,6 +1278,7 @@ static int r300_startup(struct radeon_device *rdev)
} }
/* Enable IRQ */ /* Enable IRQ */
// r100_irq_set(rdev); // r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */ /* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024); // r = r100_cp_init(rdev, 1024 * 1024);
// if (r) { // if (r) {
@ -1280,6 +1338,8 @@ int r300_init(struct radeon_device *rdev)
r300_errata(rdev); r300_errata(rdev);
/* Initialize clocks */ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev); radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */ /* Get vram informations */
r300_vram_info(rdev); r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */ /* Initialize memory controller (also test AGP) */

View File

@ -900,6 +900,7 @@
# define R300_TX_FORMAT_FL_I32 0x1B # define R300_TX_FORMAT_FL_I32 0x1B
# define R300_TX_FORMAT_FL_I32A32 0x1C # define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D # define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
# define R300_TX_FORMAT_ATI2N 0x1F
/* alpha modes, convenience mostly */ /* alpha modes, convenience mostly */
/* if you have alpha, pick constant appropriate to the /* if you have alpha, pick constant appropriate to the
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */ number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */

View File

@ -30,7 +30,15 @@
#include "radeon_reg.h" #include "radeon_reg.h"
#include "radeon.h" #include "radeon.h"
#include "atom.h" #include "atom.h"
#include "r100d.h"
#include "r420d.h" #include "r420d.h"
#include "r420_reg_safe.h"
static void r420_set_reg_safe(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}
int r420_mc_init(struct radeon_device *rdev) int r420_mc_init(struct radeon_device *rdev)
{ {
@ -42,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) { if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev); r = radeon_agp_init(rdev);
if (r) { if (r) {
printk(KERN_WARNING "[drm] Disabling AGP\n"); radeon_agp_disable(rdev);
rdev->flags &= ~RADEON_IS_AGP;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
} else { } else {
rdev->mc.gtt_location = rdev->mc.agp_base; rdev->mc.gtt_location = rdev->mc.agp_base;
} }
@ -165,6 +171,34 @@ static void r420_clock_resume(struct radeon_device *rdev)
WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
} }
static void r420_cp_errata_init(struct radeon_device *rdev)
{
/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.
*
* The proper workaround is to queue a RESYNC at the beginning
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
radeon_ring_lock(rdev, 8);
radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
radeon_ring_write(rdev, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
radeon_ring_lock(rdev, 8);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
static int r420_startup(struct radeon_device *rdev) static int r420_startup(struct radeon_device *rdev)
{ {
int r; int r;
@ -190,6 +224,7 @@ static int r420_startup(struct radeon_device *rdev)
r420_pipes_init(rdev); r420_pipes_init(rdev);
/* Enable IRQ */ /* Enable IRQ */
// r100_irq_set(rdev); // r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */ /* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024); // r = r100_cp_init(rdev, 1024 * 1024);
// if (r) { // if (r) {

View File

@ -0,0 +1,42 @@
static const unsigned r420_reg_safe_bm[159] = {
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
0x00000000, 0x00000100, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0xFF800000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
};

View File

@ -236,28 +236,28 @@ void r600_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) { switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1: case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp); WREG32(DC_HPD1_CONTROL, tmp);
rdev->irq.hpd[0] = true; // rdev->irq.hpd[0] = true;
break; break;
case RADEON_HPD_2: case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp); WREG32(DC_HPD2_CONTROL, tmp);
rdev->irq.hpd[1] = true; // rdev->irq.hpd[1] = true;
break; break;
case RADEON_HPD_3: case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp); WREG32(DC_HPD3_CONTROL, tmp);
rdev->irq.hpd[2] = true; // rdev->irq.hpd[2] = true;
break; break;
case RADEON_HPD_4: case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp); WREG32(DC_HPD4_CONTROL, tmp);
rdev->irq.hpd[3] = true; // rdev->irq.hpd[3] = true;
break; break;
/* DCE 3.2 */ /* DCE 3.2 */
case RADEON_HPD_5: case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp); WREG32(DC_HPD5_CONTROL, tmp);
rdev->irq.hpd[4] = true; // rdev->irq.hpd[4] = true;
break; break;
case RADEON_HPD_6: case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp); WREG32(DC_HPD6_CONTROL, tmp);
rdev->irq.hpd[5] = true; // rdev->irq.hpd[5] = true;
break; break;
default: default:
break; break;
@ -269,22 +269,23 @@ void r600_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) { switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1: case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[0] = true; // rdev->irq.hpd[0] = true;
break; break;
case RADEON_HPD_2: case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN); WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[1] = true; // rdev->irq.hpd[1] = true;
break; break;
case RADEON_HPD_3: case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN); WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[2] = true; // rdev->irq.hpd[2] = true;
break; break;
default: default:
break; break;
} }
} }
} }
r600_irq_set(rdev); // if (rdev->irq.installed)
// r600_irq_set(rdev);
} }
void r600_hpd_fini(struct radeon_device *rdev) void r600_hpd_fini(struct radeon_device *rdev)
@ -298,28 +299,28 @@ void r600_hpd_fini(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) { switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1: case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0); WREG32(DC_HPD1_CONTROL, 0);
rdev->irq.hpd[0] = false; // rdev->irq.hpd[0] = false;
break; break;
case RADEON_HPD_2: case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0); WREG32(DC_HPD2_CONTROL, 0);
rdev->irq.hpd[1] = false; // rdev->irq.hpd[1] = false;
break; break;
case RADEON_HPD_3: case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0); WREG32(DC_HPD3_CONTROL, 0);
rdev->irq.hpd[2] = false; // rdev->irq.hpd[2] = false;
break; break;
case RADEON_HPD_4: case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0); WREG32(DC_HPD4_CONTROL, 0);
rdev->irq.hpd[3] = false; // rdev->irq.hpd[3] = false;
break; break;
/* DCE 3.2 */ /* DCE 3.2 */
case RADEON_HPD_5: case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0); WREG32(DC_HPD5_CONTROL, 0);
rdev->irq.hpd[4] = false; // rdev->irq.hpd[4] = false;
break; break;
case RADEON_HPD_6: case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0); WREG32(DC_HPD6_CONTROL, 0);
rdev->irq.hpd[5] = false; // rdev->irq.hpd[5] = false;
break; break;
default: default:
break; break;
@ -331,15 +332,15 @@ void r600_hpd_fini(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) { switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1: case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0); WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
rdev->irq.hpd[0] = false; // rdev->irq.hpd[0] = false;
break; break;
case RADEON_HPD_2: case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0); WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
rdev->irq.hpd[1] = false; // rdev->irq.hpd[1] = false;
break; break;
case RADEON_HPD_3: case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0); WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
rdev->irq.hpd[2] = false; // rdev->irq.hpd[2] = false;
break; break;
default: default:
break; break;
@ -486,8 +487,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
if (rdev->gart.table.vram.robj) { if (rdev->gart.table.vram.robj) {
// radeon_object_kunmap(rdev->gart.table.vram.robj); r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
// radeon_object_unpin(rdev->gart.table.vram.robj); if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
} }
} }
@ -618,7 +623,6 @@ int r600_mc_init(struct radeon_device *rdev)
fixed20_12 a; fixed20_12 a;
u32 tmp; u32 tmp;
int chansize, numchan; int chansize, numchan;
int r;
/* Get VRAM informations */ /* Get VRAM informations */
rdev->mc.vram_is_ddr = true; rdev->mc.vram_is_ddr = true;
@ -661,9 +665,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size; rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) { if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
return r;
/* gtt_size is setup by radeon_agp_init */ /* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base; rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@ -721,6 +722,10 @@ int r600_mc_init(struct radeon_device *rdev)
a.full = rfixed_const(100); a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
if (rdev->flags & RADEON_IS_IGP)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
return 0; return 0;
} }
@ -1379,11 +1384,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_PORT_DATA); (void)RREG32(PCIE_PORT_DATA);
} }
void r600_hdp_flush(struct radeon_device *rdev)
{
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
/* /*
* CP & Ring * CP & Ring
*/ */
@ -1591,6 +1591,11 @@ int r600_init(struct radeon_device *rdev)
// r = radeon_fence_driver_init(rdev); // r = radeon_fence_driver_init(rdev);
// if (r) // if (r)
// return r; // return r;
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
radeon_agp_disable(rdev);
}
r = r600_mc_init(rdev); r = r600_mc_init(rdev);
dbgprintf("mc vram location %x\n", rdev->mc.vram_location); dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
if (r) if (r)

View File

@ -0,0 +1,267 @@
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Christian König.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#include "drmP.h"
#include "radeon.h"
#include "radeon_reg.h"
#include "atom.h"
#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
/*
* check if the chipset is supported
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
}
/*
* current number of channels
*/
static int r600_audio_channels(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
}
/*
* current bits per sample
*/
static int r600_audio_bits_per_sample(struct radeon_device *rdev)
{
uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
switch (value) {
case 0x0: return 8;
case 0x1: return 16;
case 0x2: return 20;
case 0x3: return 24;
case 0x4: return 32;
}
DRM_ERROR("Unknown bits per sample 0x%x, using 16 instead.\n", (int)value);
return 16;
}
/*
* current sampling rate in Hz
*/
static int r600_audio_rate(struct radeon_device *rdev)
{
uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
uint32_t result;
if (value & 0x4000)
result = 44100;
else
result = 48000;
result *= ((value >> 11) & 0x7) + 1;
result /= ((value >> 8) & 0x7) + 1;
return result;
}
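Worked example (not part of the patch): the register packs a base-rate select bit, a multiplier and a divider; the same arithmetic applied to a raw value makes the packing easier to see:
/* Illustrative only: for value = 0x0800 the base is 48000 Hz (bit 14 clear),
 * the multiplier field gives 2 and the divider field gives 1, so 96000 Hz. */
static int r600_audio_rate_from_value(uint32_t value)
{
    int rate = (value & 0x4000) ? 44100 : 48000;
    rate *= ((value >> 11) & 0x7) + 1;
    rate /= ((value >> 8) & 0x7) + 1;
    return rate;
}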
/*
* iec 60958 status bits
*/
static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
{
return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
}
/*
* iec 60958 category code
*/
static uint8_t r600_audio_category_code(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
}
/*
* update all hdmi interfaces with current audio parameters
*/
static void r600_audio_update_hdmi(unsigned long param)
{
struct radeon_device *rdev = (struct radeon_device *)param;
struct drm_device *dev = rdev->ddev;
int channels = r600_audio_channels(rdev);
int rate = r600_audio_rate(rdev);
int bps = r600_audio_bits_per_sample(rdev);
uint8_t status_bits = r600_audio_status_bits(rdev);
uint8_t category_code = r600_audio_category_code(rdev);
struct drm_encoder *encoder;
int changes = 0;
changes |= channels != rdev->audio_channels;
changes |= rate != rdev->audio_rate;
changes |= bps != rdev->audio_bits_per_sample;
changes |= status_bits != rdev->audio_status_bits;
changes |= category_code != rdev->audio_category_code;
if (changes) {
rdev->audio_channels = channels;
rdev->audio_rate = rate;
rdev->audio_bits_per_sample = bps;
rdev->audio_status_bits = status_bits;
rdev->audio_category_code = category_code;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (changes || r600_hdmi_buffer_status_changed(encoder))
r600_hdmi_update_audio_settings(
encoder, channels,
rate, bps, status_bits,
category_code);
}
// mod_timer(&rdev->audio_timer,
// jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
}
/*
* initialize the audio vars and register the update timer
*/
int r600_audio_init(struct radeon_device *rdev)
{
if (!r600_audio_chipset_supported(rdev))
return 0;
DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
rdev->audio_channels = -1;
rdev->audio_rate = -1;
rdev->audio_bits_per_sample = -1;
rdev->audio_status_bits = 0;
rdev->audio_category_code = 0;
// setup_timer(
// &rdev->audio_timer,
// r600_audio_update_hdmi,
// (unsigned long)rdev);
// mod_timer(&rdev->audio_timer, jiffies + 1);
return 0;
}
/*
* determine how the encoders and the audio interface are wired together
*/
int r600_audio_tmds_index(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *other;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
return 0;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
/* special case: check if a TMDS1 is present */
list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
if (to_radeon_encoder(other)->encoder_id ==
ENCODER_OBJECT_ID_INTERNAL_TMDS1)
return 1;
}
return 0;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
return 1;
default:
DRM_ERROR("Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return -1;
}
}
/*
* attach the audio codec to the clock source of the encoder
*/
void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
int base_rate = 48000;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
break;
default:
DRM_ERROR("Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
switch (r600_audio_tmds_index(encoder)) {
case 0:
WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
WREG32(R600_AUDIO_PLL1_DIV, clock*100);
WREG32(R600_AUDIO_CLK_SRCSEL, 0);
break;
case 1:
WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
WREG32(R600_AUDIO_PLL2_DIV, clock*100);
WREG32(R600_AUDIO_CLK_SRCSEL, 1);
break;
}
}
/*
* release the audio timer
* TODO: How to do this correctly on SMP systems?
*/
void r600_audio_fini(struct radeon_device *rdev)
{
if (!r600_audio_chipset_supported(rdev))
return;
WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
// del_timer(&rdev->audio_timer);
}

View File

@ -0,0 +1,506 @@
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Christian König.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
/*
* HDMI color format
*/
enum r600_hdmi_color_format {
RGB = 0,
YCC_422 = 1,
YCC_444 = 2
};
/*
* IEC60958 status bits
*/
enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_DIG_ENABLE = 0x01,
AUDIO_STATUS_V = 0x02,
AUDIO_STATUS_VCFG = 0x04,
AUDIO_STATUS_EMPHASIS = 0x08,
AUDIO_STATUS_COPYRIGHT = 0x10,
AUDIO_STATUS_NONAUDIO = 0x20,
AUDIO_STATUS_PROFESSIONAL = 0x40,
AUDIO_STATUS_LEVEL = 0x80
};
struct {
uint32_t Clock;
int N_32kHz;
int CTS_32kHz;
int N_44_1kHz;
int CTS_44_1kHz;
int N_48kHz;
int CTS_48kHz;
} r600_hdmi_ACR[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
{ 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25.20/1.001 MHz */
{ 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
{ 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
{ 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
{ 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
{ 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
{ 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
{ 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
{ 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
{ 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
{ 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
};
/*
* calculate CTS value if it's not found in the table
*/
static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
*CTS = clock * N / (128 * freq) * 1000;
DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
N, *CTS, freq);
}
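/*
 * Worked example of the fallback above (used only for clocks not in the table):
 * for a hypothetical 65 MHz pixel clock at 48 kHz with the default N = 6144,
 * CTS = 65000 * 6144 / (128 * 48000) * 1000 = 65000, which matches
 * CTS = f_TMDS * N / (128 * fs) = 65,000,000 * 6144 / 6,144,000.
 * Note the integer division: clocks where this does not divide evenly lose a
 * little precision here.
 */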
/*
* update the N and CTS parameters for a given pixel clock rate
*/
static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
int CTS;
int N;
int i;
for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
CTS = r600_hdmi_ACR[i].CTS_32kHz;
N = r600_hdmi_ACR[i].N_32kHz;
r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
WREG32(offset+R600_HDMI_32kHz_N, N);
CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
N = r600_hdmi_ACR[i].N_44_1kHz;
r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
WREG32(offset+R600_HDMI_44_1kHz_N, N);
CTS = r600_hdmi_ACR[i].CTS_48kHz;
N = r600_hdmi_ACR[i].N_48kHz;
r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
WREG32(offset+R600_HDMI_48kHz_N, N);
}
/*
* calculate the checksum for a given info frame
*/
static void r600_hdmi_infoframe_checksum(uint8_t packetType,
uint8_t versionNumber,
uint8_t length,
uint8_t *frame)
{
int i;
frame[0] = packetType + versionNumber + length;
for (i = 1; i <= length; i++)
frame[0] += frame[i];
frame[0] = 0x100 - frame[0];
}
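/*
 * The checksum is chosen so that the byte sum of the infoframe header
 * (type, version, length), the payload and the checksum itself is 0 mod 256.
 * Example with the audio infoframe as built below (type 0x84, version 0x01,
 * length 0x0A) and a payload that is all zero except frame[0x1] = 0x01:
 * 0x84 + 0x01 + 0x0A + 0x01 = 0x90, so frame[0] = 0x100 - 0x90 = 0x70,
 * and 0x90 + 0x70 = 0x100 == 0 (mod 256).
 */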
/*
* build a HDMI Video Info Frame
*/
static void r600_hdmi_videoinfoframe(
struct drm_encoder *encoder,
enum r600_hdmi_color_format color_format,
int active_information_present,
uint8_t active_format_aspect_ratio,
uint8_t scan_information,
uint8_t colorimetry,
uint8_t ex_colorimetry,
uint8_t quantization,
int ITC,
uint8_t picture_aspect_ratio,
uint8_t video_format_identification,
uint8_t pixel_repetition,
uint8_t non_uniform_picture_scaling,
uint8_t bar_info_data_valid,
uint16_t top_bar,
uint16_t bottom_bar,
uint16_t left_bar,
uint16_t right_bar
)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
uint8_t frame[14];
frame[0x0] = 0;
frame[0x1] =
(scan_information & 0x3) |
((bar_info_data_valid & 0x3) << 2) |
((active_information_present & 0x1) << 4) |
((color_format & 0x3) << 5);
frame[0x2] =
(active_format_aspect_ratio & 0xF) |
((picture_aspect_ratio & 0x3) << 4) |
((colorimetry & 0x3) << 6);
frame[0x3] =
(non_uniform_picture_scaling & 0x3) |
((quantization & 0x3) << 2) |
((ex_colorimetry & 0x7) << 4) |
((ITC & 0x1) << 7);
frame[0x4] = (video_format_identification & 0x7F);
frame[0x5] = (pixel_repetition & 0xF);
frame[0x6] = (top_bar & 0xFF);
frame[0x7] = (top_bar >> 8);
frame[0x8] = (bottom_bar & 0xFF);
frame[0x9] = (bottom_bar >> 8);
frame[0xA] = (left_bar & 0xFF);
frame[0xB] = (left_bar >> 8);
frame[0xC] = (right_bar & 0xFF);
frame[0xD] = (right_bar >> 8);
r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
frame[0xC] | (frame[0xD] << 8));
}
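/*
 * The 13-byte AVI infoframe payload (plus the checksum stored in frame[0x0]) is
 * packed little-endian, four bytes per 32-bit VIDEOINFOFRAME register, so the
 * checksum ends up in the low byte of VIDEOINFOFRAME_0 and the bar information
 * in VIDEOINFOFRAME_2/_3.
 */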
/*
* build an Audio Info Frame
*/
static void r600_hdmi_audioinfoframe(
struct drm_encoder *encoder,
uint8_t channel_count,
uint8_t coding_type,
uint8_t sample_size,
uint8_t sample_frequency,
uint8_t format,
uint8_t channel_allocation,
uint8_t level_shift,
int downmix_inhibit
)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
uint8_t frame[11];
frame[0x0] = 0;
frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
frame[0x3] = format;
frame[0x4] = channel_allocation;
frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
frame[0x6] = 0;
frame[0x7] = 0;
frame[0x8] = 0;
frame[0x9] = 0;
frame[0xA] = 0;
r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
}
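/*
 * Note that AUDIOINFOFRAME_1 takes its top byte from frame[0x8] rather than
 * frame[0x7]; that is how the register layout was reverse engineered, so it may
 * be intentional rather than a slip.
 */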
/*
* test if audio buffer is filled enough to start playing
*/
static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
}
/*
* have buffer status changed since last call?
*/
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
int status, result;
if (!radeon_encoder->hdmi_offset)
return 0;
status = r600_hdmi_is_audio_buffer_filled(encoder);
result = radeon_encoder->hdmi_buffer_status != status;
radeon_encoder->hdmi_buffer_status = status;
return result;
}
/*
* write the audio workaround status to the hardware
*/
void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset = radeon_encoder->hdmi_offset;
if (!offset)
return;
if (r600_hdmi_is_audio_buffer_filled(encoder)) {
/* disable audio workaround and start delivering of audio frames */
WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
} else if (radeon_encoder->hdmi_audio_workaround) {
/* enable audio workaround and start delivering of audio frames */
WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
} else {
/* disable audio workaround and stop delivering of audio frames */
WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
}
}
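/*
 * From the three cases above, bit 0 of HDMI_CNTL appears to gate audio frame
 * delivery and bit 12 the workaround itself; the ~0x00001001 mask leaves all
 * other control bits untouched.
 */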
/*
* update the info frames with the data from the current display mode
*/
void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
if (!offset)
return;
r600_audio_set_clock(encoder, mode->clock);
WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
r600_hdmi_update_ACR(encoder, mode->clock);
WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
WREG32(offset+R600_HDMI_VERSION, 0x202);
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
r600_hdmi_audio_workaround(encoder);
/* audio packets per line, does anyone know how to calculate this? */
WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
/* update? reset? don't really know */
WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
}
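/*
 * Rough call order as used elsewhere in this changeset: r600_hdmi_init() picks
 * the register block when the encoder is created, atombios_digital_setup()
 * calls r600_hdmi_enable() once an HDMI monitor is detected, and the encoder
 * mode_set path calls r600_hdmi_setmode() with the adjusted mode so the ACR
 * values and infoframes match the new pixel clock.
 */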
/*
* update settings with current parameters from audio engine
*/
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
int channels,
int rate,
int bps,
uint8_t status_bits,
uint8_t category_code)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
uint32_t iec;
if (!offset)
return;
DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
channels, rate, bps);
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
(int)status_bits, (int)category_code);
iec = 0;
if (status_bits & AUDIO_STATUS_PROFESSIONAL)
iec |= 1 << 0;
if (status_bits & AUDIO_STATUS_NONAUDIO)
iec |= 1 << 1;
if (status_bits & AUDIO_STATUS_COPYRIGHT)
iec |= 1 << 2;
if (status_bits & AUDIO_STATUS_EMPHASIS)
iec |= 1 << 3;
iec |= category_code << 8;
switch (rate) {
case 32000: iec |= 0x3 << 24; break;
case 44100: iec |= 0x0 << 24; break;
case 88200: iec |= 0x8 << 24; break;
case 176400: iec |= 0xc << 24; break;
case 48000: iec |= 0x2 << 24; break;
case 96000: iec |= 0xa << 24; break;
case 192000: iec |= 0xe << 24; break;
}
WREG32(offset+R600_HDMI_IEC60958_1, iec);
iec = 0;
switch (bps) {
case 16: iec |= 0x2; break;
case 20: iec |= 0x3; break;
case 24: iec |= 0xb; break;
}
if (status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
/* 0x021 or 0x031 sets the audio frame length */
WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
r600_hdmi_audio_workaround(encoder);
/* update? reset? don't really know */
WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
}
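/*
 * The two IEC60958 registers mirror the channel status block: the
 * professional/non-audio/copyright/emphasis flags sit in bits 0-3, the category
 * code in bits 8-15 and the sampling frequency code in bits 24-27 of the first
 * word (0x0 = 44.1 kHz, 0x2 = 48 kHz, 0x3 = 32 kHz, ...), with the word length
 * code (0x2 = 16 bit, 0xb = 24 bit) in the second word, broadly matching the
 * IEC 60958 channel status encoding.
 */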
/*
* enable/disable the HDMI engine
*/
void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
if (!offset)
return;
DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
/* some versions of atombios ignore the enable HDMI flag
* so enabling/disabling HDMI was moved here for TMDS1+2 */
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* This part is doubtful in my opinion */
WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
break;
default:
DRM_ERROR("unknown HDMI output type\n");
break;
}
}
/*
* determine at which register offset the HDMI encoder is
*/
void r600_hdmi_init(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
switch (r600_audio_tmds_index(encoder)) {
case 0:
radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
break;
case 1:
radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
break;
default:
radeon_encoder->hdmi_offset = 0;
break;
}
break; /* without this break the UNIPHY2 case below would overwrite hdmi_offset */
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
radeon_encoder->hdmi_offset = R600_HDMI_DIG;
break;
default:
radeon_encoder->hdmi_offset = 0;
break;
}
DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
/* TODO: make this configurable */
radeon_encoder->hdmi_audio_workaround = 0;
}

View File

@ -110,5 +110,79 @@
#define R600_BIOS_6_SCRATCH 0x173c #define R600_BIOS_6_SCRATCH 0x173c
#define R600_BIOS_7_SCRATCH 0x1740 #define R600_BIOS_7_SCRATCH 0x1740
/* Audio, these regs were reverse engineered,
* so the chance is high that the naming is wrong
* R6xx+ ??? */
/* Audio clocks */
#define R600_AUDIO_PLL1_MUL 0x0514
#define R600_AUDIO_PLL1_DIV 0x0518
#define R600_AUDIO_PLL2_MUL 0x0524
#define R600_AUDIO_PLL2_DIV 0x0528
#define R600_AUDIO_CLK_SRCSEL 0x0534
/* Audio general */
#define R600_AUDIO_ENABLE 0x7300
#define R600_AUDIO_TIMING 0x7344
/* Audio params */
#define R600_AUDIO_VENDOR_ID 0x7380
#define R600_AUDIO_REVISION_ID 0x7384
#define R600_AUDIO_ROOT_NODE_COUNT 0x7388
#define R600_AUDIO_NID1_NODE_COUNT 0x738c
#define R600_AUDIO_NID1_TYPE 0x7390
#define R600_AUDIO_SUPPORTED_SIZE_RATE 0x7394
#define R600_AUDIO_SUPPORTED_CODEC 0x7398
#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
#define R600_AUDIO_NID2_CAPS 0x73a0
#define R600_AUDIO_NID3_CAPS 0x73a4
#define R600_AUDIO_NID3_PIN_CAPS 0x73a8
/* Audio conn list */
#define R600_AUDIO_CONN_LIST_LEN 0x73ac
#define R600_AUDIO_CONN_LIST 0x73b0
/* Audio verbs */
#define R600_AUDIO_RATE_BPS_CHANNEL 0x73c0
#define R600_AUDIO_PLAYING 0x73c4
#define R600_AUDIO_IMPLEMENTATION_ID 0x73c8
#define R600_AUDIO_CONFIG_DEFAULT 0x73cc
#define R600_AUDIO_PIN_SENSE 0x73d0
#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
#define R600_AUDIO_STATUS_BITS 0x73d8
/* HDMI base register addresses */
#define R600_HDMI_TMDS1 0x7400
#define R600_HDMI_TMDS2 0x7700
#define R600_HDMI_DIG 0x7800
/* HDMI registers */
#define R600_HDMI_ENABLE 0x00
#define R600_HDMI_STATUS 0x04
#define R600_HDMI_CNTL 0x08
#define R600_HDMI_UNKNOWN_0 0x0C
#define R600_HDMI_AUDIOCNTL 0x10
#define R600_HDMI_VIDEOCNTL 0x14
#define R600_HDMI_VERSION 0x18
#define R600_HDMI_UNKNOWN_1 0x28
#define R600_HDMI_VIDEOINFOFRAME_0 0x54
#define R600_HDMI_VIDEOINFOFRAME_1 0x58
#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
#define R600_HDMI_VIDEOINFOFRAME_3 0x60
#define R600_HDMI_32kHz_CTS 0xac
#define R600_HDMI_32kHz_N 0xb0
#define R600_HDMI_44_1kHz_CTS 0xb4
#define R600_HDMI_44_1kHz_N 0xb8
#define R600_HDMI_48kHz_CTS 0xbc
#define R600_HDMI_48kHz_N 0xc0
#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
#define R600_HDMI_IEC60958_1 0xd4
#define R600_HDMI_IEC60958_2 0xd8
#define R600_HDMI_UNKNOWN_2 0xdc
#define R600_HDMI_AUDIO_DEBUG_0 0xe0
#define R600_HDMI_AUDIO_DEBUG_1 0xe4
#define R600_HDMI_AUDIO_DEBUG_2 0xe8
#define R600_HDMI_AUDIO_DEBUG_3 0xec
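/* The per-block HDMI registers above are offsets from one of the three base
 * addresses, e.g. the TMDS1 status register used by
 * r600_hdmi_is_audio_buffer_filled() lives at
 * R600_HDMI_TMDS1 + R600_HDMI_STATUS = 0x7400 + 0x04 = 0x7404. */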
#endif #endif

View File

@ -882,4 +882,29 @@
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_0280E0_BASE_256B 0x00000000
#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
#define R_0280C0_CB_COLOR0_TILE 0x0280C0
#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_0280C0_BASE_256B 0x00000000
#define R_0280C4_CB_COLOR1_TILE 0x0280C4
#define R_0280C8_CB_COLOR2_TILE 0x0280C8
#define R_0280CC_CB_COLOR3_TILE 0x0280CC
#define R_0280D0_CB_COLOR4_TILE 0x0280D0
#define R_0280D4_CB_COLOR5_TILE 0x0280D4
#define R_0280D8_CB_COLOR6_TILE 0x0280D8
#define R_0280DC_CB_COLOR7_TILE 0x0280DC
#endif #endif

View File

@ -97,7 +97,7 @@ extern int radeon_testing;
extern int radeon_connector_table; extern int radeon_connector_table;
extern int radeon_tv; extern int radeon_tv;
extern int radeon_new_pll; extern int radeon_new_pll;
extern int radeon_audio;
typedef struct typedef struct
{ {
@ -225,6 +225,7 @@ struct radeon_fence_driver {
struct list_head created; struct list_head created;
struct list_head emited; struct list_head emited;
struct list_head signaled; struct list_head signaled;
bool initialized;
}; };
struct radeon_fence { struct radeon_fence {
@ -265,8 +266,9 @@ struct radeon_surface_reg {
struct radeon_mman { struct radeon_mman {
struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_global_ref bo_global_ref;
struct ttm_global_reference mem_global_ref; struct ttm_global_reference mem_global_ref;
bool mem_global_referenced;
struct ttm_bo_device bdev; struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
}; };
struct radeon_bo { struct radeon_bo {
@ -379,10 +381,12 @@ struct radeon_mc {
u64 real_vram_size; u64 real_vram_size;
int vram_mtrr; int vram_mtrr;
bool vram_is_ddr; bool vram_is_ddr;
bool igp_sideport_enabled;
}; };
int radeon_mc_setup(struct radeon_device *rdev); int radeon_mc_setup(struct radeon_device *rdev);
bool radeon_combios_sideport_present(struct radeon_device *rdev);
bool radeon_atombios_sideport_present(struct radeon_device *rdev);
/* /*
* GPU scratch registers structures, functions & helpers * GPU scratch registers structures, functions & helpers
@ -420,12 +424,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
* CP & ring. * CP & ring.
*/ */
struct radeon_ib { struct radeon_ib {
struct list_head list; struct list_head list;
unsigned long idx; unsigned long idx;
uint64_t gpu_addr; uint64_t gpu_addr;
struct radeon_fence *fence; struct radeon_fence *fence;
uint32_t *ptr; uint32_t *ptr;
uint32_t length_dw; uint32_t length_dw;
}; };
/* /*
@ -433,28 +437,28 @@ struct radeon_ib {
* mutex protects scheduled_ibs, ready, alloc_bm * mutex protects scheduled_ibs, ready, alloc_bm
*/ */
struct radeon_ib_pool { struct radeon_ib_pool {
// struct mutex mutex; // struct mutex mutex;
struct radeon_bo *robj; struct radeon_bo *robj;
struct list_head scheduled_ibs; struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready; bool ready;
DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
}; };
struct radeon_cp { struct radeon_cp {
struct radeon_bo *ring_obj; struct radeon_bo *ring_obj;
volatile uint32_t *ring; volatile uint32_t *ring;
unsigned rptr; unsigned rptr;
unsigned wptr; unsigned wptr;
unsigned wptr_old; unsigned wptr_old;
unsigned ring_size; unsigned ring_size;
unsigned ring_free_dw; unsigned ring_free_dw;
int count_dw; int count_dw;
uint64_t gpu_addr; uint64_t gpu_addr;
uint32_t align_mask; uint32_t align_mask;
uint32_t ptr_mask; uint32_t ptr_mask;
// struct mutex mutex; // struct mutex mutex;
bool ready; bool ready;
}; };
/* /*
@ -468,7 +472,6 @@ struct r600_ih {
unsigned wptr_old; unsigned wptr_old;
unsigned ring_size; unsigned ring_size;
uint64_t gpu_addr; uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask; uint32_t ptr_mask;
spinlock_t lock; spinlock_t lock;
bool enabled; bool enabled;
@ -507,8 +510,8 @@ struct radeon_cs_reloc {
// struct drm_gem_object *gobj; // struct drm_gem_object *gobj;
struct radeon_bo *robj; struct radeon_bo *robj;
// struct radeon_bo_list lobj; // struct radeon_bo_list lobj;
uint32_t handle; uint32_t handle;
uint32_t flags; uint32_t flags;
}; };
struct radeon_cs_chunk { struct radeon_cs_chunk {
@ -690,7 +693,6 @@ struct radeon_asic {
uint32_t offset, uint32_t obj_size); uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg); int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev); void (*bandwidth_update)(struct radeon_device *rdev);
void (*hdp_flush)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev); void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev); void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@ -703,11 +705,14 @@ struct radeon_asic {
struct r100_asic { struct r100_asic {
const unsigned *reg_safe_bm; const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size; unsigned reg_safe_bm_size;
u32 hdp_cntl;
}; };
struct r300_asic { struct r300_asic {
const unsigned *reg_safe_bm; const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size; unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
}; };
struct r600_asic { struct r600_asic {
@ -822,8 +827,17 @@ struct radeon_device {
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */ const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */ const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
struct r600_blit r600_blit; struct r600_blit r600_blit;
int msi_enabled; /* msi enabled */ int msi_enabled; /* msi enabled */
/* audio stuff */
// struct timer_list audio_timer;
int audio_channels;
int audio_rate;
int audio_bits_per_sample;
uint8_t audio_status_bits;
uint8_t audio_category_code;
}; };
int radeon_device_init(struct radeon_device *rdev, int radeon_device_init(struct radeon_device *rdev,
@ -842,7 +856,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{ {
if (reg < 0x10000) if (reg < rdev->rmmio_size)
return readl(((void __iomem *)rdev->rmmio) + reg); return readl(((void __iomem *)rdev->rmmio) + reg);
else { else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@ -852,7 +866,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{ {
if (reg < 0x10000) if (reg < rdev->rmmio_size)
writel(v, ((void __iomem *)rdev->rmmio) + reg); writel(v, ((void __iomem *)rdev->rmmio) + reg);
else { else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@ -1004,13 +1018,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) #define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
/* Common functions */ /* Common functions */
/* AGP */
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev); extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev); extern void radeon_modeset_fini(struct radeon_device *rdev);
@ -1024,6 +1039,7 @@ extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save { struct r100_mc_save {
@ -1153,6 +1169,22 @@ extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev); extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev); extern int r600_irq_set(struct radeon_device *rdev);
extern void r600_irq_suspend(struct radeon_device *rdev);
/* r600 audio */
extern int r600_audio_init(struct radeon_device *rdev);
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
extern void r600_audio_fini(struct radeon_device *rdev);
extern void r600_hdmi_init(struct drm_encoder *encoder);
extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
int channels,
int rate,
int bps,
uint8_t status_bits,
uint8_t category_code);
#include "radeon_object.h" #include "radeon_object.h"
@ -1164,7 +1196,7 @@ resource_size_t
drm_get_resource_len(struct drm_device *dev, unsigned int resource); drm_get_resource_len(struct drm_device *dev, unsigned int resource);
bool set_mode(struct drm_device *dev, struct drm_connector *connector, bool set_mode(struct drm_device *dev, struct drm_connector *connector,
mode_t *mode, bool strict); videomode_t *mode, bool strict);
#endif #endif

View File

@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev)
ret = drm_agp_info(rdev->ddev, &info); ret = drm_agp_info(rdev->ddev, &info);
if (ret) { if (ret) {
drm_agp_release(rdev->ddev);
DRM_ERROR("Unable to get AGP info: %d\n", ret); DRM_ERROR("Unable to get AGP info: %d\n", ret);
return ret; return ret;
} }
if (rdev->ddev->agp->agp_info.aper_size < 32) {
drm_agp_release(rdev->ddev);
dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
"need at least 32M, disabling AGP\n",
rdev->ddev->agp->agp_info.aper_size);
return -EINVAL;
}
mode.mode = info.mode; mode.mode = info.mode;
agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
is_v3 = !!(agp_status & RADEON_AGPv3_MODE); is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev)
ret = drm_agp_enable(rdev->ddev, mode); ret = drm_agp_enable(rdev->ddev, mode);
if (ret) { if (ret) {
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
drm_agp_release(rdev->ddev);
return ret; return ret;
} }
@ -252,10 +263,8 @@ void radeon_agp_resume(struct radeon_device *rdev)
void radeon_agp_fini(struct radeon_device *rdev) void radeon_agp_fini(struct radeon_device *rdev)
{ {
#if __OS_HAS_AGP #if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
if (rdev->ddev->agp && rdev->ddev->agp->acquired) { if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
drm_agp_release(rdev->ddev); drm_agp_release(rdev->ddev);
}
} }
#endif #endif
} }

View File

@ -33,6 +33,7 @@
*/ */
uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev); uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev); uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
@ -76,7 +77,6 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev); void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev); int r100_ring_test(struct radeon_device *rdev);
void r100_hdp_flush(struct radeon_device *rdev);
void r100_hpd_init(struct radeon_device *rdev); void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev); void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@ -106,14 +106,13 @@ static struct radeon_asic r100_asic = {
// .copy = &r100_copy_blit, // .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock, .get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = NULL, .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL, .set_memory_clock = NULL,
.set_pcie_lanes = NULL, .set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating, .set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update, .bandwidth_update = &r100_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init, .hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini, .hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense, .hpd_sense = &r100_hpd_sense,
@ -166,14 +165,13 @@ static struct radeon_asic r300_asic = {
// .copy = &r100_copy_blit, // .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock, .get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = NULL, .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL, .set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes, .set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating, .set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update, .bandwidth_update = &r100_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init, .hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini, .hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense, .hpd_sense = &r100_hpd_sense,
@ -217,7 +215,6 @@ static struct radeon_asic r420_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update, .bandwidth_update = &r100_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init, .hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini, .hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense, .hpd_sense = &r100_hpd_sense,
@ -259,14 +256,13 @@ static struct radeon_asic rs400_asic = {
// .copy = &r100_copy_blit, // .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock, .get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = NULL, .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL, .set_memory_clock = NULL,
.set_pcie_lanes = NULL, .set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating, .set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update, .bandwidth_update = &r100_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init, .hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini, .hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense, .hpd_sense = &r100_hpd_sense,
@ -323,7 +319,6 @@ static struct radeon_asic rs600_asic = {
.set_pcie_lanes = NULL, .set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating, .set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update, .bandwidth_update = &rs600_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init, .hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini, .hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense, .hpd_sense = &rs600_hpd_sense,
@ -371,7 +366,6 @@ static struct radeon_asic rs690_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update, .bandwidth_update = &rs690_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init, .hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini, .hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense, .hpd_sense = &rs600_hpd_sense,
@ -423,7 +417,6 @@ static struct radeon_asic rv515_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update, .bandwidth_update = &rv515_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init, .hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini, .hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense, .hpd_sense = &rs600_hpd_sense,
@ -466,7 +459,6 @@ static struct radeon_asic r520_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update, .bandwidth_update = &rv515_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init, .hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini, .hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense, .hpd_sense = &rs600_hpd_sense,
@ -507,7 +499,6 @@ int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev, int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset, uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence); unsigned num_pages, struct radeon_fence *fence);
void r600_hdp_flush(struct radeon_device *rdev);
void r600_hpd_init(struct radeon_device *rdev); void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev); void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@ -542,7 +533,6 @@ static struct radeon_asic r600_asic = {
.set_surface_reg = r600_set_surface_reg, .set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg, .clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update, .bandwidth_update = &rv515_bandwidth_update,
.hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init, .hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini, .hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense, .hpd_sense = &r600_hpd_sense,
@ -586,7 +576,6 @@ static struct radeon_asic rv770_asic = {
.set_surface_reg = r600_set_surface_reg, .set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg, .clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update, .bandwidth_update = &rv515_bandwidth_update,
.hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init, .hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini, .hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense, .hpd_sense = &r600_hpd_sense,

View File

@ -56,13 +56,13 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
else if (post_div == 3) else if (post_div == 3)
sclk >>= 2; sclk >>= 2;
else if (post_div == 4) else if (post_div == 4)
sclk >>= 4; sclk >>= 3;
return sclk; return sclk;
} }
/* 10 khz */ /* 10 khz */
static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
{ {
struct radeon_pll *mpll = &rdev->clock.mpll; struct radeon_pll *mpll = &rdev->clock.mpll;
uint32_t fb_div, ref_div, post_div, mclk; uint32_t fb_div, ref_div, post_div, mclk;
@ -86,7 +86,7 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
else if (post_div == 3) else if (post_div == 3)
mclk >>= 2; mclk >>= 2;
else if (post_div == 4) else if (post_div == 4)
mclk >>= 4; mclk >>= 3;
return mclk; return mclk;
} }

View File

@ -49,8 +49,10 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) { (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_needs_link_train(radeon_connector)) { if (radeon_dp_needs_link_train(radeon_connector)) {
if (connector->encoder) if (connector->encoder)
dp_link_train(connector->encoder, connector); dp_link_train(connector->encoder, connector);
@ -208,6 +210,18 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
drm_mode_set_name(mode); drm_mode_set_name(mode);
DRM_DEBUG("Adding native panel mode %s\n", mode->name); DRM_DEBUG("Adding native panel mode %s\n", mode->name);
} else if (native_mode->hdisplay != 0 &&
native_mode->vdisplay != 0) {
/* mac laptops without an edid */
/* Note that this is not necessarily the exact panel mode,
* but an approximation based on the cvt formula. For these
* systems we should ideally read the mode info out of the
* registers or add a mode table, but this works and is much
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
} }
return mode; return mode;
} }
@ -603,7 +617,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
ret = connector_status_connected; ret = connector_status_connected;
} }
} else { } else {
if (radeon_connector->dac_load_detect) { if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private; encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector); ret = encoder_funcs->detect(encoder, connector);
} }
@ -886,10 +900,18 @@ static void radeon_dvi_force(struct drm_connector *connector)
static int radeon_dvi_mode_valid(struct drm_connector *connector, static int radeon_dvi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode) struct drm_display_mode *mode)
{ {
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector *radeon_connector = to_radeon_connector(connector);
/* XXX check mode bandwidth */ /* XXX check mode bandwidth */
/* clocks over 135 MHz have heat issues with DVI on RV100 */
if (radeon_connector->use_digital &&
(rdev->family == CHIP_RV100) &&
(mode->clock > 135000))
return MODE_CLOCK_HIGH;
if (radeon_connector->use_digital && (mode->clock > 165000)) { if (radeon_connector->use_digital && (mode->clock > 165000)) {
if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
@ -955,7 +977,8 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
} }
sink_type = radeon_dp_getsinktype(radeon_connector); sink_type = radeon_dp_getsinktype(radeon_connector);
if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(sink_type == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_getdpcd(radeon_connector)) { if (radeon_dp_getdpcd(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type; radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected; ret = connector_status_connected;
@ -980,7 +1003,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
/* XXX check mode bandwidth */ /* XXX check mode bandwidth */
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return radeon_dp_mode_valid_helper(radeon_connector, mode); return radeon_dp_mode_valid_helper(radeon_connector, mode);
else else
return MODE_OK; return MODE_OK;
@ -1133,6 +1157,7 @@ radeon_add_atom_connector(struct drm_device *dev,
subpixel_order = SubPixelHorizontalRGB; subpixel_order = SubPixelHorizontalRGB;
break; break;
case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector) if (!radeon_dig_connector)
goto failed; goto failed;
@ -1145,9 +1170,15 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed; goto failed;
if (i2c_bus->valid) { if (i2c_bus->valid) {
/* add DP i2c bus */ /* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
else
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus) if (!radeon_dig_connector->dp_i2c_bus)
goto failed; goto failed;
if (connector_type == DRM_MODE_CONNECTOR_eDP)
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
else
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
if (!radeon_connector->ddc_bus) if (!radeon_connector->ddc_bus)
goto failed; goto failed;
@ -1171,7 +1202,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1); 1);
drm_connector_attach_property(&radeon_connector->base, drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property, rdev->mode_info.tv_std_property,
1); radeon_atombios_get_tv_info(rdev));
} }
break; break;
case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_LVDS:
@ -1315,7 +1346,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1); 1);
drm_connector_attach_property(&radeon_connector->base, drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property, rdev->mode_info.tv_std_property,
1); radeon_combios_get_tv_info(rdev));
} }
break; break;
case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_LVDS:

View File

@ -156,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
return ret; return ret;
} }
static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
return true;
default:
return false;
}
}
void void
radeon_link_encoder_connector(struct drm_device *dev) radeon_link_encoder_connector(struct drm_device *dev)
{ {
@ -202,7 +222,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector); radeon_connector = to_radeon_connector(connector);
if (radeon_encoder->devices & radeon_connector->devices) if (radeon_encoder->active_device & radeon_connector->devices)
return connector; return connector;
} }
return NULL; return NULL;
@ -233,6 +253,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
if (!ASIC_IS_AVIVO(rdev)) { if (!ASIC_IS_AVIVO(rdev)) {
adjusted_mode->hdisplay = mode->hdisplay; adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay; adjusted_mode->vdisplay = mode->vdisplay;
adjusted_mode->crtc_hdisplay = mode->hdisplay;
adjusted_mode->crtc_vdisplay = mode->vdisplay;
} }
adjusted_mode->base.id = mode_id; adjusted_mode->base.id = mode_id;
} }
@ -438,6 +460,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
union lvds_encoder_control args; union lvds_encoder_control args;
int index = 0; int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev; uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig; struct radeon_encoder_atom_dig *dig;
struct drm_connector *connector; struct drm_connector *connector;
@ -458,6 +481,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (!radeon_connector->con_priv) if (!radeon_connector->con_priv)
return; return;
if (drm_detect_hdmi_monitor(radeon_connector->edid))
hdmi_detected = 1;
dig_connector = radeon_connector->con_priv; dig_connector = radeon_connector->con_priv;
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
@ -487,13 +513,13 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
case 1: case 1:
args.v1.ucMisc = 0; args.v1.ucMisc = 0;
args.v1.ucAction = action; args.v1.ucAction = action;
if (drm_detect_hdmi_monitor(radeon_connector->edid)) if (hdmi_detected)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lvds_misc & (1 << 0)) if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lvds_misc & (1 << 1)) if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1); args.v1.ucMisc |= (1 << 1);
} else { } else {
if (dig_connector->linkb) if (dig_connector->linkb)
@ -512,7 +538,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->coherent_mode) if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
} }
if (drm_detect_hdmi_monitor(radeon_connector->edid)) if (hdmi_detected)
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0; args.v2.ucTruncate = 0;
@ -520,18 +546,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal = 0; args.v2.ucTemporal = 0;
args.v2.ucFRC = 0; args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lvds_misc & (1 << 0)) if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lvds_misc & (1 << 5)) { if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
if (dig->lvds_misc & (1 << 1)) if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
} }
if (dig->lvds_misc & (1 << 6)) { if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
if (dig->lvds_misc & (1 << 1)) if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
if (((dig->lvds_misc >> 2) & 0x3) == 2) if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
} }
} else { } else {
@ -552,7 +578,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
} }
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
r600_hdmi_enable(encoder, hdmi_detected);
} }
int int
@ -590,21 +616,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
return ATOM_ENCODER_MODE_LVDS; return ATOM_ENCODER_MODE_LVDS;
break; break;
case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = radeon_connector->con_priv; radeon_dig_connector = radeon_connector->con_priv;
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP; return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid)) else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI; return ATOM_ENCODER_MODE_HDMI;
else else
return ATOM_ENCODER_MODE_DVI; return ATOM_ENCODER_MODE_DVI;
break; break;
case CONNECTOR_DVI_A: case DRM_MODE_CONNECTOR_DVIA:
case CONNECTOR_VGA: case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT; return ATOM_ENCODER_MODE_CRT;
break; break;
case CONNECTOR_STV: case DRM_MODE_CONNECTOR_Composite:
case CONNECTOR_CTV: case DRM_MODE_CONNECTOR_SVIDEO:
case CONNECTOR_DIN: case DRM_MODE_CONNECTOR_9PinDIN:
/* fix me */ /* fix me */
return ATOM_ENCODER_MODE_TV; return ATOM_ENCODER_MODE_TV;
/*return ATOM_ENCODER_MODE_CV;*/ /*return ATOM_ENCODER_MODE_CV;*/
@ -668,31 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
if (ASIC_IS_DCE32(rdev)) { if (dig->dig_encoder)
if (dig->dig_block)
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
else else
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
num = dig->dig_block + 1; num = dig->dig_encoder + 1;
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
/* XXX doesn't really matter which dig encoder we pick as long as it's
* not already in use
*/
if (dig_connector->linkb)
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
else
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* Only dig2 encoder can drive LVTMA */
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
num = 2;
break;
}
}
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
@ -814,7 +822,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
} }
if (ASIC_IS_DCE32(rdev)) { if (ASIC_IS_DCE32(rdev)) {
if (dig->dig_block) if (dig->dig_encoder == 1)
args.v2.acConfig.ucEncoderSel = 1; args.v2.acConfig.ucEncoderSel = 1;
if (dig_connector->linkb) if (dig_connector->linkb)
args.v2.acConfig.ucLinkSel = 1; args.v2.acConfig.ucLinkSel = 1;
@ -841,17 +849,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v2.acConfig.fCoherentMode = 1; args.v2.acConfig.fCoherentMode = 1;
} }
} else { } else {
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
switch (radeon_encoder->encoder_id) { if (dig->dig_encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
/* XXX doesn't really matter which dig encoder we pick as long as it's
* not already in use
*/
if (dig_connector->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
else else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (rdev->flags & RADEON_IS_IGP) { if (rdev->flags & RADEON_IS_IGP) {
if (radeon_encoder->pixel_clock > 165000) { if (radeon_encoder->pixel_clock > 165000) {
if (dig_connector->igp_lane_info & 0x3) if (dig_connector->igp_lane_info & 0x3)
@ -870,10 +877,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
} }
} }
break; break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* Only dig2 encoder can drive LVTMA */
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
break;
} }
if (radeon_encoder->pixel_clock > 165000) if (radeon_encoder->pixel_clock > 165000)
@ -893,7 +896,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
} }
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
} }
static void
@@ -1039,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
union crtc_sourc_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
memset(&args, 0, sizeof(args));
@@ -1102,40 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (ASIC_IS_DCE32(rdev)) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (radeon_crtc->crtc_id)
+ dig = radeon_encoder->enc_priv;
if (dig->dig_encoder)
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- } else {
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return;
- radeon_connector = to_radeon_connector(connector);
- if (!radeon_connector->con_priv)
- return;
- dig_connector = radeon_connector->con_priv;
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- }
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
@@ -1162,7 +1141,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
static void
@@ -1196,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
}
static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
/* on DCE32 and encoder can driver any block so just crtc id */
if (ASIC_IS_DCE32(rdev)) {
return radeon_crtc->crtc_id;
}
/* on DCE3 - LVTMA can only be driven by DIGB */
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_test_encoder;
if (encoder == test_encoder)
continue;
if (!radeon_encoder_is_digital(test_encoder))
continue;
radeon_test_encoder = to_radeon_encoder(test_encoder);
dig = radeon_test_encoder->enc_priv;
if (dig->dig_encoder >= 0)
dig_enc_in_use |= (1 << dig->dig_encoder);
}
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
if (dig_enc_in_use & 0x2)
DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
return 1;
}
if (!(dig_enc_in_use & 1))
return 0;
return 1;
}
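/* Editor's note: the new radeon_atom_pick_dig_encoder() above chooses one of the
 * two DIG encoder blocks by building a bitmask of blocks already claimed by the
 * other active encoders. The stand-alone sketch below only illustrates that
 * allocation idea in plain C; fake_encoder and pick_free_block() are made-up
 * names, not driver API, and exactly two blocks are assumed (as on DCE3 parts).
 */
#include <stdio.h>

struct fake_encoder {
    int in_use;   /* is this encoder active at all? */
    int block;    /* -1 = no DIG block assigned yet */
};

/* Return the first DIG block not claimed by any other encoder; fall back to
 * block 1 when both are taken (mirrors the "stealing" case in the driver). */
static int pick_free_block(struct fake_encoder *pool, int count, int self)
{
    unsigned int used = 0;
    int i;

    for (i = 0; i < count; i++) {
        if (i == self || !pool[i].in_use || pool[i].block < 0)
            continue;
        used |= 1u << pool[i].block;
    }
    if (!(used & 1u))
        return 0;
    return 1;
}

int main(void)
{
    struct fake_encoder pool[3] = { {1, 0}, {1, -1}, {0, -1} };

    /* Encoder 1 asks for a block while encoder 0 already owns block 0. */
    pool[1].block = pick_free_block(pool, 3, 1);
    printf("encoder 1 got DIG block %d\n", pool[1].block); /* prints 1 */
    return 0;
}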
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -1208,12 +1227,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
- if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_encoder_atom_dig *dig;
+ if (dig)
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
- dig = radeon_encoder->enc_priv;
- dig->dig_block = radeon_crtc->crtc_id;
- }
}
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -1265,6 +1281,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
break;
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
r600_hdmi_setmode(encoder, adjusted_mode);
}
static bool
@@ -1371,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
if (radeon_encoder_is_digital(encoder)) {
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
radeon_encoder->active_device = 0;
}
@@ -1428,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
/* coherent mode by default */
dig->coherent_mode = true;
dig->dig_encoder = -1;
return dig;
}
@@ -1510,4 +1535,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
}
r600_hdmi_init(encoder);
}


@@ -65,7 +65,7 @@ static struct fb_ops radeonfb_ops = {
};
/**
- * Curretly it is assumed that the old framebuffer is reused.
+ * Currently it is assumed that the old framebuffer is reused.
*
* LOCKING
* caller should hold the mode config lock.


@@ -46,6 +46,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
int panel_pwr_delay = 2000;
bool is_mac = false;
DRM_DEBUG("\n");
if (radeon_encoder->enc_priv) {
@@ -58,6 +59,15 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
}
}
/* macs (and possibly some x86 oem systems?) wire up LVDS strangely
* Taken from radeonfb.
*/
if ((rdev->mode_info.connector_table == CT_IBOOK) ||
(rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
(rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
(rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
is_mac = true;
switch (mode) {
case DRM_MODE_DPMS_ON:
disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
@@ -74,6 +84,8 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
if (is_mac)
lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
@@ -85,7 +97,14 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
if (is_mac) {
lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
} else {
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
}
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
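/* Editor's note: the new is_mac path above powers the panel down in two register
 * writes (backlight modulation first, then the LVDS enable bits) instead of one
 * combined write. The sketch below shows only that ordering idea; reg_write(),
 * LVDS_BL_MOD_EN and LVDS_EN are stand-ins, not the driver's register interface.
 */
#include <stdint.h>
#include <stdio.h>

#define LVDS_BL_MOD_EN (1u << 0)
#define LVDS_EN        (1u << 1)

static void reg_write(const char *name, uint32_t val)
{
    printf("write %s = 0x%02x\n", name, (unsigned)val); /* stand-in for WREG32() */
}

static void lvds_power_off(uint32_t ctl, int two_step)
{
    if (two_step) {
        ctl &= ~LVDS_BL_MOD_EN;             /* 1) kill the backlight modulator */
        reg_write("LVDS_GEN_CNTL", ctl);
        ctl &= ~LVDS_EN;                    /* 2) then drop the panel enable */
    } else {
        ctl &= ~(LVDS_BL_MOD_EN | LVDS_EN); /* single combined write */
    }
    reg_write("LVDS_GEN_CNTL", ctl);
}

int main(void)
{
    lvds_power_off(LVDS_BL_MOD_EN | LVDS_EN, 1); /* mac-style two-step sequence */
    return 0;
}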
@@ -207,6 +226,8 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
*adjusted_mode = *native_mode;
adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay;
adjusted_mode->crtc_hdisplay = mode->hdisplay;
adjusted_mode->crtc_vdisplay = mode->vdisplay;
adjusted_mode->base.id = mode_id;
}


@@ -1,5 +1,4 @@
- #include <stdint.h>
#include <drm/drmP.h>
#include <drm.h>
#include <drm_mm.h>
@@ -60,7 +59,7 @@ int init_cursor(cursor_t *cursor)
radeon_object_kunmap(cursor->robj);
- cursor->header.destroy = destroy_cursor;
+ // cursor->header.destroy = destroy_cursor;
return 0;
};
@@ -101,7 +100,7 @@ cursor_t* __stdcall select_cursor(cursor_t *cursor)
old = rdisplay->cursor;
rdisplay->cursor = cursor;
- gpu_addr = cursor->robj->gpu_addr;
+ // gpu_addr = cursor->robj->gpu_addr;
if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS, gpu_addr);
@@ -163,7 +162,7 @@ void __stdcall move_cursor(cursor_t *cursor, int x, int y)
WREG32(RADEON_CUR_HORZ_VERT_POSN,
(RADEON_CUR_LOCK | (x << 16) | y));
- gpu_addr = cursor->robj->gpu_addr;
+ // gpu_addr = cursor->robj->gpu_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET,


@@ -1,5 +1,4 @@
- #include <stdint.h>
#include <drm/drmP.h>
#include <drm.h>
#include <drm_mm.h>
@@ -79,7 +78,7 @@ cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
old = rdisplay->cursor;
rdisplay->cursor = cursor;
- gpu_addr = cursor->robj->gpu_addr;
+ // gpu_addr = cursor->robj->gpu_addr;
if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
@@ -174,7 +173,7 @@ static char *manufacturer_name(unsigned char *x)
}
bool set_mode(struct drm_device *dev, struct drm_connector *connector,
- mode_t *reqmode, bool strict)
+ videomode_t *reqmode, bool strict)
{
struct drm_display_mode *mode = NULL, *tmpmode;
@@ -315,7 +314,7 @@ static struct drm_connector* get_def_connector(struct drm_device *dev)
return def_connector;
};
- bool init_display_kms(struct radeon_device *rdev, mode_t *usermode)
+ bool init_display_kms(struct radeon_device *rdev, videomode_t *usermode)
{
struct drm_device *dev;
@@ -383,7 +382,7 @@ bool init_display_kms(struct radeon_device *rdev, mode_t *usermode)
return retval;
};
- int get_modes(mode_t *mode, int *count)
+ int get_modes(videomode_t *mode, int *count)
{
int err = -1;
@@ -424,7 +423,7 @@ int get_modes(mode_t *mode, int *count)
return err;
}
- int set_user_mode(mode_t *mode)
+ int set_user_mode(videomode_t *mode)
{
int err = -1;


@@ -356,6 +356,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -395,6 +396,7 @@ static int rs400_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
// r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024);
// if (r) {
@@ -452,6 +454,8 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */
rs400_vram_info(rdev);
/* Initialize memory controller (also test AGP) */


@@ -56,6 +56,7 @@ int rs600_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xffffffffUL;
r = radeon_mc_setup(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -123,18 +124,19 @@ void rs600_hpd_init(struct radeon_device *rdev)
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
- rdev->irq.hpd[0] = true;
+ // rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
- rdev->irq.hpd[1] = true;
+ // rdev->irq.hpd[1] = true;
break;
default:
break;
}
}
- rs600_irq_set(rdev);
+ // if (rdev->irq.installed)
// rs600_irq_set(rdev);
}
void rs600_hpd_fini(struct radeon_device *rdev)
@@ -148,12 +150,12 @@ void rs600_hpd_fini(struct radeon_device *rdev)
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
- rdev->irq.hpd[0] = false;
+ // rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
- rdev->irq.hpd[1] = false;
+ // rdev->irq.hpd[1] = false;
break;
default:
break;
@@ -302,6 +304,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return 0;
}
/*
int rs600_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
@@ -311,6 +314,11 @@ int rs600_irq_set(struct radeon_device *rdev)
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
@@ -332,6 +340,7 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
return 0;
}
*/
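/* Editor's note: the new check at the top of rs600_irq_set() refuses to unmask
 * interrupt sources before an IRQ handler exists. Below is a generic sketch of
 * that guard-clause pattern only; irq_state and hw_write_int_mask() are made-up
 * names, not the driver's API.
 */
#include <stdio.h>
#include <errno.h>

struct irq_state {
    int installed;       /* has request_irq()-style setup already run? */
    unsigned int mask;   /* sources the caller wants enabled */
};

static void hw_write_int_mask(unsigned int mask)
{
    printf("INT_CNTL <- 0x%x\n", mask); /* stand-in for the register write */
}

static int irq_set(struct irq_state *st)
{
    if (!st->installed) {
        fprintf(stderr, "refusing to enable IRQs: no handler installed\n");
        hw_write_int_mask(0);  /* leave everything masked */
        return -EINVAL;
    }
    hw_write_int_mask(st->mask);
    return 0;
}

int main(void)
{
    struct irq_state st = { .installed = 0, .mask = 0x3 };
    printf("irq_set -> %d\n", irq_set(&st)); /* takes the -EINVAL path */
    return 0;
}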
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{
@ -500,6 +509,7 @@ static int rs600_startup(struct radeon_device *rdev)
return r; return r;
/* Enable IRQ */ /* Enable IRQ */
// rs600_irq_set(rdev); // rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */ /* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024); // r = r100_cp_init(rdev, 1024 * 1024);
// if (r) { // if (r) {


@@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev)
void rs690_vram_info(struct radeon_device *rdev)
{
- uint32_t tmp;
fixed20_12 a;
rs400_gart_adjust_size(rdev);
- /* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
- /* FIXME: is this correct for RS690/RS740 ? */
- tmp = RREG32(RADEON_MEM_CNTL);
- if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
rdev->mc.vram_width = 128;
- } else {
- rdev->mc.vram_width = 64;
- }
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
rdev->mc.mc_vram_size = rdev->mc.aper_size;
if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
@@ -161,6 +162,22 @@ void rs690_vram_info(struct radeon_device *rdev)
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}
static int rs690_mc_init(struct radeon_device *rdev)
{
int r;
u32 tmp;
/* Setup GPU memory space */
tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
}
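/* Editor's note: rs690_mc_init() above recovers the framebuffer start from the
 * MCCFG_FB_LOCATION register with a G_xxx field-extraction macro. The sketch
 * below shows the general shift-and-mask pattern behind such S_/G_ helpers; the
 * field layout used here (bits 15:0, 64 KiB units) is only an assumption for
 * the demo, not the real register definition.
 */
#include <stdint.h>
#include <stdio.h>

/* hypothetical 16-bit "FB_START" field in bits 15:0 of a config register */
#define S_FB_START(x)   (((uint32_t)(x) & 0xFFFF) << 0)   /* pack   */
#define G_FB_START(x)   (((uint32_t)(x) >> 0) & 0xFFFF)   /* unpack */

int main(void)
{
    uint32_t reg = S_FB_START(0x0040) | (1u << 31);        /* other bits untouched */
    uint64_t fb_base = (uint64_t)G_FB_START(reg) << 16;    /* field is in 64 KiB units */

    printf("fb_base = 0x%llx\n", (unsigned long long)fb_base); /* 0x400000 */
    return 0;
}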
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2)
@@ -244,8 +261,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
- a.full = rfixed_mul(wm->num_line_pair, b);
+ a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_div(a, c);
+ request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
@@ -374,6 +392,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -383,6 +402,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
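/* Editor's note: the reworked bandwidth math in the hunks above divides first,
 * multiplies by the line-pair count, and then rounds up with rfixed_ceil().
 * Below is a tiny self-contained 20.12 fixed-point sketch of those helpers to
 * show why the ceil step matters; the fx_* names are illustrative, not the
 * driver's fixed20_12 API.
 */
#include <stdint.h>
#include <stdio.h>

#define FX_SHIFT 12   /* 20.12 fixed point */

static uint32_t fx_const(uint32_t v) { return v << FX_SHIFT; }
static uint32_t fx_div(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a << FX_SHIFT) / b); }
static uint32_t fx_mul(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a * b) >> FX_SHIFT); }
static uint32_t fx_trunc(uint32_t a) { return a >> FX_SHIFT; }
static uint32_t fx_ceil(uint32_t a)   /* round up to a whole integer */
{
    uint32_t mask = (1u << FX_SHIFT) - 1;
    return (a + mask) & ~mask;
}

int main(void)
{
    /* e.g. 1680 pixels / 256-byte requests * 1 line pair = 6.5625 -> 7 requests */
    uint32_t a = fx_div(fx_const(1680), fx_const(256));
    uint32_t depth = fx_mul(a, fx_const(1));

    printf("truncated: %u, ceil: %u\n", fx_trunc(depth), fx_trunc(fx_ceil(depth)));
    return 0;
}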
@@ -607,6 +627,7 @@ static int rs690_startup(struct radeon_device *rdev)
/* Enable IRQ */
// rdev->irq.sw_int = true;
// rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024);
// if (r) {
@@ -659,10 +680,9 @@ int rs690_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
+ if (radeon_boot_test_post_card(rdev) == false)
- DRM_INFO("GPU not posted. posting now...\n");
+ return -EINVAL;
- atom_asic_init(rdev->mode_info.atom_context);
- }
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -670,7 +690,7 @@ int rs690_init(struct radeon_device *rdev)
/* Get vram informations */
rs690_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
+ r = rs690_mc_init(rdev);
if (r)
return r;
rv515_debugfs(rdev);
@@ -682,7 +702,7 @@ int rs690_init(struct radeon_device *rdev)
// if (r)
// return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);


@@ -489,6 +489,7 @@ static int rv515_startup(struct radeon_device *rdev)
/* Enable IRQ */
// rdev->irq.sw_int = true;
// rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024);
// if (r) {
@@ -543,10 +544,8 @@ int rv515_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
+ if (radeon_boot_test_post_card(rdev) == false)
- DRM_INFO("GPU not posted. posting now...\n");
+ return -EINVAL;
- atom_asic_init(rdev->mode_info.atom_context);
- }
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -567,7 +566,7 @@ int rv515_init(struct radeon_device *rdev)
// if (r)
// return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rv370_pcie_gart_init(rdev);
@@ -856,8 +855,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
- a.full = rfixed_mul(wm->num_line_pair, b);
+ a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_div(a, c);
+ request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
@@ -959,15 +959,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
estimated_width.full = rfixed_div(estimated_width, consumption_time);
if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
- wm->priority_mark.full = rfixed_const(10);
+ wm->priority_mark.full = wm->priority_mark_max.full;
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}


@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
//#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon.h"
@@ -33,9 +34,6 @@
#include "atom.h"
#include "avivod.h"
- #include <linux/firmware.h>
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
@@ -94,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -232,7 +230,7 @@ void r700_cp_stop(struct radeon_device *rdev)
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}
#if 0
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -267,6 +265,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
#endif
/*
* Core functions
@@ -777,7 +776,6 @@ int rv770_mc_init(struct radeon_device *rdev)
fixed20_12 a;
u32 tmp;
int chansize, numchan;
- int r;
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
@@ -820,9 +818,6 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r)
- return r;
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -935,7 +930,11 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev) && rdev->bios) {
+ if (!r600_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
}
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
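/* Editor's note: the reworked init path above only falls back to posting the GPU
 * through the AtomBIOS init table when a BIOS image is actually available, and
 * errors out otherwise. The sketch below captures that decision flow only;
 * card_posted(), have_bios() and run_asic_init() are illustrative stubs, not
 * driver functions.
 */
#include <stdio.h>
#include <errno.h>

static int card_posted(void)    { return 0; }  /* pretend the system BIOS skipped us */
static int have_bios(void)      { return 1; }
static void run_asic_init(void) { printf("running AtomBIOS ASIC init table\n"); }

static int post_card_if_needed(void)
{
    if (card_posted())
        return 0;                  /* nothing to do */
    if (!have_bios()) {
        fprintf(stderr, "card not posted and no BIOS image - giving up\n");
        return -EINVAL;
    }
    run_asic_init();               /* soft-post via the BIOS tables */
    return 0;
}

int main(void)
{
    return post_card_if_needed() ? 1 : 0;
}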
@@ -954,15 +953,18 @@ int rv770_init(struct radeon_device *rdev)
// r = radeon_fence_driver_init(rdev);
// if (r)
// return r;
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
radeon_agp_disable(rdev);
}
r = rv770_mc_init(rdev);
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
// rdev->cp.ring_obj = NULL;
// r600_ring_init(rdev, 1024 * 1024);
// if (!rdev->me_fw || !rdev->pfp_fw) {
// r = r600_cp_init_microcode(rdev);