RC11 preview

git-svn-id: svn://kolibrios.org@1986 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2011-06-29 05:52:36 +00:00
parent 486285bf0c
commit d2651e92d5
44 changed files with 1740 additions and 1165 deletions


@@ -590,8 +590,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;
#if 0
/* set the fb pointer */
for (i = 0; i < fb_helper->crtc_count; i++) {
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
@@ -611,7 +609,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (new_fb)
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
#endif
LEAVE();
@@ -926,9 +923,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
height = dev->mode_config.max_height;
/* clean out all the encoder/crtc combos */
-// list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
// encoder->crtc = NULL;
-// }
+}
crtcs = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
@@ -1017,8 +1014,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
}
drm_setup_crtcs(fb_helper);
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);


@@ -53,9 +53,12 @@ NAME_SRC= \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
radeon_device.c \
evergreen.c \
evergreen_blit_shaders.c \
radeon_clocks.c \
radeon_i2c.c \
atom.c \
radeon_gem.c \
radeon_atombios.c \
radeon_agp.c \
radeon_asic.c \
@@ -133,6 +136,8 @@ SRC_DEP:=
NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
$(patsubst %.c, %.o, $(NAME_SRC))))
all: $(NAME).dll
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) $(LIBPATH)/libcore.a $(LIBPATH)/libddk.a atikms.lds Makefile
@@ -148,6 +153,7 @@ $(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) $(LIBPATH)/libcore.a $
fwblob.o: fwblob.asm $(FW_BINS) Makefile
$(FASM) $< $@
clean:
-rm -f */*.o


@@ -1200,6 +1200,7 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP 0x14
// ucConfig
#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03


@@ -671,6 +671,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
DISPPLL_CONFIG_DUAL_LINK;
}
}
if (radeon_encoder_is_dp_bridge(encoder)) {
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
} else
args.v3.sInput.ucExtTransmitterID = 0;
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;


@@ -32,12 +32,14 @@
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -217,11 +219,455 @@ void evergreen_hpd_fini(struct radeon_device *rdev)
}
#endif
/* watermark setup */
static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
struct radeon_crtc *radeon_crtc,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
u32 tmp;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
* DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
* the display controllers. The partitioning is done via one of four
* preset allocations specified in bits 2:0:
* first display controller
* 0 - first half of lb (3840 * 2)
* 1 - first 3/4 of lb (5760 * 2)
* 2 - whole lb (7680 * 2), other crtc must be disabled
* 3 - first 1/4 of lb (1920 * 2)
* second display controller
* 4 - second half of lb (3840 * 2)
* 5 - second 3/4 of lb (5760 * 2)
* 6 - whole lb (7680 * 2), other crtc must be disabled
* 7 - last 1/4 of lb (1920 * 2)
*/
/* this can get tricky if we have two large displays on a paired group
* of crtcs. Ideally for multiple large displays we'd assign them to
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
if (other_mode)
tmp = 0; /* 1/2 */
else
tmp = 2; /* whole */
} else
tmp = 0;
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
case 4:
default:
if (ASIC_IS_DCE5(rdev))
return 4096 * 2;
else
return 3840 * 2;
case 1:
case 5:
if (ASIC_IS_DCE5(rdev))
return 6144 * 2;
else
return 5760 * 2;
case 2:
case 6:
if (ASIC_IS_DCE5(rdev))
return 8192 * 2;
else
return 7680 * 2;
case 3:
case 7:
if (ASIC_IS_DCE5(rdev))
return 2048 * 2;
else
return 1920 * 2;
}
}
/* controller not enabled, so no lb used */
return 0;
}
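To make the encoding concrete (an illustrative sketch, not part of the diff): the low bits pick the allocation for the first controller of a pair, and the second controller of the pair uses the same codes offset by 4. For a lone enabled crtc with an odd crtc_id:

	/* whole line buffer (code 2); second controller of the pair -> code 6 */
	u32 tmp = 2 + 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);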
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
u32 tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
default:
return 1;
case 1:
return 2;
case 2:
return 4;
case 3:
return 8;
}
}
struct evergreen_wm_params {
u32 dram_channels; /* number of dram channels */
u32 yclk; /* bandwidth per dram data pin in kHz */
u32 sclk; /* engine clock in kHz */
u32 disp_clk; /* display clock in kHz */
u32 src_width; /* viewport width */
u32 active_time; /* active display time in ns */
u32 blank_time; /* blank time in ns */
bool interlaced; /* mode is interlaced */
fixed20_12 vsc; /* vertical scale ratio */
u32 num_heads; /* number of active crtcs */
u32 bytes_per_pixel; /* bytes per pixel display + overlay */
u32 lb_size; /* line buffer allocated to pipe */
u32 vtaps; /* vertical scaler taps */
};
static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
/* Calculate DRAM Bandwidth and the part allocated to display. */
fixed20_12 dram_efficiency; /* 0.7 */
fixed20_12 yclk, dram_channels, bandwidth;
fixed20_12 a;
a.full = dfixed_const(1000);
yclk.full = dfixed_const(wm->yclk);
yclk.full = dfixed_div(yclk, a);
dram_channels.full = dfixed_const(wm->dram_channels * 4);
a.full = dfixed_const(10);
dram_efficiency.full = dfixed_const(7);
dram_efficiency.full = dfixed_div(dram_efficiency, a);
bandwidth.full = dfixed_mul(dram_channels, yclk);
bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
return dfixed_trunc(bandwidth);
}
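Stripped of the fixed-point plumbing, the function above reduces to a one-line formula; a plain-integer sketch (hypothetical helper, illustrative only):

	/* yclk is in kHz (see evergreen_wm_params), 4 bytes per channel,
	 * 0.7 DRAM efficiency -> result roughly in MB/s */
	static u32 dram_bandwidth_mb_s(u32 yclk_khz, u32 dram_channels)
	{
		return (yclk_khz / 1000) * dram_channels * 4 * 7 / 10;
	}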
static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
/* Calculate DRAM Bandwidth and the part allocated to display. */
fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
fixed20_12 yclk, dram_channels, bandwidth;
fixed20_12 a;
a.full = dfixed_const(1000);
yclk.full = dfixed_const(wm->yclk);
yclk.full = dfixed_div(yclk, a);
dram_channels.full = dfixed_const(wm->dram_channels * 4);
a.full = dfixed_const(10);
disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
bandwidth.full = dfixed_mul(dram_channels, yclk);
bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
return dfixed_trunc(bandwidth);
}
static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
/* Calculate the display Data return Bandwidth */
fixed20_12 return_efficiency; /* 0.8 */
fixed20_12 sclk, bandwidth;
fixed20_12 a;
a.full = dfixed_const(1000);
sclk.full = dfixed_const(wm->sclk);
sclk.full = dfixed_div(sclk, a);
a.full = dfixed_const(10);
return_efficiency.full = dfixed_const(8);
return_efficiency.full = dfixed_div(return_efficiency, a);
a.full = dfixed_const(32);
bandwidth.full = dfixed_mul(a, sclk);
bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
return dfixed_trunc(bandwidth);
}
static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
/* Calculate the DMIF Request Bandwidth */
fixed20_12 disp_clk_request_efficiency; /* 0.8 */
fixed20_12 disp_clk, bandwidth;
fixed20_12 a;
a.full = dfixed_const(1000);
disp_clk.full = dfixed_const(wm->disp_clk);
disp_clk.full = dfixed_div(disp_clk, a);
a.full = dfixed_const(10);
disp_clk_request_efficiency.full = dfixed_const(8);
disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
a.full = dfixed_const(32);
bandwidth.full = dfixed_mul(a, disp_clk);
bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
return dfixed_trunc(bandwidth);
}
static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
/* Calculate the display mode Average Bandwidth
* DisplayMode should contain the source and destination dimensions,
* timing, etc.
*/
fixed20_12 bpp;
fixed20_12 line_time;
fixed20_12 src_width;
fixed20_12 bandwidth;
fixed20_12 a;
a.full = dfixed_const(1000);
line_time.full = dfixed_const(wm->active_time + wm->blank_time);
line_time.full = dfixed_div(line_time, a);
bpp.full = dfixed_const(wm->bytes_per_pixel);
src_width.full = dfixed_const(wm->src_width);
bandwidth.full = dfixed_mul(src_width, bpp);
bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
bandwidth.full = dfixed_div(bandwidth, line_time);
return dfixed_trunc(bandwidth);
}
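Ignoring the fixed-point wrappers and taking vsc = 1.0, this is just the bytes fetched per line divided by the line time; a hedged sketch (not from the commit):

	/* active_time/blank_time are in ns; scaling by 1000 gives bytes/us,
	 * i.e. MB/s */
	static u32 average_bandwidth_mb_s(struct evergreen_wm_params *wm)
	{
		return wm->src_width * wm->bytes_per_pixel * 1000 /
		       (wm->active_time + wm->blank_time);
	}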
static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
/* First calculate the latency in ns */
u32 mc_latency = 2000; /* 2000 ns. */
u32 available_bandwidth = evergreen_available_bandwidth(wm);
u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
(wm->num_heads * cursor_line_pair_return_time);
u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
fixed20_12 a, b, c;
if (wm->num_heads == 0)
return 0;
a.full = dfixed_const(2);
b.full = dfixed_const(1);
if ((wm->vsc.full > a.full) ||
((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
(wm->vtaps >= 5) ||
((wm->vsc.full >= a.full) && wm->interlaced))
max_src_lines_per_dst_line = 4;
else
max_src_lines_per_dst_line = 2;
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
b.full = dfixed_const(1000);
c.full = dfixed_const(wm->disp_clk);
b.full = dfixed_div(c, b);
c.full = dfixed_const(wm->bytes_per_pixel);
b.full = dfixed_mul(b, c);
lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
c.full = dfixed_const(lb_fill_bw);
b.full = dfixed_div(c, b);
a.full = dfixed_div(a, b);
line_fill_time = dfixed_trunc(a);
if (line_fill_time < wm->active_time)
return latency;
else
return latency + (line_fill_time - wm->active_time);
}
static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
if (evergreen_average_bandwidth(wm) <=
(evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
return true;
else
return false;
};
static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
if (evergreen_average_bandwidth(wm) <=
(evergreen_available_bandwidth(wm) / wm->num_heads))
return true;
else
return false;
};
static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
u32 lb_partitions = wm->lb_size / wm->src_width;
u32 line_time = wm->active_time + wm->blank_time;
u32 latency_tolerant_lines;
u32 latency_hiding;
fixed20_12 a;
a.full = dfixed_const(1);
if (wm->vsc.full > a.full)
latency_tolerant_lines = 1;
else {
if (lb_partitions <= (wm->vtaps + 1))
latency_tolerant_lines = 1;
else
latency_tolerant_lines = 2;
}
latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
if (evergreen_latency_watermark(wm) <= latency_hiding)
return true;
else
return false;
}
static void evergreen_program_watermarks(struct radeon_device *rdev,
struct radeon_crtc *radeon_crtc,
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
struct evergreen_wm_params wm;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
u32 priority_a_cnt = PRIORITY_OFF;
u32 priority_b_cnt = PRIORITY_OFF;
u32 pipe_offset = radeon_crtc->crtc_id * 16;
u32 tmp, arb_control3;
fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
wm.yclk = rdev->pm.current_mclk * 10;
wm.sclk = rdev->pm.current_sclk * 10;
wm.disp_clk = mode->clock;
wm.src_width = mode->crtc_hdisplay;
wm.active_time = mode->crtc_hdisplay * pixel_period;
wm.blank_time = line_time - wm.active_time;
wm.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm.interlaced = true;
wm.vsc = radeon_crtc->vsc;
wm.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
wm.vtaps = 2;
wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm.lb_size = lb_size;
wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
wm.num_heads = num_heads;
/* set for high clocks */
latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
/* set for low clocks */
/* wm.yclk = low clk; wm.sclk = low clk */
latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
!evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
!evergreen_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
DRM_INFO("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
a.full = dfixed_const(1000);
b.full = dfixed_const(mode->clock);
b.full = dfixed_div(b, a);
c.full = dfixed_const(latency_watermark_a);
c.full = dfixed_mul(c, b);
c.full = dfixed_mul(c, radeon_crtc->hsc);
c.full = dfixed_div(c, a);
a.full = dfixed_const(16);
c.full = dfixed_div(c, a);
priority_a_mark = dfixed_trunc(c);
priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
a.full = dfixed_const(1000);
b.full = dfixed_const(mode->clock);
b.full = dfixed_div(b, a);
c.full = dfixed_const(latency_watermark_b);
c.full = dfixed_mul(c, b);
c.full = dfixed_mul(c, radeon_crtc->hsc);
c.full = dfixed_div(c, a);
a.full = dfixed_const(16);
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
}
/* select wm A */
arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
tmp = arb_control3;
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(1);
WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
(LATENCY_LOW_WATERMARK(latency_watermark_a) |
LATENCY_HIGH_WATERMARK(line_time)));
/* select wm B */
tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(2);
WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
(LATENCY_LOW_WATERMARK(latency_watermark_b) |
LATENCY_HIGH_WATERMARK(line_time)));
/* restore original selection */
WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
/* write the priority marks */
WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}
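The fixed-point block deriving priority_a_mark/priority_b_mark above boils down to converting the latency watermark from nanoseconds into pixels at the pixel clock, expressed in units of 16 pixels. Roughly, assuming hsc = 1.0 (sketch only):

	/* mode->clock is in kHz, latency_watermark_a in ns */
	u32 mark = latency_watermark_a * (mode->clock / 1000) / 1000 / 16;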
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
-/* XXX */
+struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
u32 num_heads = 0, lb_size;
int i;
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i]->base.enabled)
num_heads++;
}
for (i = 0; i < rdev->num_crtc; i += 2) {
mode0 = &rdev->mode_info.crtcs[i]->base.mode;
mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
}
}
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
@@ -608,10 +1054,25 @@ void evergreen_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
#if 0
/*
* CP.
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
/* set to DX10/11 mode */
radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(rdev, 1);
/* FIXME: implement */
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(rdev, ib->length_dw);
}
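A hypothetical caller sketch (not from this commit), using the same lock/commit pattern that evergreen_gpu_is_lockup() uses below; in the full driver this path is normally reached via radeon_ib_schedule():

	if (radeon_ring_lock(rdev, 64) == 0) {		/* reserve ring space */
		evergreen_ring_ib_execute(rdev, ib);	/* emit the IB packets */
		radeon_ring_unlock_commit(rdev);	/* kick the CP */
	}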
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
@@ -930,7 +1391,48 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
#endif
static void evergreen_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
case 2:
case 3:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
}
switch (rdev->family) {
case CHIP_HEMLOCK:
case CHIP_CYPRESS:
case CHIP_BARTS:
tcp_chan_steer_lo = 0x54763210;
tcp_chan_steer_hi = 0x0000ba98;
break;
case CHIP_JUNIPER:
case CHIP_REDWOOD:
case CHIP_CEDAR:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
default:
tcp_chan_steer_lo = 0x76543210;
tcp_chan_steer_hi = 0x0000ba98;
break;
}
WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
static void evergreen_gpu_init(struct radeon_device *rdev)
{
@@ -1359,9 +1861,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |= (3 << 0);
break;
}
-/* num banks is 8 on all fusion asics */
+/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
-rdev->config.evergreen.tile_config |= 8 << 4;
+rdev->config.evergreen.tile_config |= 1 << 4;
else
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
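Given the 0 = 4, 1 = 8, 2 = 16 encoding the new comment documents, bits 7:4 of tile_config decode with a shift; an illustrative one-liner (assumption, not in the diff):

	unsigned num_banks = 4 << ((tile_config >> 4) & 0xf);	/* 0->4, 1->8, 2->16 */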
@@ -1641,8 +2143,30 @@ int evergreen_mc_init(struct radeon_device *rdev)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
-/* FIXME: implement for evergreen */
+u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
int r;
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
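The 0x80000000 dwords written above decode as type-2 CP packets (bits 31:30 = 2): single-dword filler NOPs whose only job is to force the ring pointer forward; a hedged definition:

	#define PACKET2_NOP	0x80000000u	/* (2 << 30), no payload */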
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -1807,13 +2331,6 @@ static int evergreen_startup(struct radeon_device *rdev)
#endif
/* Enable IRQ */
-r = r600_irq_init(rdev);
-if (r) {
-DRM_ERROR("radeon: IH init failed (%d).\n", r);
-radeon_irq_kms_fini(rdev);
-return r;
-}
-// evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
@@ -1824,79 +2341,15 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_cp_resume(rdev);
if (r)
return r;
-/* write back buffer are not vital so don't worry about failure */
-r600_wb_enable(rdev);
return 0;
}
-int evergreen_resume(struct radeon_device *rdev)
-{
-int r;
-/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
-* posting will perform necessary task to bring back GPU into good
-* shape.
-*/
-/* post card */
-atom_asic_init(rdev->mode_info.atom_context);
-r = evergreen_startup(rdev);
-if (r) {
-DRM_ERROR("r600 startup failed on resume\n");
-return r;
-}
-#if 0
-r = r600_ib_test(rdev);
-if (r) {
-DRM_ERROR("radeon: failled testing IB (%d).\n", r);
-return r;
-}
-#endif
-return r;
-}
-int evergreen_suspend(struct radeon_device *rdev)
-{
-int r;
-/* FIXME: we should wait for ring to be empty */
-r700_cp_stop(rdev);
-rdev->cp.ready = false;
-r600_wb_disable(rdev);
-evergreen_pcie_gart_disable(rdev);
-#if 0
-/* unpin shaders bo */
-r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-if (likely(r == 0)) {
-radeon_bo_unpin(rdev->r600_blit.shader_obj);
-radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-}
-#endif
-return 0;
-}
-static bool evergreen_card_posted(struct radeon_device *rdev)
-{
-u32 reg;
-/* first check CRTCs */
-reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
-RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
-RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
-RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
-RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
-if (reg & EVERGREEN_CRTC_MASTER_EN)
-return true;
-/* then check MEM_SIZE, in case the crtcs are off */
-if (RREG32(CONFIG_MEMSIZE))
-return true;
-return false;
-}
/* Plan is to move initialization in that function and use
* helper function so that radeon_device_init pretty much
@@ -1908,9 +2361,6 @@ int evergreen_init(struct radeon_device *rdev)
{
int r;
-r = radeon_dummy_page_init(rdev);
-if (r)
-return r;
/* This don't do much */
r = radeon_gem_init(rdev);
if (r)
@@ -1922,14 +2372,19 @@ int evergreen_init(struct radeon_device *rdev)
}
/* Must be an ATOMBIOS */
if (!rdev->is_atom_bios) {
-dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
return -EINVAL;
}
r = radeon_atombios_init(rdev);
if (r)
return r;
/* reset the asic, the gfx blocks are often in a bad state
* after the driver is unloaded or after a resume
*/
if (radeon_asic_reset(rdev))
dev_warn(rdev->dev, "GPU reset failed !\n");
/* Post card if necessary */
-if (!evergreen_card_posted(rdev)) {
+if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
@@ -1944,9 +2399,6 @@ int evergreen_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
-// r = radeon_fence_driver_init(rdev);
-// if (r)
-// return r;
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
@@ -1962,9 +2414,6 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
-r = radeon_irq_kms_init(rdev);
-if (r)
-return r;
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
@@ -1980,41 +2429,62 @@ int evergreen_init(struct radeon_device *rdev)
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
-r700_cp_fini(rdev);
-r600_irq_fini(rdev);
-radeon_irq_kms_fini(rdev);
-evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
return 0;
}
-void evergreen_fini(struct radeon_device *rdev)
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
-/*r600_blit_fini(rdev);*/
-r700_cp_fini(rdev);
-r600_irq_fini(rdev);
-radeon_wb_fini(rdev);
-radeon_irq_kms_fini(rdev);
-evergreen_pcie_gart_fini(rdev);
-radeon_gem_fini(rdev);
-radeon_fence_driver_fini(rdev);
-radeon_agp_fini(rdev);
-radeon_bo_fini(rdev);
-radeon_atombios_fini(rdev);
-kfree(rdev->bios);
-rdev->bios = NULL;
-radeon_dummy_page_fini(rdev);
+u32 link_width_cntl, speed_cntl;
+if (radeon_pcie_gen2 == 0)
+return;
+if (rdev->flags & RADEON_IS_IGP)
+return;
+if (!(rdev->flags & RADEON_IS_PCIE))
+return;
+/* x2 cards have a special sequence */
+if (ASIC_IS_X2(rdev))
+return;
+speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+speed_cntl |= LC_GEN2_EN_STRAP;
+WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+} else {
+link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+if (1)
+link_width_cntl |= LC_UPCONFIGURE_DIS;
+else
+link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+}
}


@@ -0,0 +1,356 @@
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
*/
#include <linux/types.h>
#include <linux/kernel.h>
/*
* evergreen cards need to use the 3D engine to blit data which requires
* quite a bit of hw state setup. Rather than pull the whole 3D driver
* (which normally generates the 3D state) into the DRM, we opt to use
* statically generated state tables. The register state and shaders
* were hand generated to support blitting functionality. See the 3D
* driver or documentation for descriptions of the registers and
* shader instructions.
*/
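To make the tables below easier to read, here is a decode of the first packet, assuming the usual r600-family PACKET3 header layout, (3 << 30) | (count << 16) | (opcode << 8):

	/* 0xc0016900 -> type 3, count 1, opcode 0x69 (SET_CONTEXT_REG)
	 * 0x0000023b -> context-register offset, i.e. reg 0x28000 + 0x23b * 4
	 * 0x00000000 -> the one value written (SQ_LDS_ALLOC_PS = 0)
	 * likewise 0xc0066900 carries 6 values, 0xc0226900 carries 0x22, etc. */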
const u32 evergreen_default_state[] =
{
0xc0016900,
0x0000023b,
0x00000000, /* SQ_LDS_ALLOC_PS */
0xc0066900,
0x00000240,
0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xc0046900,
0x00000247,
0x00000000, /* SQ_GS_VERT_ITEMSIZE */
0x00000000,
0x00000000,
0x00000000,
0xc0026900,
0x00000010,
0x00000000, /* DB_Z_INFO */
0x00000000, /* DB_STENCIL_INFO */
0xc0016900,
0x00000200,
0x00000000, /* DB_DEPTH_CONTROL */
0xc0066900,
0x00000000,
0x00000060, /* DB_RENDER_CONTROL */
0x00000000, /* DB_COUNT_CONTROL */
0x00000000, /* DB_DEPTH_VIEW */
0x0000002a, /* DB_RENDER_OVERRIDE */
0x00000000, /* DB_RENDER_OVERRIDE2 */
0x00000000, /* DB_HTILE_DATA_BASE */
0xc0026900,
0x0000000a,
0x00000000, /* DB_STENCIL_CLEAR */
0x00000000, /* DB_DEPTH_CLEAR */
0xc0016900,
0x000002dc,
0x0000aa00, /* DB_ALPHA_TO_MASK */
0xc0016900,
0x00000080,
0x00000000, /* PA_SC_WINDOW_OFFSET */
0xc00d6900,
0x00000083,
0x0000ffff, /* PA_SC_CLIPRECT_RULE */
0x00000000, /* PA_SC_CLIPRECT_0_TL */
0x20002000, /* PA_SC_CLIPRECT_0_BR */
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0xaaaaaaaa, /* PA_SC_EDGERULE */
0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
0x0000000f, /* CB_TARGET_MASK */
0x0000000f, /* CB_SHADER_MASK */
0xc0226900,
0x00000094,
0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x00000000, /* PA_SC_VPORT_ZMIN_0 */
0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
0xc0016900,
0x000000d4,
0x00000000, /* SX_MISC */
0xc0026900,
0x00000292,
0x00000000, /* PA_SC_MODE_CNTL_0 */
0x00000000, /* PA_SC_MODE_CNTL_1 */
0xc0106900,
0x00000300,
0x00000000, /* PA_SC_LINE_CNTL */
0x00000000, /* PA_SC_AA_CONFIG */
0x00000005, /* PA_SU_VTX_CNTL */
0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
0xffffffff, /* PA_SC_AA_MASK */
0xc00d6900,
0x00000202,
0x00cc0010, /* CB_COLOR_CONTROL */
0x00000210, /* DB_SHADER_CONTROL */
0x00010000, /* PA_CL_CLIP_CNTL */
0x00000004, /* PA_SU_SC_MODE_CNTL */
0x00000100, /* PA_CL_VTE_CNTL */
0x00000000, /* PA_CL_VS_OUT_CNTL */
0x00000000, /* PA_CL_NANINF_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
0xc0066900,
0x000002de,
0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0xc0016900,
0x00000229,
0x00000000, /* SQ_PGM_START_FS */
0xc0016900,
0x0000022a,
0x00000000, /* SQ_PGM_RESOURCES_FS */
0xc0096900,
0x00000100,
0x00ffffff, /* VGT_MAX_VTX_INDX */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* SX_ALPHA_TEST_CONTROL */
0x00000000, /* CB_BLEND_RED */
0x00000000, /* CB_BLEND_GREEN */
0x00000000, /* CB_BLEND_BLUE */
0x00000000, /* CB_BLEND_ALPHA */
0xc0026900,
0x000002a8,
0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
0x00000000, /* */
0xc0026900,
0x000002ad,
0x00000000, /* VGT_REUSE_OFF */
0x00000000, /* */
0xc0116900,
0x00000280,
0x00000000, /* PA_SU_POINT_SIZE */
0x00000000, /* PA_SU_POINT_MINMAX */
0x00000008, /* PA_SU_LINE_CNTL */
0x00000000, /* PA_SC_LINE_STIPPLE */
0x00000000, /* VGT_OUTPUT_PATH_CNTL */
0x00000000, /* VGT_HOS_CNTL */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* VGT_GS_MODE */
0xc0016900,
0x000002a1,
0x00000000, /* VGT_PRIMITIVEID_EN */
0xc0016900,
0x000002a5,
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
0xc0016900,
0x000002d5,
0x00000000, /* VGT_SHADER_STAGES_EN */
0xc0026900,
0x000002e5,
0x00000000, /* VGT_STRMOUT_CONFIG */
0x00000000, /* */
0xc0016900,
0x000001e0,
0x00000000, /* CB_BLEND0_CONTROL */
0xc0016900,
0x000001b1,
0x00000000, /* SPI_VS_OUT_CONFIG */
0xc0016900,
0x00000187,
0x00000000, /* SPI_VS_OUT_ID_0 */
0xc0016900,
0x00000191,
0x00000100, /* SPI_PS_INPUT_CNTL_0 */
0xc00b6900,
0x000001b3,
0x20000001, /* SPI_PS_IN_CONTROL_0 */
0x00000000, /* SPI_PS_IN_CONTROL_1 */
0x00000000, /* SPI_INTERP_CONTROL_0 */
0x00000000, /* SPI_INPUT_Z */
0x00000000, /* SPI_FOG_CNTL */
0x00100000, /* SPI_BARYC_CNTL */
0x00000000, /* SPI_PS_IN_CONTROL_2 */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0xc0026900,
0x00000316,
0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
0x00000010, /* */
};
const u32 evergreen_vs[] =
{
0x00000004,
0x80800400,
0x0000a03c,
0x95000688,
0x00004000,
0x15200688,
0x00000000,
0x00000000,
0x3c000000,
0x67961001,
#ifdef __BIG_ENDIAN
0x000a0000,
#else
0x00080000,
#endif
0x00000000,
0x1c000000,
0x67961000,
#ifdef __BIG_ENDIAN
0x00020008,
#else
0x00000008,
#endif
0x00000000,
};
const u32 evergreen_ps[] =
{
0x00000003,
0xa00c0000,
0x00000008,
0x80400000,
0x00000000,
0x95200688,
0x00380400,
0x00146b10,
0x00380000,
0x20146b10,
0x00380400,
0x40146b00,
0x80380000,
0x60146b00,
0x00000000,
0x00000000,
0x00000010,
0x000d1000,
0xb0800000,
0x00000000,
};
const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);


@@ -0,0 +1,35 @@
/*
* Copyright 2009 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#ifndef EVERGREEN_BLIT_SHADERS_H
#define EVERGREEN_BLIT_SHADERS_H
extern const u32 evergreen_ps[];
extern const u32 evergreen_vs[];
extern const u32 evergreen_default_state[];
extern const u32 evergreen_ps_size, evergreen_vs_size;
extern const u32 evergreen_default_size;
#endif

17 binary files are not shown in this diff (presumably the radeon firmware blobs referenced from fwblob.asm below).


@@ -44,6 +44,8 @@ ___start_builtin_fw:
dd RS690CP_START
dd (RS690CP_END - RS690CP_START)
dd FIRMWARE_R600_ME
dd R600ME_START
dd (R600ME_END - R600ME_START)
@@ -88,6 +90,26 @@ ___start_builtin_fw:
dd RV770ME_START
dd (RV770ME_END - RV770ME_START)
dd FIRMWARE_CYPRESS_ME
dd CYPRESSME_START
dd (CYPRESSME_END - CYPRESSME_START)
dd FIRMWARE_REDWOOD_ME
dd REDWOODME_START
dd (REDWOODME_END - REDWOODME_START)
dd FIRMWARE_CEDAR_ME
dd CEDARME_START
dd (CEDARME_END - CEDARME_START)
dd FIRMWARE_JUNIPER_ME
dd JUNIPERME_START
dd (JUNIPERME_END - JUNIPERME_START)
dd FIRMWARE_PALM_ME
dd PALMME_START
dd (PALMME_END - PALMME_START)
dd FIRMWARE_RV610_PFP
dd RV610PFP_START
@@ -122,6 +144,28 @@ ___start_builtin_fw:
dd RV770PFP_START
dd (RV770PFP_END - RV770PFP_START)
dd FIRMWARE_CYPRESS_PFP
dd CYPRESSPFP_START
dd (CYPRESSPFP_END - CYPRESSPFP_START)
dd FIRMWARE_REDWOOD_PFP
dd REDWOODPFP_START
dd (REDWOODPFP_END - REDWOODPFP_START)
dd FIRMWARE_CEDAR_PFP
dd CEDARPFP_START
dd (CEDARPFP_END - CEDARPFP_START)
dd FIRMWARE_JUNIPER_PFP
dd JUNIPERPFP_START
dd (JUNIPERPFP_END - JUNIPERPFP_START)
dd FIRMWARE_PALM_PFP
dd PALMPFP_START
dd (PALMPFP_END - PALMPFP_START)
dd FIRMWARE_R600_RLC
dd R600RLC_START
dd (R600RLC_END - R600RLC_START)
@@ -130,6 +174,22 @@ ___start_builtin_fw:
dd R700RLC_START
dd (R700RLC_END - R700RLC_START)
dd FIRMWARE_CYPRESS_RLC
dd CYPRESSRLC_START
dd (CYPRESSRLC_END - CYPRESSRLC_START)
dd FIRMWARE_REDWOOD_RLC
dd REDWOODRLC_START
dd (REDWOODRLC_END - REDWOODRLC_START)
dd FIRMWARE_CEDAR_RLC
dd CEDARRLC_START
dd (CEDARRLC_END - CEDARRLC_START)
dd FIRMWARE_JUNIPER_RLC
dd JUNIPERRLC_START
dd (JUNIPERRLC_END - JUNIPERRLC_START)
___end_builtin_fw:
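Each entry in the table above is three dwords: a pointer to the firmware path string, the blob start, and the blob length. In C terms the loader presumably walks it as something like this sketch, mirroring Linux's struct builtin_fw (not code from this commit):

	struct builtin_fw {
		char *name;          /* e.g. FIRMWARE_CYPRESS_ME -> "radeon/CYPRESS_me.bin" */
		void *data;          /* CYPRESSME_START */
		unsigned long size;  /* CYPRESSME_END - CYPRESSME_START */
	};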
@@ -155,6 +215,12 @@ FIRMWARE_RV670_ME db 'radeon/RV670_me.bin',0
FIRMWARE_RV710_ME db 'radeon/RV710_me.bin',0
FIRMWARE_RV730_ME db 'radeon/RV730_me.bin',0
FIRMWARE_RV770_ME db 'radeon/RV770_me.bin',0
FIRMWARE_CYPRESS_ME db 'radeon/CYPRESS_me.bin',0
FIRMWARE_REDWOOD_ME db 'radeon/REDWOOD_me.bin',0
FIRMWARE_CEDAR_ME db 'radeon/CEDAR_me.bin',0
FIRMWARE_JUNIPER_ME db 'radeon/JUNIPER_me.bin',0
FIRMWARE_PALM_ME db 'radeon/PALM_me.bin',0
FIRMWARE_R600_PFP db 'radeon/R600_pfp.bin',0
FIRMWARE_RV610_PFP db 'radeon/RV610_pfp.bin',0
@@ -165,153 +231,235 @@ FIRMWARE_RV670_PFP db 'radeon/RV670_pfp.bin',0
FIRMWARE_RV710_PFP db 'radeon/RV710_pfp.bin',0
FIRMWARE_RV730_PFP db 'radeon/RV730_pfp.bin',0
FIRMWARE_RV770_PFP db 'radeon/RV770_pfp.bin',0
FIRMWARE_CYPRESS_PFP db 'radeon/CYPRESS_pfp.bin',0
FIRMWARE_REDWOOD_PFP db 'radeon/REDWOOD_pfp.bin',0
FIRMWARE_CEDAR_PFP db 'radeon/CEDAR_pfp.bin',0
FIRMWARE_JUNIPER_PFP db 'radeon/JUNIPER_pfp.bin',0
FIRMWARE_PALM_PFP db 'radeon/PALM_pfp.bin',0
FIRMWARE_R600_RLC db 'radeon/R600_rlc.bin',0
FIRMWARE_R700_RLC db 'radeon/R700_rlc.bin',0
FIRMWARE_CYPRESS_RLC db 'radeon/CYPRESS_rlc.bin',0
FIRMWARE_REDWOOD_RLC db 'radeon/REDWOOD_rlc.bin',0
FIRMWARE_CEDAR_RLC db 'radeon/CEDAR_rlc.bin',0
FIRMWARE_JUNIPER_RLC db 'radeon/JUNIPER_rlc.bin',0
align 16
R100CP_START:
-file 'firmware/R100_cp.bin'
+file 'firmware/r100_cp.bin'
R100CP_END:
align 16
R200CP_START:
-file 'firmware/R200_cp.bin'
+file 'firmware/r200_cp.bin'
R200CP_END:
align 16
R300CP_START:
-file 'firmware/R300_cp.bin'
+file 'firmware/r300_cp.bin'
R300CP_END:
align 16
R420CP_START:
-file 'firmware/R420_cp.bin'
+file 'firmware/r420_cp.bin'
R420CP_END:
align 16
R520CP_START:
-file 'firmware/R520_cp.bin'
+file 'firmware/r520_cp.bin'
R520CP_END:
align 16
RS600CP_START:
-file 'firmware/RS600_cp.bin'
+file 'firmware/rs600_cp.bin'
RS600CP_END:
align 16
RS690CP_START:
-file 'firmware/RS690_cp.bin'
+file 'firmware/rs690_cp.bin'
RS690CP_END:
align 16
RS780ME_START:
-file 'firmware/RS780_me.bin'
+file 'firmware/rs780_me.bin'
RS780ME_END:
align 16
RS780PFP_START:
-file 'firmware/RS780_pfp.bin'
+file 'firmware/rs780_pfp.bin'
RS780PFP_END:
align 16
R600ME_START:
-file 'firmware/R600_me.bin'
+file 'firmware/r600_me.bin'
R600ME_END:
align 16
RV610ME_START:
-file 'firmware/RV610_me.bin'
+file 'firmware/rv610_me.bin'
RV610ME_END:
align 16
RV620ME_START:
-file 'firmware/RV620_me.bin'
+file 'firmware/rv620_me.bin'
RV620ME_END:
align 16
RV630ME_START:
-file 'firmware/RV630_me.bin'
+file 'firmware/rv630_me.bin'
RV630ME_END:
align 16
RV635ME_START:
-file 'firmware/RV635_me.bin'
+file 'firmware/rv635_me.bin'
RV635ME_END:
align 16
RV670ME_START:
-file 'firmware/RV670_me.bin'
+file 'firmware/rv670_me.bin'
RV670ME_END:
align 16
RV710ME_START:
-file 'firmware/RV710_me.bin'
+file 'firmware/rv710_me.bin'
RV710ME_END:
align 16
RV730ME_START:
-file 'firmware/RV730_me.bin'
+file 'firmware/rv730_me.bin'
RV730ME_END:
align 16
RV770ME_START:
-file 'firmware/RV770_me.bin'
+file 'firmware/rv770_me.bin'
RV770ME_END:
align 16
CYPRESSME_START:
file 'firmware/CYPRESS_me.bin'
CYPRESSME_END:
align 16
REDWOODME_START:
file 'firmware/REDWOOD_me.bin'
REDWOODME_END:
align 16
CEDARME_START:
file 'firmware/CEDAR_me.bin'
CEDARME_END:
align 16
JUNIPERME_START:
file 'firmware/JUNIPER_me.bin'
JUNIPERME_END:
align 16
PALMME_START:
file 'firmware/PALM_me.bin'
PALMME_END:
align 16
RV610PFP_START:
-file 'firmware/RV610_pfp.bin'
+file 'firmware/rv610_pfp.bin'
RV610PFP_END:
align 16
RV620PFP_START:
-file 'firmware/RV620_pfp.bin'
+file 'firmware/rv620_pfp.bin'
RV620PFP_END:
align 16
RV630PFP_START:
-file 'firmware/RV630_pfp.bin'
+file 'firmware/rv630_pfp.bin'
RV630PFP_END:
align 16
RV635PFP_START:
-file 'firmware/RV635_pfp.bin'
+file 'firmware/rv635_pfp.bin'
RV635PFP_END:
align 16
RV670PFP_START:
-file 'firmware/RV670_pfp.bin'
+file 'firmware/rv670_pfp.bin'
RV670PFP_END:
align 16
RV710PFP_START:
-file 'firmware/RV710_pfp.bin'
+file 'firmware/rv710_pfp.bin'
RV710PFP_END:
align 16
RV730PFP_START:
-file 'firmware/RV730_pfp.bin'
+file 'firmware/rv730_pfp.bin'
RV730PFP_END:
align 16
RV770PFP_START:
-file 'firmware/RV770_pfp.bin'
+file 'firmware/rv770_pfp.bin'
RV770PFP_END:
align 16
CYPRESSPFP_START:
file 'firmware/CYPRESS_pfp.bin'
CYPRESSPFP_END:
align 16
REDWOODPFP_START:
file 'firmware/REDWOOD_pfp.bin'
REDWOODPFP_END:
align 16
CEDARPFP_START:
file 'firmware/CEDAR_pfp.bin'
CEDARPFP_END:
align 16
JUNIPERPFP_START:
file 'firmware/JUNIPER_pfp.bin'
JUNIPERPFP_END:
align 16
PALMPFP_START:
file 'firmware/PALM_pfp.bin'
PALMPFP_END:
align 16
R600RLC_START:
-file 'firmware/R600_rlc.bin'
+file 'firmware/r600_rlc.bin'
R600RLC_END:
align 16
R700RLC_START:
-file 'firmware/R700_rlc.bin'
+file 'firmware/r700_rlc.bin'
R700RLC_END:
align 16
CYPRESSRLC_START:
file 'firmware/CYPRESS_rlc.bin'
CYPRESSRLC_END:
align 16
REDWOODRLC_START:
file 'firmware/REDWOOD_rlc.bin'
REDWOODRLC_END:
align 16
CEDARRLC_START:
file 'firmware/CEDAR_rlc.bin'
CEDARRLC_END:
align 16
JUNIPERRLC_START:
file 'firmware/JUNIPER_rlc.bin'
JUNIPERRLC_END:


@@ -1548,6 +1548,14 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "PALM";
rlc_chip_name = "SUMO";
break;
case CHIP_SUMO:
chip_name = "SUMO";
rlc_chip_name = "SUMO";
break;
case CHIP_SUMO2:
chip_name = "SUMO2";
rlc_chip_name = "SUMO";
break;
default: BUG();
}


@@ -612,21 +612,139 @@ static struct radeon_asic rv770_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
};
-#if 0
static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
-.cp_commit = NULL,
+.cp_commit = &r600_cp_commit,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
-.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
// .copy_dma = &r600_copy_blit,
// .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
};
#if 0
static struct radeon_asic sumo_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
.copy_dma = &evergreen_copy_blit,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.gui_idle = &r600_gui_idle,
.pm_misc = &evergreen_pm_misc,
.pm_prepare = &evergreen_pm_prepare,
.pm_finish = &evergreen_pm_finish,
.pm_init_profile = &rs780_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
};
static struct radeon_asic btc_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = NULL,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
.copy_dma = &evergreen_copy_blit,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.gui_idle = &r600_gui_idle,
.pm_misc = &evergreen_pm_misc,
.pm_prepare = &evergreen_pm_prepare,
.pm_finish = &evergreen_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
};
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
.cp_commit = &r600_cp_commit,
.gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
@@ -641,16 +759,28 @@ static struct radeon_asic evergreen_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
-.hpd_init = &evergreen_hpd_init,
-.hpd_fini = &evergreen_hpd_fini,
-.hpd_sense = &evergreen_hpd_sense,
-.hpd_set_polarity = &evergreen_hpd_set_polarity,
+.gui_idle = &r600_gui_idle,
+.pm_misc = &evergreen_pm_misc,
+.pm_prepare = &evergreen_pm_prepare,
+.pm_finish = &evergreen_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
};
#endif
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
/* set the number of crtcs */
if (rdev->flags & RADEON_SINGLE_CRTC)
rdev->num_crtc = 1;
else
rdev->num_crtc = 2;
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -725,6 +855,18 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_RV740:
rdev->asic = &rv770_asic;
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_JUNIPER:
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
/* set num crtcs */
if (rdev->family == CHIP_CEDAR)
rdev->num_crtc = 4;
else
rdev->num_crtc = 6;
rdev->asic = &evergreen_asic;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -735,18 +877,6 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic->set_memory_clock = NULL;
}
-/* set the number of crtcs */
-if (rdev->flags & RADEON_SINGLE_CRTC)
-rdev->num_crtc = 1;
-else {
-if (ASIC_IS_DCE41(rdev))
-rdev->num_crtc = 2;
-else if (ASIC_IS_DCE4(rdev))
-rdev->num_crtc = 6;
-else
-rdev->num_crtc = 2;
-}
return 0;
}


@@ -2320,6 +2320,14 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
le16_to_cpu(clock_info->r600.usVDDC);
}
/* patch up vddc if necessary */
if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
u16 vddc;
if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
}
if (rdev->flags & RADEON_IS_IGP) {
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
@@ -2607,6 +2615,10 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
/* 0xff01 is a flag rather than an actual voltage */
if (voltage_level == 0xff01)
return;
switch (crev) {
case 1:
args.v1.ucVoltageType = voltage_type;
@ -2626,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
} }
int radeon_atom_get_max_vddc(struct radeon_device *rdev,
u16 *voltage)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
u8 frev, crev;
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return -EINVAL;
switch (crev) {
case 1:
return -EINVAL;
case 2:
args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
args.v2.ucVoltageMode = 0;
args.v2.usVoltageLevel = 0;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
return 0;
}
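
radeon_atom_get_max_vddc() returns 0 on success and fills *voltage from the SetVoltage table; the pplib hunk above uses it to resolve the 0xff01 sentinel. A minimal caller sketch (hypothetical, not part of the commit):

/* Hypothetical: resolve the 0xff01 "use max VDDC" marker to a real value. */
u16 vddc = rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage;
if (vddc == 0xff01 && radeon_atom_get_max_vddc(rdev, &vddc) != 0)
	vddc = 0;	/* table missing or too old; leave it unresolved */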
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{

View File

@@ -1553,9 +1553,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
(rdev->pdev->subsystem_device == 0x4a48)) {
/* Mac X800 */
rdev->mode_info.connector_table = CT_MAC_X800;
} else if ((of_machine_is_compatible("PowerMac7,2") ||
of_machine_is_compatible("PowerMac7,3")) &&
(rdev->pdev->device == 0x4150) &&
(rdev->pdev->subsystem_vendor == 0x1002) &&
(rdev->pdev->subsystem_device == 0x4150)) {
/* Mac G5 tower 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
} else
#endif /* CONFIG_PPC_PMAC */

View File

@@ -44,6 +44,8 @@ extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -836,6 +838,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
drm_get_connector_name(connector));
/* rs690 seems to have a problem with connectors not existing: it always
* returns a block of 0's. If we see this, just stop polling on this output */
if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
radeon_connector->ddc_bus = NULL;
}
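
The quirk keys off radeon_connector->base.null_edid_counter, i.e. the DDC transfer "succeeded" but every byte read back was zero. A hedged helper sketch of what such a null block looks like (illustrative only; the real counting lives in the DRM EDID core):

/* Illustrative: one 128-byte EDID block that reads back as all zeroes. */
static bool example_edid_block_is_null(const u8 *block)
{
	int i;
	for (i = 0; i < 128; i++)
		if (block[i])
			return false;
	return true;
}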
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
@@ -1063,10 +1072,11 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
int ret;
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_display_mode *mode;
if (!radeon_dig_connector->edp_on) if (!radeon_dig_connector->edp_on)
@@ -1078,7 +1088,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
ATOM_TRANSMITTER_ACTION_POWER_OFF);
if (ret > 0) {
encoder = radeon_best_single_encoder(connector);
if (encoder) {
radeon_fixup_lvds_native_mode(encoder, connector);
/* add scaled modes */
@@ -1102,8 +1111,14 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
} else {
/* need to setup ddc on the bridge */
if (radeon_connector_encoder_is_dp_bridge(connector)) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
ret = radeon_ddc_get_modes(radeon_connector);
}
return ret;
}
@@ -1187,14 +1202,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
@@ -1214,6 +1230,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
/* need to setup ddc on the bridge */
if (radeon_connector_encoder_is_dp_bridge(connector)) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
@@ -1228,6 +1249,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
}
}
if ((ret == connector_status_disconnected) &&
radeon_connector->dac_load_detect) {
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
struct drm_encoder_helper_funcs *encoder_funcs;
if (encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
}
radeon_connector_update_scratch_regs(connector, ret);
@@ -1242,7 +1273,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
/* XXX check mode bandwidth */
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
@@ -1401,6 +1433,10 @@ radeon_add_atom_connector(struct drm_device *dev,
default:
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1422,6 +1458,12 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:

View File

@@ -125,6 +125,8 @@ static const char radeon_family_name[][16] = {
"CYPRESS",
"HEMLOCK",
"PALM",
"SUMO",
"SUMO2",
"BARTS", "BARTS",
"TURKS", "TURKS",
"CAICOS", "CAICOS",
@@ -668,6 +670,7 @@ int radeon_device_init(struct radeon_device *rdev,
dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
@@ -804,9 +807,9 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_g4;
if( radeon_modeset )
init_display_kms(dev->dev_private, &usermode);
else
init_display(dev->dev_private, &usermode);
LEAVE();

View File

@@ -870,19 +870,6 @@ radeon_framebuffer_init(struct drm_device *dev,
drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
}
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
return NULL;
// obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
//
// return radeon_framebuffer_create(dev, mode_cmd, obj);
}
static const struct drm_mode_config_funcs radeon_mode_funcs = {

View File

@@ -367,7 +367,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
}
if (ASIC_IS_DCE3(rdev) &&
((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
radeon_encoder_is_dp_bridge(encoder))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
@@ -660,21 +661,16 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if (radeon_encoder_is_dp_bridge(encoder))
return ATOM_ENCODER_MODE_DP;
/* DVO is always DVO */
if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
return ATOM_ENCODER_MODE_DVO;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
return ATOM_ENCODER_MODE_DVI;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
default:
return ATOM_ENCODER_MODE_CRT;
}
}
/* if we don't have an active device yet, just use one of
* the connectors tied to the encoder.
*/
if (!connector)
connector = radeon_get_connector_for_encoder_init(encoder);
radeon_connector = to_radeon_connector(connector);
switch (connector->connector_type) {
@@ -1094,9 +1090,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
break;
}
if (is_dp) {
args.v2.acConfig.fCoherentMode = 1;
args.v2.acConfig.fDPConnector = 1;
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
if (radeon_encoder->pixel_clock > 165000)
@@ -1435,6 +1432,10 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
/* some early dce3.2 boards have a bug in their transmitter control table */
if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
else
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -1526,26 +1527,29 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
}
if (ext_encoder) {
int action;
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
if (ASIC_IS_DCE41(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (ASIC_IS_DCE41(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
break;
}
atombios_external_encoder_setup(encoder, ext_encoder, action);
}
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
@@ -2004,6 +2008,65 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
return connector_status_disconnected;
}
static enum drm_connector_status
radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
u32 bios_0_scratch;
if (!ASIC_IS_DCE4(rdev))
return connector_status_unknown;
if (!ext_encoder)
return connector_status_unknown;
if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
return connector_status_unknown;
/* load detect on the dp bridge */
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT2_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
return connector_status_connected; /* CTV */
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
return connector_status_connected; /* STV */
}
return connector_status_disconnected;
}
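
This detect hook fires the DACLOAD_DETECTION action on the DP bridge and then reads the result back from the BIOS 0 scratch register, mirroring the legacy DAC load-detect flow. A sketch of how connector code invokes such a hook (the same pattern the radeon_dp_detect() hunk above uses):

/* Illustrative caller: try encoder load detection as a last resort. */
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder_funcs->detect)
	ret = encoder_funcs->detect(encoder, connector);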
void
radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
{
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
if (ext_encoder)
/* ddc_setup on the dp bridge */
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
}
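
On boards that hang a panel or VGA header off a DP bridge, EDID also has to be fetched through the bridge, so connector probing calls this helper before touching DDC. The calling pattern (copied from the radeon_connectors.c hunks earlier in this commit):

/* Sketch: route DDC through the bridge before reading modes. */
if (radeon_connector_encoder_is_dp_bridge(connector)) {
	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
	if (encoder)
		radeon_atom_ext_encoder_setup_ddc(encoder);
}
ret = radeon_ddc_get_modes(radeon_connector);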
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2167,7 +2230,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
.mode_set = radeon_atom_encoder_mode_set,
.commit = radeon_atom_encoder_commit,
.disable = radeon_atom_encoder_disable,
.detect = radeon_atom_dig_detect,
};
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {

View File

@@ -24,6 +24,7 @@
* David Airlie
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include "drmP.h"
@@ -38,6 +39,10 @@
#include <drm_mm.h>
#include "radeon_object.h"
int radeonfb_create_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object **gobj_p);
/* object hierarchy -
this contains a helper + a radeon fb
the helper contains a pointer to radeon framebuffer baseclass.
@@ -86,95 +91,6 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tile
return aligned;
}
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
struct radeon_bo *rbo = gobj->driver_private;
int ret;
ret = radeon_bo_reserve(rbo, false);
if (likely(ret == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
// drm_gem_object_unreference_unlocked(gobj);
}
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
/* need to align pitch with crtc limits */
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
size = mode_cmd->pitch * mode_cmd->height;
aligned_size = ALIGN(size, PAGE_SIZE);
// ret = radeon_gem_object_create(rdev, aligned_size, 0,
// RADEON_GEM_DOMAIN_VRAM,
// false, true,
// &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
return -ENOMEM;
}
rbo = gobj->driver_private;
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
switch (mode_cmd->bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
case 16:
tiling_flags |= RADEON_TILING_SWAP_16BIT;
default:
break;
}
#endif
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
tiling_flags | RADEON_TILING_SURFACE,
mode_cmd->pitch);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
}
if (fb_tiled)
radeon_bo_check_tiling(rbo, 0, 0);
ret = radeon_bo_kmap(rbo, NULL);
radeon_bo_unreserve(rbo);
if (ret) {
goto out_unref;
}
*gobj_p = gobj;
return 0;
out_unref:
radeonfb_destroy_pinned_object(gobj);
*gobj_p = NULL;
return ret;
}
static int radeonfb_create(struct radeon_fbdev *rfbdev,
struct drm_fb_helper_surface_size *sizes)
@@ -201,8 +117,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.depth = sizes->surface_depth;
ret = radeonfb_create_object(rfbdev, &mode_cmd, &gobj);
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
@@ -213,7 +129,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
info->par = rfbdev;
#if 0
radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
fb = &rfbdev->rfb.base;
@@ -263,7 +178,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO("   pitch is %d\n", fb->pitch);
#endif
LEAVE();
@@ -307,7 +222,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
}
if (rfb->obj) {
radeonfb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
}
// drm_fb_helper_fini(&rfbdev->helper);
@@ -322,6 +236,8 @@ static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.fb_probe = radeon_fb_find_or_create_single,
};
extern struct radeon_fbdev *kos_rfbdev;
int radeon_fbdev_init(struct radeon_device *rdev)
{
struct radeon_fbdev *rfbdev;
@@ -353,6 +269,8 @@ int radeon_fbdev_init(struct radeon_device *rdev)
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
kos_rfbdev = rfbdev;
LEAVE();
return 0;
@@ -374,14 +292,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev)
struct radeon_bo *robj;
int size = 0;
robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
size += radeon_bo_size(robj);
return size;
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
}
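
The recurring driver_private -> gem_to_radeon_bo() substitution in these hunks reflects the radeon_bo now embedding its GEM object as gem_base. A plausible definition of the accessor (assumed here; it matches the container_of pattern the rest of the diff implies):

/* Assumed shape: recover the radeon_bo from its embedded GEM object. */
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)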

View File

@@ -33,6 +33,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h" #include "drmP.h"
#include "drm.h" #include "drm.h"
#include "radeon_reg.h" #include "radeon_reg.h"
@@ -56,9 +57,9 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
} else
radeon_fence_ring_emit(rdev, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emited = true;
list_move_tail(&fence->list, &rdev->fence_drv.emited);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
@@ -77,7 +78,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
if (seq != rdev->fence_drv.last_seq) {
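
The GPU writes the writeback buffer in little-endian, so the sequence number must be read through le32_to_cpu(); this is a no-op on x86 but a byte swap on big-endian hosts. Illustration (standard kernel byteorder helpers):

/* Illustrative only: portable read of a little-endian device word. */
u32 raw = rdev->wb.wb[scratch_index / 4];	/* LE data as DMA'd by the GPU */
u32 seq = le32_to_cpu(raw);			/* correct on LE and BE CPUs alike */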
@@ -118,8 +119,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
i = n;
do {
n = i->prev;
list_move_tail(i, &rdev->fence_drv.signaled);
list_add_tail(i, &rdev->fence_drv.signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
@@ -212,6 +212,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
retry:
/* save current sequence used to check for GPU lockup */
seq = rdev->fence_drv.last_seq;
trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
@@ -226,6 +227,7 @@ retry:
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
}
trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!radeon_fence_signaled(fence))) {
/* we were interrupted for some reason and fence isn't
* signaled yet, resume wait
@@ -319,7 +321,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
*fence = NULL;
if (tmp) {
kref_put(&tmp->kref, radeon_fence_destroy);
}
}

View File

@@ -38,9 +38,8 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
gobj->driver_private = NULL;
if (robj) {
radeon_bo_unref(&robj);
}
@@ -51,34 +50,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
bool discardable, bool kernel,
struct drm_gem_object **obj)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
*obj = NULL;
gobj = drm_gem_object_alloc(rdev->ddev, size);
if (!gobj) {
return -ENOMEM;
}
/* At least align on page size */
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
if (r) {
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", if (r != -ERESTARTSYS)
size, initial_domain, alignment); DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
return r; return r;
} }
gobj->driver_private = robj; *obj = &robj->gem_base;
*obj = gobj;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&robj->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
return 0;
}
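
With the GEM object embedded, callers now get their drm_gem_object pointer from &robj->gem_base instead of a separately allocated wrapper. A hedged usage sketch (buffer size and domain are made up for illustration):

/* Hypothetical: allocate a 64 KiB VRAM buffer and hand out its GEM object. */
struct radeon_bo *rbo;
struct drm_gem_object *gobj = NULL;
if (radeon_bo_create(rdev, 64 * 1024, PAGE_SIZE, false,
		     RADEON_GEM_DOMAIN_VRAM, &rbo) == 0)
	gobj = &rbo->gem_base;	/* the GEM object now lives inside the bo */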
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
r = radeon_bo_reserve(robj, false); r = radeon_bo_reserve(robj, false);
@ -91,7 +90,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
void radeon_gem_object_unpin(struct drm_gem_object *obj) void radeon_gem_object_unpin(struct drm_gem_object *obj)
{ {
struct radeon_bo *robj = obj->driver_private; struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r; int r;
r = radeon_bo_reserve(robj, false); r = radeon_bo_reserve(robj, false);
@ -109,7 +108,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
int r; int r;
/* FIXME: reeimplement */ /* FIXME: reeimplement */
robj = gobj->driver_private; robj = gem_to_radeon_bo(gobj);
/* work out where to validate the buffer to */ /* work out where to validate the buffer to */
domain = wdomain; domain = wdomain;
if (!domain) { if (!domain) {
@ -151,9 +150,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
{ {
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data; struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size; args->vram_size = rdev->mc.real_vram_size;
args->vram_visible = rdev->mc.real_vram_size; args->vram_visible = (u64)man->size << PAGE_SHIFT;
if (rdev->stollen_vga_memory) if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev); args->vram_visible -= radeon_fbdev_total_size(rdev);
@ -223,7 +225,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) { if (gobj == NULL) {
return -ENOENT; return -ENOENT;
} }
robj = gobj->driver_private; robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
@ -231,21 +233,29 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r; return r;
} }
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
gobj = drm_gem_object_lookup(dev, filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
*offset_p = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
if (gobj == NULL) {
return -ENOENT;
}
robj = gobj->driver_private;
args->addr_ptr = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
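
radeon_gem_mmap_ioctl() is now a thin wrapper over radeon_mode_dumb_mmap(), so the legacy ioctl and the dumb-buffer path share one implementation that returns the fake mmap offset. A hedged userspace-style flow (illustrative only; assumes the usual libdrm ioctl wrapper):

/* Illustrative: fetch the mmap offset for a handle, then map it. */
struct drm_radeon_gem_mmap args = { .handle = handle, .size = size };
if (drmIoctl(fd, DRM_IOCTL_RADEON_GEM_MMAP, &args) == 0)
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, args.addr_ptr);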
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -261,7 +271,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
@@ -291,7 +301,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
@@ -312,7 +322,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -ENOENT;
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
drm_gem_object_unreference_unlocked(gobj);
return r;

View File

@@ -795,8 +795,6 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct radeon_i2c_bus_rec *rec = &i2c->rec;
int ret = 0;
ENTER();
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -862,7 +860,6 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
ret = -EIO;
break;
}
LEAVE();
return ret;
}

View File

@@ -480,6 +480,8 @@ extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);

View File

@@ -1,764 +0,0 @@
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include <drm_mm.h>
#include "radeon_object.h"
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, u32_t *pagelist);
static struct drm_mm mm_gtt;
static struct drm_mm mm_vram;
int radeon_object_init(struct radeon_device *rdev)
{
int r = 0;
ENTER();
r = drm_mm_init(&mm_vram, 0xC00000 >> PAGE_SHIFT,
((rdev->mc.real_vram_size - 0xC00000) >> PAGE_SHIFT));
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
};
r = drm_mm_init(&mm_gtt, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT));
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
}
return r;
// return radeon_ttm_init(rdev);
}
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
uint32_t flags = 0;
if (domain & RADEON_GEM_DOMAIN_VRAM) {
flags |= TTM_PL_FLAG_VRAM;
}
if (domain & RADEON_GEM_DOMAIN_GTT) {
flags |= TTM_PL_FLAG_TT;
}
if (domain & RADEON_GEM_DOMAIN_CPU) {
flags |= TTM_PL_FLAG_SYSTEM;
}
if (!flags) {
flags |= TTM_PL_FLAG_SYSTEM;
}
return flags;
}
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
uint32_t flags;
int r;
if (kernel) {
type = ttm_bo_type_kernel;
} else {
type = ttm_bo_type_device;
}
*bo_ptr = NULL;
bo = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
if (bo == NULL) {
return -ENOMEM;
}
bo->rdev = rdev;
INIT_LIST_HEAD(&bo->list);
flags = radeon_object_flags_from_domain(domain);
bo->flags = flags;
if( flags & TTM_PL_FLAG_VRAM)
{
size_t num_pages;
struct drm_mm_node *vm_node;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
dbgprintf("Illegal buffer object size.\n");
return -EINVAL;
}
retry_pre_get:
r = drm_mm_pre_get(&mm_vram);
if (unlikely(r != 0))
return r;
vm_node = drm_mm_search_free(&mm_vram, num_pages, 0, 0);
if (unlikely(vm_node == NULL)) {
r = -ENOMEM;
return r;
}
bo->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
if (unlikely(bo->mm_node == NULL)) {
goto retry_pre_get;
}
bo->vm_addr = ((uint32_t)bo->mm_node->start);
// dbgprintf("alloc vram: base %x size %x\n",
// robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
};
if( flags & TTM_PL_FLAG_TT)
{
size_t num_pages;
struct drm_mm_node *vm_node;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
dbgprintf("Illegal buffer object size.\n");
return -EINVAL;
}
retry_pre_get1:
r = drm_mm_pre_get(&mm_gtt);
if (unlikely(r != 0))
return r;
vm_node = drm_mm_search_free(&mm_gtt, num_pages, 0, 0);
if (unlikely(vm_node == NULL)) {
r = -ENOMEM;
return r;
}
robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
if (unlikely(robj->mm_node == NULL)) {
goto retry_pre_get1;
}
bo->vm_addr = ((uint32_t)bo->mm_node->start) ;
// dbgprintf("alloc gtt: base %x size %x\n",
// robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
};
// r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
// 0, 0, false, NULL, size,
// &radeon_ttm_object_object_destroy);
if (unlikely(r != 0)) {
/* ttm call radeon_ttm_object_object_destroy if error happen */
DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
size, flags, 0);
return r;
}
*robj_ptr = robj;
// if (gobj) {
// list_add_tail(&robj->list, &rdev->gem.objects);
// }
return 0;
}
#define page_tabs 0xFDC00000
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
uint64_t *gpu_addr)
{
uint32_t flags;
uint32_t tmp;
int r = 0;
// flags = radeon_object_flags_from_domain(domain);
// spin_lock(&robj->tobj.lock);
if (robj->pin_count) {
robj->pin_count++;
if (gpu_addr != NULL) {
*gpu_addr = robj->gpu_addr;
}
// spin_unlock(&robj->tobj.lock);
return 0;
}
// spin_unlock(&robj->tobj.lock);
// r = radeon_object_reserve(robj, false);
// if (unlikely(r != 0)) {
// DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
// return r;
// }
// tmp = robj->tobj.mem.placement;
// ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
// robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
// r = ttm_buffer_object_validate(&robj->tobj,
// robj->tobj.proposed_placement,
// false, false);
robj->gpu_addr = ((u64)robj->vm_addr) << PAGE_SHIFT;
if(robj->flags & TTM_PL_FLAG_VRAM)
robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
else if (robj->flags & TTM_PL_FLAG_TT)
{
u32_t *pagelist;
robj->kptr = KernelAlloc( robj->mm_node->size << PAGE_SHIFT );
dbgprintf("kernel alloc %x\n", robj->kptr );
pagelist = &((u32_t*)page_tabs)[(u32_t)robj->kptr >> 12];
dbgprintf("pagelist %x\n", pagelist);
radeon_gart_bind(robj->rdev, robj->gpu_addr,
robj->mm_node->size, pagelist);
robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
}
else
{
DRM_ERROR("Unknown placement %d\n", robj->flags);
robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
r = -1;
};
// flags & TTM_PL_FLAG_VRAM
if (gpu_addr != NULL) {
*gpu_addr = robj->gpu_addr;
}
robj->pin_count = 1;
if (unlikely(r != 0)) {
DRM_ERROR("radeon: failed to pin object.\n");
}
return r;
}
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
int r = 0;
// spin_lock(&robj->tobj.lock);
if (robj->kptr) {
if (ptr) {
*ptr = robj->kptr;
}
// spin_unlock(&robj->tobj.lock);
return 0;
}
// spin_unlock(&robj->tobj.lock);
if(robj->flags & TTM_PL_FLAG_VRAM)
{
robj->cpu_addr = robj->rdev->mc.aper_base +
(robj->vm_addr << PAGE_SHIFT);
robj->kptr = (void*)MapIoMem(robj->cpu_addr,
robj->mm_node->size << 12, PG_SW);
}
else
{
return -1;
}
if (ptr) {
*ptr = robj->kptr;
}
return 0;
}
void radeon_object_kunmap(struct radeon_object *robj)
{
// spin_lock(&robj->tobj.lock);
if (robj->kptr == NULL) {
// spin_unlock(&robj->tobj.lock);
return;
}
if (robj->flags & TTM_PL_FLAG_VRAM)
{
FreeKernelSpace(robj->kptr);
robj->kptr = NULL;
}
// spin_unlock(&robj->tobj.lock);
}
void radeon_object_unpin(struct radeon_object *robj)
{
uint32_t flags;
int r;
// spin_lock(&robj->tobj.lock);
if (!robj->pin_count) {
// spin_unlock(&robj->tobj.lock);
printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
return;
}
robj->pin_count--;
if (robj->pin_count) {
// spin_unlock(&robj->tobj.lock);
return;
}
// spin_unlock(&robj->tobj.lock);
drm_mm_put_block(robj->mm_node);
kfree(robj);
}
#if 0
/*
* To exclude mutual BO access we rely on bo_reserve exclusion, as all
* function are calling it.
*/
static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}
static void radeon_object_unreserve(struct radeon_object *robj)
{
ttm_bo_unreserve(&robj->tobj);
}
static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
struct radeon_object *robj;
robj = container_of(tobj, struct radeon_object, tobj);
// list_del_init(&robj->list);
kfree(robj);
}
static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
/* Default gpu address */
robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
if (robj->tobj.mem.mm_node == NULL) {
return;
}
robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
switch (robj->tobj.mem.mem_type) {
case TTM_PL_VRAM:
robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
break;
case TTM_PL_TT:
robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
break;
default:
DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
return;
}
}
int radeon_object_create(struct radeon_device *rdev,
struct drm_gem_object *gobj,
unsigned long size,
bool kernel,
uint32_t domain,
bool interruptible,
struct radeon_object **robj_ptr)
{
struct radeon_object *robj;
enum ttm_bo_type type;
uint32_t flags;
int r;
// if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
// rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
// }
if (kernel) {
type = ttm_bo_type_kernel;
} else {
type = ttm_bo_type_device;
}
*robj_ptr = NULL;
robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
if (robj == NULL) {
return -ENOMEM;
}
robj->rdev = rdev;
robj->gobj = gobj;
// INIT_LIST_HEAD(&robj->list);
flags = radeon_object_flags_from_domain(domain);
// r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
// 0, 0, false, NULL, size,
// &radeon_ttm_object_object_destroy);
if (unlikely(r != 0)) {
/* ttm call radeon_ttm_object_object_destroy if error happen */
DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
size, flags, 0);
return r;
}
*robj_ptr = robj;
// if (gobj) {
// list_add_tail(&robj->list, &rdev->gem.objects);
// }
return 0;
}
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
int r;
// spin_lock(&robj->tobj.lock);
if (robj->kptr) {
if (ptr) {
*ptr = robj->kptr;
}
// spin_unlock(&robj->tobj.lock);
return 0;
}
// spin_unlock(&robj->tobj.lock);
r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
if (r) {
return r;
}
// spin_lock(&robj->tobj.lock);
robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
// spin_unlock(&robj->tobj.lock);
if (ptr) {
*ptr = robj->kptr;
}
return 0;
}
void radeon_object_kunmap(struct radeon_object *robj)
{
// spin_lock(&robj->tobj.lock);
if (robj->kptr == NULL) {
// spin_unlock(&robj->tobj.lock);
return;
}
robj->kptr = NULL;
// spin_unlock(&robj->tobj.lock);
ttm_bo_kunmap(&robj->kmap);
}
void radeon_object_unref(struct radeon_object **robj)
{
struct ttm_buffer_object *tobj;
if ((*robj) == NULL) {
return;
}
tobj = &((*robj)->tobj);
ttm_bo_unref(&tobj);
if (tobj == NULL) {
*robj = NULL;
}
}
int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
*offset = robj->tobj.addr_space_offset;
return 0;
}
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
uint64_t *gpu_addr)
{
uint32_t flags;
uint32_t tmp;
int r;
flags = radeon_object_flags_from_domain(domain);
// spin_lock(&robj->tobj.lock);
if (robj->pin_count) {
robj->pin_count++;
if (gpu_addr != NULL) {
*gpu_addr = robj->gpu_addr;
}
// spin_unlock(&robj->tobj.lock);
return 0;
}
// spin_unlock(&robj->tobj.lock);
r = radeon_object_reserve(robj, false);
if (unlikely(r != 0)) {
DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
return r;
}
tmp = robj->tobj.mem.placement;
ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
r = ttm_buffer_object_validate(&robj->tobj,
robj->tobj.proposed_placement,
false, false);
radeon_object_gpu_addr(robj);
if (gpu_addr != NULL) {
*gpu_addr = robj->gpu_addr;
}
robj->pin_count = 1;
if (unlikely(r != 0)) {
DRM_ERROR("radeon: failed to pin object.\n");
}
radeon_object_unreserve(robj);
return r;
}
void radeon_object_unpin(struct radeon_object *robj)
{
uint32_t flags;
int r;
// spin_lock(&robj->tobj.lock);
if (!robj->pin_count) {
// spin_unlock(&robj->tobj.lock);
printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
return;
}
robj->pin_count--;
if (robj->pin_count) {
// spin_unlock(&robj->tobj.lock);
return;
}
// spin_unlock(&robj->tobj.lock);
r = radeon_object_reserve(robj, false);
if (unlikely(r != 0)) {
DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
return;
}
flags = robj->tobj.mem.placement;
robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
r = ttm_buffer_object_validate(&robj->tobj,
robj->tobj.proposed_placement,
false, false);
if (unlikely(r != 0)) {
DRM_ERROR("radeon: failed to unpin buffer.\n");
}
radeon_object_unreserve(robj);
}
int radeon_object_wait(struct radeon_object *robj)
{
int r = 0;
/* FIXME: should use block reservation instead */
r = radeon_object_reserve(robj, true);
if (unlikely(r != 0)) {
DRM_ERROR("radeon: failed to reserve object for waiting.\n");
return r;
}
// spin_lock(&robj->tobj.lock);
if (robj->tobj.sync_obj) {
r = ttm_bo_wait(&robj->tobj, true, false, false);
}
// spin_unlock(&robj->tobj.lock);
radeon_object_unreserve(robj);
return r;
}
int radeon_object_evict_vram(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP) {
/* Useless to evict on IGP chips */
return 0;
}
return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
void radeon_object_force_delete(struct radeon_device *rdev)
{
struct radeon_object *robj, *n;
struct drm_gem_object *gobj;
if (list_empty(&rdev->gem.objects)) {
return;
}
DRM_ERROR("Userspace still has active objects !\n");
list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
mutex_lock(&rdev->ddev->struct_mutex);
gobj = robj->gobj;
DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
gobj, robj, (unsigned long)gobj->size,
*((unsigned long *)&gobj->refcount));
list_del_init(&robj->list);
radeon_object_unref(&robj);
gobj->driver_private = NULL;
drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
}
}
void radeon_object_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
}
void radeon_object_list_add_object(struct radeon_object_list *lobj,
struct list_head *head)
{
if (lobj->wdomain) {
list_add(&lobj->list, head);
} else {
list_add_tail(&lobj->list, head);
}
}
int radeon_object_list_reserve(struct list_head *head)
{
struct radeon_object_list *lobj;
struct list_head *i;
int r;
list_for_each(i, head) {
lobj = list_entry(i, struct radeon_object_list, list);
if (!lobj->robj->pin_count) {
r = radeon_object_reserve(lobj->robj, true);
if (unlikely(r != 0)) {
DRM_ERROR("radeon: failed to reserve object.\n");
return r;
}
} else {
}
}
return 0;
}
void radeon_object_list_unreserve(struct list_head *head)
{
struct radeon_object_list *lobj;
struct list_head *i;
list_for_each(i, head) {
lobj = list_entry(i, struct radeon_object_list, list);
if (!lobj->robj->pin_count) {
radeon_object_unreserve(lobj->robj);
} else {
}
}
}
int radeon_object_list_validate(struct list_head *head, void *fence)
{
struct radeon_object_list *lobj;
struct radeon_object *robj;
struct radeon_fence *old_fence = NULL;
struct list_head *i;
uint32_t flags;
int r;
r = radeon_object_list_reserve(head);
if (unlikely(r != 0)) {
radeon_object_list_unreserve(head);
return r;
}
list_for_each(i, head) {
lobj = list_entry(i, struct radeon_object_list, list);
robj = lobj->robj;
if (lobj->wdomain) {
flags = radeon_object_flags_from_domain(lobj->wdomain);
flags |= TTM_PL_FLAG_TT;
} else {
flags = radeon_object_flags_from_domain(lobj->rdomain);
flags |= TTM_PL_FLAG_TT;
flags |= TTM_PL_FLAG_VRAM;
}
if (!robj->pin_count) {
robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
r = ttm_buffer_object_validate(&robj->tobj,
robj->tobj.proposed_placement,
true, false);
if (unlikely(r)) {
radeon_object_list_unreserve(head);
DRM_ERROR("radeon: failed to validate.\n");
return r;
}
radeon_object_gpu_addr(robj);
}
lobj->gpu_offset = robj->gpu_addr;
if (fence) {
old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
robj->tobj.sync_obj = radeon_fence_ref(fence);
robj->tobj.sync_obj_arg = NULL;
}
if (old_fence) {
radeon_fence_unref(&old_fence);
}
}
return 0;
}
void radeon_object_list_unvalidate(struct list_head *head)
{
struct radeon_object_list *lobj;
struct radeon_fence *old_fence = NULL;
struct list_head *i;
list_for_each(i, head) {
lobj = list_entry(i, struct radeon_object_list, list);
old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
lobj->robj->tobj.sync_obj = NULL;
if (old_fence) {
radeon_fence_unref(&old_fence);
}
}
radeon_object_list_unreserve(head);
}
void radeon_object_list_clean(struct list_head *head)
{
radeon_object_list_unreserve(head);
}
int radeon_object_fbdev_mmap(struct radeon_object *robj,
struct vm_area_struct *vma)
{
return ttm_fbdev_mmap(vma, &robj->tobj);
}
#endif
unsigned long radeon_object_size(struct radeon_object *robj)
{
return robj->tobj.num_pages << PAGE_SHIFT;
}

View File

@@ -8,6 +8,26 @@
static struct drm_mm mm_gtt;
static struct drm_mm mm_vram;
/**
* Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj->dev = dev;
obj->filp = NULL;
atomic_set(&obj->handle_count, 0);
obj->size = size;
return 0;
}
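
Note that this port initializes the object with obj->filp = NULL, so unlike the upstream Linux helper there is no shmfs backing store behind it; the size is only checked and recorded. Usage inside this file follows the pattern the radeon_bo_create() hunk below shows (sketch, sized to whole pages because of the BUG_ON above):

/* Illustrative pairing: initialize the bo's embedded GEM object. */
r = drm_gem_object_init(rdev->ddev, &bo->gem_base, ALIGN(size, PAGE_SIZE));
if (unlikely(r)) {
	kfree(bo);
	return r;
}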
int drm_mm_alloc(struct drm_mm *mm, size_t num_pages,
struct drm_mm_node **node)
{
@@ -38,6 +58,7 @@ retry_pre_get:
};
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
u32 c = 0;
@@ -102,13 +123,12 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo)
}
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
enum ttm_bo_type type;
struct radeon_bo *bo;
size_t num_pages;
struct drm_mm *mman;
u32 bo_domain;
@@ -143,7 +163,13 @@ int radeon_bo_create(struct radeon_device *rdev,
if (bo == NULL)
return -ENOMEM;
r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
if (unlikely(r)) {
kfree(bo);
return r;
}
bo->rdev = rdev;
bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
bo->tbo.num_pages = num_pages;
bo->domain = domain;

View File

@@ -23,6 +23,7 @@
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
#define DRM_DEBUG_DRIVER(fmt, args...)

View File

@@ -6,6 +6,16 @@
#include "radeon.h"
#include "radeon_object.h"
#include "display.h"
#include "drm_fb_helper.h"
struct radeon_fbdev {
struct drm_fb_helper helper;
struct radeon_framebuffer rfb;
struct list_head fbdev_list;
struct radeon_device *rdev;
};
struct radeon_fbdev *kos_rfbdev;
static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor);
@@ -197,6 +207,11 @@ bool set_mode(struct drm_device *dev, struct drm_connector *connector,
{
struct drm_display_mode *mode = NULL, *tmpmode;
struct drm_fb_helper *fb_helper;
fb_helper = &kos_rfbdev->helper;
bool ret = false;
ENTER();
@@ -260,9 +275,12 @@ do_set:
dbgprintf("set mode %d %d connector %s encoder %s\n",
reqmode->width, reqmode->height, con_name, enc_name);
fb = fb_helper->fb;
fb->width = reqmode->width;
fb->height = reqmode->height;
fb->pitch = radeon_align_pitch(dev->dev_private, reqmode->width, 32, false) * ((32 + 1) / 8);
fb->bits_per_pixel = 32;
crtc->fb = fb;
crtc->enabled = true;
@@ -318,9 +336,6 @@ static struct drm_connector* get_def_connector(struct drm_device *dev)
struct drm_encoder *encoder;
struct drm_crtc *crtc;
dbgprintf("CONNECTOR %x ID: %d status %d encoder %x\n", connector,
connector->base.id, connector->status, connector->encoder);
if( connector->status != connector_status_connected)
continue;
@ -332,18 +347,25 @@ static struct drm_connector* get_def_connector(struct drm_device *dev)
connector->encoder = encoder; connector->encoder = encoder;
crtc = encoder->crtc; crtc = encoder->crtc;
dbgprintf("encoder %x crtc %x\n", encoder, crtc);
if(crtc == NULL) dbgprintf("CONNECTOR %x ID: %d status %d encoder %x\n crtc %x",
continue; connector, connector->base.id,
connector->status, connector->encoder,
crtc);
// if (crtc == NULL)
// continue;
def_connector = connector; def_connector = connector;
break; break;
}; };
return def_connector; return def_connector;
}; };
 bool init_display_kms(struct radeon_device *rdev, videomode_t *usermode)
 {
     struct drm_device *dev;
@ -352,6 +374,11 @@ bool init_display_kms(struct radeon_device *rdev, videomode_t *usermode)
     bool retval = false;
     u32_t ifl;
+
+    struct radeon_fbdev *rfbdev;
+    struct drm_fb_helper *fb_helper;
+    int i;
+
     ENTER();

     rdisplay = GetDisplay();
@ -367,6 +394,32 @@ bool init_display_kms(struct radeon_device *rdev, videomode_t *usermode)
     };
     safe_sti(ifl);

+    rfbdev = rdev->mode_info.rfbdev;
+    fb_helper = &rfbdev->helper;
+
+//    for (i = 0; i < fb_helper->crtc_count; i++)
+//    {
+        struct drm_mode_set *mode_set = &fb_helper->crtc_info[0].mode_set;
+        struct drm_crtc *crtc;
+        struct drm_display_mode *mode;
+
+        crtc = mode_set->crtc;
+
+//        if (!crtc->enabled)
+//            continue;
+
+        mode = mode_set->mode;
+
+        dbgprintf("crtc %d width %d height %d vrefresh %d\n",
+                  crtc->base.id,
+                  drm_mode_width(mode), drm_mode_height(mode),
+                  drm_mode_vrefresh(mode));
+//    }
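drm_mode_vrefresh() in the debug print above derives the refresh rate from the pixel clock and the total timings; roughly, ignoring the interlace/doublescan/vscan adjustments the helper also applies:

/* vrefresh ~= (mode->clock * 1000) / (mode->htotal * mode->vtotal);
 * e.g. a 65 MHz pixel clock with 1344 x 806 total timings gives ~60 Hz */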
     rdisplay->connector = get_def_connector(dev);
     if( rdisplay->connector == 0 )
     {
@ -374,7 +427,9 @@ bool init_display_kms(struct radeon_device *rdev, videomode_t *usermode)
         return false;
     };

-    rdisplay->crtc = rdisplay->connector->encoder->crtc;
+    rdisplay->crtc = rdisplay->connector->encoder->crtc = crtc;

     rdisplay->supported_modes = count_connector_modes(rdisplay->connector);

     dbgprintf("current mode %d x %d x %d\n",
@ -477,40 +532,72 @@ int set_user_mode(videomode_t *mode)
     return err;
 };

-#if 0
-void drm_helper_disable_unused_functions(struct drm_device *dev)
+int radeonfb_create_object(struct radeon_fbdev *rfbdev,
+                           struct drm_mode_fb_cmd *mode_cmd,
+                           struct drm_gem_object **gobj_p)
 {
-    struct drm_encoder *encoder;
-    struct drm_connector *connector;
-    struct drm_encoder_helper_funcs *encoder_funcs;
-    struct drm_crtc *crtc;
-
-    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-        if (!connector->encoder)
-            continue;
-        if (connector->status == connector_status_disconnected)
-            connector->encoder = NULL;
+    struct radeon_device *rdev = rfbdev->rdev;
+    struct drm_gem_object *gobj = NULL;
+    struct radeon_bo *rbo = NULL;
+    bool fb_tiled = false; /* useful for testing */
+    u32 tiling_flags = 0;
+    int ret;
+    int aligned_size, size;
+    int height = mode_cmd->height;
+
+    static struct radeon_bo kos_bo;
+    static struct drm_mm_node vm_node;
+
+    /* need to align pitch with crtc limits */
+    mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
+
+    if (rdev->family >= CHIP_R600)
+        height = ALIGN(mode_cmd->height, 8);
+    size = mode_cmd->pitch * height;
+    aligned_size = ALIGN(size, PAGE_SIZE);
+
+    ret = drm_gem_object_init(rdev->ddev, &kos_bo.gem_base, aligned_size);
+    if (unlikely(ret)) {
+        return ret;
     }

-    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-        encoder_funcs = encoder->helper_private;
-        if (!drm_helper_encoder_in_use(encoder)) {
-            if (encoder_funcs->disable)
-                (*encoder_funcs->disable)(encoder);
-            else
-                (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
-            /* disconnector encoder from any connector */
-            encoder->crtc = NULL;
-        }
+    kos_bo.rdev = rdev;
+    kos_bo.gem_base.driver_private = NULL;
+    kos_bo.surface_reg = -1;
+    kos_bo.domain = RADEON_GEM_DOMAIN_VRAM;
+
+    INIT_LIST_HEAD(&kos_bo.list);
+
+    gobj = &kos_bo.gem_base;
+    rbo = gem_to_radeon_bo(gobj);
+
+    if (fb_tiled)
+        tiling_flags = RADEON_TILING_MACRO;
+
+    if (tiling_flags) {
+        rbo->tiling_flags = tiling_flags | RADEON_TILING_SURFACE;
+        rbo->pitch = mode_cmd->pitch;
     }

-    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-        crtc->enabled = drm_helper_crtc_in_use(crtc);
-        if (!crtc->enabled) {
-            crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-            crtc->fb = NULL;
-        }
-    }
+    vm_node.size = 0xC00000 >> 12;
+    vm_node.start = 0;
+    vm_node.mm = NULL;
+
+    rbo->tbo.vm_node = &vm_node;
+    rbo->tbo.offset  = rbo->tbo.vm_node->start << PAGE_SHIFT;
+    rbo->tbo.offset += (u64)rbo->rdev->mc.vram_start;
+    rbo->kptr        = (void*)0xFE000000;
+    rbo->pin_count   = 1;
+
+//    if (fb_tiled)
+//        radeon_bo_check_tiling(rbo, 0, 0);
+
+    *gobj_p = gobj;
+    return 0;
 }
-#endif
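Two numbers in radeonfb_create_object() are worth spelling out; illustrative arithmetic only, assuming radeon_align_pitch() leaves a 1024-pixel width unchanged:

/* vm_node.size = 0xC00000 >> 12 = 3072 pages = 12 MiB of VRAM reserved for
 * the fbdev scanout buffer, mapped at the fixed kernel address 0xFE000000
 * (rbo->kptr) and offset into the GPU aperture at mc.vram_start.
 *
 * sample size for a 1024x768, 32 bpp mode:
 *   pitch        = 1024 * ((32 + 1) / 8)   = 4096 bytes
 *   height       = ALIGN(768, 8)           = 768 (R600 and newer)
 *   size         = 4096 * 768              = 3145728 bytes (3 MiB)
 *   aligned_size = ALIGN(size, PAGE_SIZE)  = 3 MiB (already page-aligned)
 * which fits comfortably inside the 12 MiB window. */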

View File

@ -26,6 +26,7 @@
  * Jerome Glisse
  */
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"

View File

@ -26,6 +26,7 @@
  * Jerome Glisse
  */
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "drmP.h"
 #include "rv515d.h"
 #include "radeon.h"