drm: 3.17-rc6

git-svn-id: svn://kolibrios.org@5139 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2014-09-22 12:59:19 +00:00
parent e584013fa6
commit 22d99e8448
12 changed files with 45 additions and 54 deletions

View File

@@ -1585,6 +1585,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
         pipe_config->adjusted_mode.flags |= flags;
 
+        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+            tmp & DP_COLOR_RANGE_16_235)
+                pipe_config->limited_color_range = true;
+
         pipe_config->has_dp_encoder = true;
         intel_dp_get_m_n(crtc, pipe_config);

View File

@@ -712,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
                                   struct intel_crtc_config *pipe_config)
 {
         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+        struct drm_device *dev = encoder->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
         u32 tmp, flags = 0;
         int dotclock;
@@ -734,6 +735,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
         if (tmp & HDMI_MODE_SELECT_HDMI)
                 pipe_config->has_audio = true;
 
+        if (!HAS_PCH_SPLIT(dev) &&
+            tmp & HDMI_COLOR_RANGE_16_235)
+                pipe_config->limited_color_range = true;
+
         pipe_config->adjusted_mode.flags |= flags;
 
         if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)

View File

@@ -1397,7 +1397,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                  */
                 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
                 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-                intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
+                intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
                 intel_ring_emit(ring, cs_offset);
                 intel_ring_emit(ring, 4096);
                 intel_ring_emit(ring, offset);

View File

@@ -489,13 +489,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
 {
         int r;
 
-        /* Reset dma */
-        WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
-        RREG32(SRBM_SOFT_RESET);
-        udelay(50);
-        WREG32(SRBM_SOFT_RESET, 0);
-        RREG32(SRBM_SOFT_RESET);
-
         r = cik_sdma_load_microcode(rdev);
         if (r)
                 return r;

View File

@@ -33,6 +33,8 @@
 #define KV_MINIMUM_ENGINE_CLOCK 800
 #define SMC_RAM_END 0x40000
 
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+                            bool enable);
 static void kv_init_graphics_levels(struct radeon_device *rdev);
 static int kv_calculate_ds_divider(struct radeon_device *rdev);
 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
@@ -1295,6 +1297,9 @@ void kv_dpm_disable(struct radeon_device *rdev)
 {
         kv_smc_bapm_enable(rdev, false);
 
+        if (rdev->family == CHIP_MULLINS)
+                kv_enable_nb_dpm(rdev, false);
+
         /* powerup blocks */
         kv_dpm_powergate_acp(rdev, false);
         kv_dpm_powergate_samu(rdev, false);
@@ -1769,16 +1774,25 @@ static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
         return ret;
 }
 
-static int kv_enable_nb_dpm(struct radeon_device *rdev)
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+                            bool enable)
 {
         struct kv_power_info *pi = kv_get_pi(rdev);
         int ret = 0;
 
-        if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
-                ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
-                if (ret == 0)
-                        pi->nb_dpm_enabled = true;
+        if (enable) {
+                if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+                        ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+                        if (ret == 0)
+                                pi->nb_dpm_enabled = true;
+                }
+        } else {
+                if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
+                        ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
+                        if (ret == 0)
+                                pi->nb_dpm_enabled = false;
+                }
         }
 
         return ret;
 }
@@ -1864,7 +1878,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                         }
                         kv_update_sclk_t(rdev);
                         if (rdev->family == CHIP_MULLINS)
-                                kv_enable_nb_dpm(rdev);
+                                kv_enable_nb_dpm(rdev, true);
                 }
         } else {
                 if (pi->enable_dpm) {
@@ -1889,7 +1903,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                         }
                         kv_update_acp_boot_level(rdev);
                         kv_update_sclk_t(rdev);
-                        kv_enable_nb_dpm(rdev);
+                        kv_enable_nb_dpm(rdev, true);
                 }
         }

View File

@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
         u32 reg_offset, wb_offset;
         int i, r;
 
-        /* Reset dma */
-        WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
-        RREG32(SRBM_SOFT_RESET);
-        udelay(50);
-        WREG32(SRBM_SOFT_RESET, 0);
-
         for (i = 0; i < 2; i++) {
                 if (i == 0) {
                         ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];

View File

@@ -821,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
                 return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * rdev: radeon device structure
+ * ring: ring buffer struct for emitting packets
+ */
+static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+        radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+                                RADEON_HDP_READ_BUFFER_INVALIDATE);
+        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+        radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
 /* Who ever call radeon_fence_emit should call ring_lock and ask
  * for enough space (today caller are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1056,20 +1070,6 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
         (void)RREG32(RADEON_CP_RB_WPTR);
 }
 
-/**
- * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
- * rdev: radeon device structure
- * ring: ring buffer struct for emitting packets
- */
-void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-        radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
-                                RADEON_HDP_READ_BUFFER_INVALIDATE);
-        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-        radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
-}
-
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
         const __be32 *fw_data;

View File

@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
         u32 rb_bufsz;
         int r;
 
-        /* Reset dma */
-        if (rdev->family >= CHIP_RV770)
-                WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
-        else
-                WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
-        RREG32(SRBM_SOFT_RESET);
-        udelay(50);
-        WREG32(SRBM_SOFT_RESET, 0);
-
         WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
         WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

View File

@@ -44,13 +44,6 @@
 #define R6XX_MAX_PIPES 8
 #define R6XX_MAX_PIPES_MASK 0xff
 
-/* PTE flags */
-#define PTE_VALID (1 << 0)
-#define PTE_SYSTEM (1 << 1)
-#define PTE_SNOOPED (1 << 2)
-#define PTE_READABLE (1 << 5)
-#define PTE_WRITEABLE (1 << 6)
-
 /* tiling bits */
 #define ARRAY_LINEAR_GENERAL 0x00000000
 #define ARRAY_LINEAR_ALIGNED 0x00000001

View File

@@ -185,7 +185,6 @@ static struct radeon_asic_ring r100_gfx_ring = {
         .get_rptr = &r100_gfx_get_rptr,
         .get_wptr = &r100_gfx_get_wptr,
         .set_wptr = &r100_gfx_set_wptr,
-        .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r100_asic = {
@@ -332,7 +331,6 @@ static struct radeon_asic_ring r300_gfx_ring = {
         .get_rptr = &r100_gfx_get_rptr,
         .get_wptr = &r100_gfx_get_wptr,
         .set_wptr = &r100_gfx_set_wptr,
-        .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r300_asic = {

View File

@@ -148,8 +148,7 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring);
 void r100_gfx_set_wptr(struct radeon_device *rdev,
                        struct radeon_ring *ring);
-void r100_ring_hdp_flush(struct radeon_device *rdev,
-                         struct radeon_ring *ring);
 /*
  * r200,rv250,rs300,rv280
  */

View File

@@ -221,9 +221,9 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
         entry = (lower_32_bits(addr) & PAGE_MASK) |
                 ((upper_32_bits(addr) & 0xff) << 4);
         if (flags & RADEON_GART_PAGE_READ)
-                addr |= RS400_PTE_READABLE;
+                entry |= RS400_PTE_READABLE;
         if (flags & RADEON_GART_PAGE_WRITE)
-                addr |= RS400_PTE_WRITEABLE;
+                entry |= RS400_PTE_WRITEABLE;
         if (!(flags & RADEON_GART_PAGE_SNOOP))
                 entry |= RS400_PTE_UNSNOOPED;
         entry = cpu_to_le32(entry);