drm: ati-3.17.3

git-svn-id: svn://kolibrios.org@5179 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2014-11-16 11:41:43 +00:00
parent 46307e7dc7
commit 340b70a352
14 changed files with 109 additions and 55 deletions

View File

@@ -4803,7 +4803,7 @@ struct bonaire_mqd
  */
 static int cik_cp_compute_resume(struct radeon_device *rdev)
 {
-    int r, i, idx;
+    int r, i, j, idx;
     u32 tmp;
     bool use_doorbell = true;
     u64 hqd_gpu_addr;
@@ -4922,7 +4922,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
         mqd->queue_state.cp_hqd_pq_wptr= 0;
         if (RREG32(CP_HQD_ACTIVE) & 1) {
             WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
-            for (i = 0; i < rdev->usec_timeout; i++) {
+            for (j = 0; j < rdev->usec_timeout; j++) {
                 if (!(RREG32(CP_HQD_ACTIVE) & 1))
                     break;
                 udelay(1);
@@ -7751,17 +7751,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
     wptr = RREG32(IH_RB_WPTR);
 
     if (wptr & RB_OVERFLOW) {
+        wptr &= ~RB_OVERFLOW;
         /* When a ring buffer overflow happen start parsing interrupt
          * from the last not overwritten vector (wptr + 16). Hopefully
          * this should allow us to catchup.
          */
-        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-             wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+             wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
         rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
         tmp = RREG32(IH_RB_CNTL);
         tmp |= IH_WPTR_OVERFLOW_CLEAR;
         WREG32(IH_RB_CNTL, tmp);
-        wptr &= ~RB_OVERFLOW;
     }
     return (wptr & rdev->ih.ptr_mask);
 }
@@ -8225,9 +8225,9 @@ restart_ih:
         /* wptr/rptr are in bytes! */
         rptr += 16;
         rptr &= rdev->ih.ptr_mask;
-        WREG32(IH_RB_RPTR, rptr);
     }
     rdev->ih.rptr = rptr;
+    WREG32(IH_RB_RPTR, rdev->ih.rptr);
     atomic_set(&rdev->ih.lock, 0);
     /* make sure wptr hasn't changed while processing */
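The IH hunks above (repeated for evergreen, r600 and si further down) hinge on masked ring arithmetic: ptr_mask is the ring size in bytes minus one, each interrupt vector is 16 bytes, so (wptr + 16) & ptr_mask is the oldest vector not yet overwritten after an overflow, while the old log line computed (wptr + 16) + ptr_mask and printed signed values. A minimal standalone sketch of that arithmetic, with an illustrative RB_OVERFLOW bit and ring size (not the driver's actual register layout):

#include <stdint.h>
#include <stdio.h>

#define RB_OVERFLOW (1u << 0)             /* illustrative flag bit, an assumption here */

int main(void)
{
    const uint32_t ring_size = 64 * 1024; /* bytes, power of two */
    const uint32_t ptr_mask  = ring_size - 1;
    uint32_t wptr = (ring_size - 16) | RB_OVERFLOW; /* overflow flagged near ring end */

    if (wptr & RB_OVERFLOW) {
        wptr &= ~RB_OVERFLOW;                       /* clear the flag before using wptr */
        uint32_t rptr = (wptr + 16) & ptr_mask;     /* wraps back to the start of the ring */
        printf("resume at rptr = 0x%08X\n", (unsigned)rptr);                      /* 0x00000000 */
        printf("old log argument: 0x%08X\n", (unsigned)((wptr + 16) + ptr_mask)); /* bogus value */
    }
    return 0;
}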

View File

@@ -610,16 +610,19 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
 {
     unsigned i;
     int r;
-    void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+    unsigned index;
     u32 tmp;
+    u64 gpu_addr;
 
-    if (!ptr) {
-        DRM_ERROR("invalid vram scratch pointer\n");
-        return -EINVAL;
-    }
+    if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+        index = R600_WB_DMA_RING_TEST_OFFSET;
+    else
+        index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+    gpu_addr = rdev->wb.gpu_addr + index;
 
     tmp = 0xCAFEDEAD;
-    writel(tmp, ptr);
+    rdev->wb.wb[index/4] = cpu_to_le32(tmp);
 
     r = radeon_ring_lock(rdev, ring, 5);
     if (r) {
@@ -627,14 +630,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
         return r;
     }
     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
-    radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
-    radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
+    radeon_ring_write(ring, lower_32_bits(gpu_addr));
+    radeon_ring_write(ring, upper_32_bits(gpu_addr));
     radeon_ring_write(ring, 1); /* number of DWs to follow */
     radeon_ring_write(ring, 0xDEADBEEF);
     radeon_ring_unlock_commit(rdev, ring, false);
 
     for (i = 0; i < rdev->usec_timeout; i++) {
-        tmp = readl(ptr);
+        tmp = le32_to_cpu(rdev->wb.wb[index/4]);
         if (tmp == 0xDEADBEEF)
             break;
         DRM_UDELAY(1);
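Both DMA ring tests (this one and the r600 version further down) switch from an MMIO scratch register to a slot in the write-back page in system memory: the CPU seeds the slot with 0xCAFEDEAD through cpu_to_le32(), the GPU DMA packet overwrites it with 0xDEADBEEF, and the CPU polls it back through le32_to_cpu() with a timeout. A host-side sketch of that pattern only; the "GPU" is a stand-in function, htole32()/le32toh() from glibc's <endian.h> model the kernel helpers, and the offset constant is taken from the radeon.h hunk below:

#include <endian.h>    /* htole32 / le32toh (glibc); stand-ins for the kernel helpers */
#include <stdint.h>
#include <stdio.h>

#define WB_DMA_RING_TEST_OFFSET 3588       /* byte offset into the write-back page */

static uint32_t wb[4096 / 4];              /* simulated 4 KiB write-back page */

static void fake_gpu_dma_write(unsigned index, uint32_t val)
{
    wb[index / 4] = htole32(val);          /* the device writes little-endian dwords */
}

int main(void)
{
    const unsigned index = WB_DMA_RING_TEST_OFFSET;
    unsigned i, timeout = 100000;

    wb[index / 4] = htole32(0xCAFEDEAD);   /* seed the slot before submitting */
    fake_gpu_dma_write(index, 0xDEADBEEF); /* stands in for the ring packet above */

    for (i = 0; i < timeout; i++) {
        if (le32toh(wb[index / 4]) == 0xDEADBEEF)
            break;                         /* GPU write landed */
    }
    printf(i < timeout ? "ring test ok after %u polls\n"
                       : "ring test timed out (%u polls)\n", i);
    return 0;
}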

View File

@@ -49,8 +49,8 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
     sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
     if (sad_count < 0) {
-        DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
-        return;
+        DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+        sad_count = 0;
     }
 
     /* program the speaker allocation */

View File

@@ -176,9 +176,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
     }
     sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
-    if (sad_count <= 0) {
-        DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
-        return;
+    if (sad_count < 0) {
+        DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+        sad_count = 0;
     }
 
     /* program the speaker allocation */

View File

@@ -4749,17 +4749,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
     wptr = RREG32(IH_RB_WPTR);
 
     if (wptr & RB_OVERFLOW) {
+        wptr &= ~RB_OVERFLOW;
         /* When a ring buffer overflow happen start parsing interrupt
          * from the last not overwritten vector (wptr + 16). Hopefully
          * this should allow us to catchup.
          */
-        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-             wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+             wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
         rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
         tmp = RREG32(IH_RB_CNTL);
         tmp |= IH_WPTR_OVERFLOW_CLEAR;
         WREG32(IH_RB_CNTL, tmp);
-        wptr &= ~RB_OVERFLOW;
     }
     return (wptr & rdev->ih.ptr_mask);
 }

View File

@@ -118,9 +118,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
     }
     sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
-    if (sad_count <= 0) {
-        DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
-        return;
+    if (sad_count < 0) {
+        DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+        sad_count = 0;
     }
 
     /* program the speaker allocation */

View File

@@ -2725,6 +2725,10 @@ int kv_dpm_init(struct radeon_device *rdev)
 
     pi->sram_end = SMC_RAM_END;
 
+    /* Enabling nb dpm on an asrock system prevents dpm from working */
+    if (rdev->pdev->subsystem_vendor == 0x1849)
+        pi->enable_nb_dpm = false;
+    else
     pi->enable_nb_dpm = true;
 
     pi->caps_power_containment = true;
@@ -2740,10 +2744,19 @@ int kv_dpm_init(struct radeon_device *rdev)
     pi->caps_sclk_ds = true;
     pi->enable_auto_thermal_throttling = true;
     pi->disable_nb_ps3_in_battery = false;
-    if (radeon_bapm == 0)
+    if (radeon_bapm == -1) {
+        /* There are stability issues reported on with
+         * bapm enabled on an asrock system.
+         */
+        if (rdev->pdev->subsystem_vendor == 0x1849)
         pi->bapm_enable = false;
     else
         pi->bapm_enable = true;
+    } else if (radeon_bapm == 0) {
+        pi->bapm_enable = false;
+    } else {
+        pi->bapm_enable = true;
+    }
     pi->voltage_drop_t = 0;
     pi->caps_sclk_throttle_low_notification = false;
     pi->caps_fps = false; /* true? */
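The bapm hunk follows the driver's tri-state module-parameter convention (radeon_bapm defaults to -1 in the radeon_device.c hunk below): -1 means auto with per-board quirks, 0 forces the feature off, anything else forces it on. A condensed sketch of that decision, using an illustrative helper name rather than driver code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* -1 = auto (quirk-aware default), 0 = force off, otherwise force on */
static bool resolve_tristate(int param, uint16_t subsystem_vendor)
{
    if (param == -1)
        return subsystem_vendor != 0x1849;  /* keep the feature off on ASRock boards */
    return param != 0;
}

int main(void)
{
    printf("auto, ASRock board: %d\n", resolve_tristate(-1, 0x1849)); /* 0 */
    printf("auto, other board:  %d\n", resolve_tristate(-1, 0x1043)); /* 1 */
    printf("forced on:          %d\n", resolve_tristate(1, 0x1849));  /* 1 */
    return 0;
}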

View File

@@ -3715,17 +3715,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
     wptr = RREG32(IH_RB_WPTR);
 
     if (wptr & RB_OVERFLOW) {
+        wptr &= ~RB_OVERFLOW;
         /* When a ring buffer overflow happen start parsing interrupt
          * from the last not overwritten vector (wptr + 16). Hopefully
          * this should allow us to catchup.
          */
-        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-             wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+             wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
         rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
         tmp = RREG32(IH_RB_CNTL);
         tmp |= IH_WPTR_OVERFLOW_CLEAR;
         WREG32(IH_RB_CNTL, tmp);
-        wptr &= ~RB_OVERFLOW;
     }
     return (wptr & rdev->ih.ptr_mask);
 }
@@ -3963,9 +3963,9 @@ restart_ih:
         /* wptr/rptr are in bytes! */
         rptr += 16;
         rptr &= rdev->ih.ptr_mask;
-        WREG32(IH_RB_RPTR, rptr);
     }
     rdev->ih.rptr = rptr;
+    WREG32(IH_RB_RPTR, rdev->ih.rptr);
     atomic_set(&rdev->ih.lock, 0);
     /* make sure wptr hasn't changed while processing */

View File

@@ -232,16 +232,19 @@ int r600_dma_ring_test(struct radeon_device *rdev,
 {
     unsigned i;
     int r;
-    void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+    unsigned index;
     u32 tmp;
+    u64 gpu_addr;
 
-    if (!ptr) {
-        DRM_ERROR("invalid vram scratch pointer\n");
-        return -EINVAL;
-    }
+    if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+        index = R600_WB_DMA_RING_TEST_OFFSET;
+    else
+        index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+    gpu_addr = rdev->wb.gpu_addr + index;
 
     tmp = 0xCAFEDEAD;
-    writel(tmp, ptr);
+    rdev->wb.wb[index/4] = cpu_to_le32(tmp);
 
     r = radeon_ring_lock(rdev, ring, 4);
     if (r) {
@@ -249,13 +252,13 @@ int r600_dma_ring_test(struct radeon_device *rdev,
         return r;
     }
     radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
-    radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
-    radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+    radeon_ring_write(ring, lower_32_bits(gpu_addr));
+    radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
     radeon_ring_write(ring, 0xDEADBEEF);
     radeon_ring_unlock_commit(rdev, ring, false);
 
     for (i = 0; i < rdev->usec_timeout; i++) {
-        tmp = readl(ptr);
+        tmp = le32_to_cpu(rdev->wb.wb[index/4]);
         if (tmp == 0xDEADBEEF)
             break;
         DRM_UDELAY(1);

View File

@@ -114,6 +114,7 @@ extern int radeon_vm_block_size;
 extern int radeon_deep_color;
 extern int radeon_use_pflipirq;
 extern int radeon_bapm;
+extern int radeon_backlight;
 
 typedef struct pm_message {
@@ -1140,6 +1141,8 @@ struct radeon_wb {
 #define R600_WB_EVENT_OFFSET     3072
 #define CIK_WB_CP1_WPTR_OFFSET     3328
 #define CIK_WB_CP2_WPTR_OFFSET     3584
+#define R600_WB_DMA_RING_TEST_OFFSET 3588
+#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
 
 /**
  * struct radeon_pm - power management datas

View File

@@ -71,7 +71,7 @@ int radeon_deep_color = 0;
 int radeon_use_pflipirq = 2;
 int irq_override = 0;
 int radeon_bapm = -1;
+int radeon_backlight = 0;
 
 extern display_t *os_display;
 extern struct drm_device *main_device;
@@ -183,6 +183,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
      * https://bugzilla.kernel.org/show_bug.cgi?id=51381
      */
     { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+    /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+     * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+     */
+    { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
     /* macbook pro 8.2 */
     { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
     { 0, 0, 0, 0, 0 },
@@ -1172,7 +1176,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
     if (radeon_vm_block_size == -1) {
 
         /* Total bits covered by PD + PTs */
-        unsigned bits = ilog2(radeon_vm_size) + 17;
+        unsigned bits = ilog2(radeon_vm_size) + 18;
 
         /* Make sure the PD is 4K in size up to 8GB address space.
            Above that split equal between PD and PTs */

View File

@@ -158,10 +158,43 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
     return ret;
 }
 
+static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+                                         struct drm_connector *connector)
+{
+    struct drm_device *dev = radeon_encoder->base.dev;
+    struct radeon_device *rdev = dev->dev_private;
+    bool use_bl = false;
+
+    if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)))
+        return;
+
+    if (radeon_backlight == 0) {
+        return;
+    } else if (radeon_backlight == 1) {
+        use_bl = true;
+    } else if (radeon_backlight == -1) {
+        /* Quirks */
+        /* Amilo Xi 2550 only works with acpi bl */
+        if ((rdev->pdev->device == 0x9583) &&
+            (rdev->pdev->subsystem_vendor == 0x1734) &&
+            (rdev->pdev->subsystem_device == 0x1107))
+            use_bl = false;
+        else
+            use_bl = true;
+    }
+
+    if (use_bl) {
+        if (rdev->is_atom_bios)
+            radeon_atom_backlight_init(radeon_encoder, connector);
+        else
+            radeon_legacy_backlight_init(radeon_encoder, connector);
+        rdev->mode_info.bl_encoder = radeon_encoder;
+    }
+}
+
 void
 radeon_link_encoder_connector(struct drm_device *dev)
 {
-    struct radeon_device *rdev = dev->dev_private;
     struct drm_connector *connector;
     struct radeon_connector *radeon_connector;
     struct drm_encoder *encoder;
@@ -174,13 +207,8 @@ radeon_link_encoder_connector(struct drm_device *dev)
             radeon_encoder = to_radeon_encoder(encoder);
             if (radeon_encoder->devices & radeon_connector->devices) {
                 drm_mode_connector_attach_encoder(connector, encoder);
-                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                    if (rdev->is_atom_bios)
-                        radeon_atom_backlight_init(radeon_encoder, connector);
-                    else
-                        radeon_legacy_backlight_init(radeon_encoder, connector);
-                    rdev->mode_info.bl_encoder = radeon_encoder;
-                }
+                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                    radeon_encoder_add_backlight(radeon_encoder, connector);
             }
         }
     }

View File

@@ -6316,17 +6316,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
     wptr = RREG32(IH_RB_WPTR);
 
     if (wptr & RB_OVERFLOW) {
+        wptr &= ~RB_OVERFLOW;
         /* When a ring buffer overflow happen start parsing interrupt
          * from the last not overwritten vector (wptr + 16). Hopefully
          * this should allow us to catchup.
          */
-        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-             wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+             wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
         rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
         tmp = RREG32(IH_RB_CNTL);
         tmp |= IH_WPTR_OVERFLOW_CLEAR;
         WREG32(IH_RB_CNTL, tmp);
-        wptr &= ~RB_OVERFLOW;
     }
     return (wptr & rdev->ih.ptr_mask);
 }
@@ -6662,11 +6662,11 @@ restart_ih:
         /* wptr/rptr are in bytes! */
         rptr += 16;
         rptr &= rdev->ih.ptr_mask;
-        WREG32(IH_RB_RPTR, rptr);
     }
 //    if (queue_hotplug)
 //        schedule_work(&rdev->hotplug_work);
     rdev->ih.rptr = rptr;
+    WREG32(IH_RB_RPTR, rdev->ih.rptr);
     atomic_set(&rdev->ih.lock, 0);
     /* make sure wptr hasn't changed while processing */

View File

@@ -6255,7 +6255,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev,
     if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
         index == 0) {
         /* XXX disable for A0 tahiti */
-        si_pi->ulv.supported = true;
+        si_pi->ulv.supported = false;
         si_pi->ulv.pl = *pl;
         si_pi->ulv.one_pcie_lane_in_ulv = false;
         si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;