forked from KolibriOS/kolibrios

drm: v3.10

git-svn-id: svn://kolibrios.org@3764 a494cfbc-eb01-0410-851d-a64ba20cac60

parent a3e5776171
commit ab74087413
@@ -249,6 +249,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
 	else
 		return "unknown";
 }
+EXPORT_SYMBOL(drm_get_connector_status_name);
 
 /**
  * drm_mode_object_get - allocate a new modeset identifier
@@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		connector->helper_private;
 	int count = 0;
 	int mode_flags = 0;
+	bool verbose_prune = true;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
 			drm_get_connector_name(connector));
@@ -136,10 +137,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		if (connector->funcs->force)
 			connector->funcs->force(connector);
 	} else {
-//        dbgprintf("call detect funcs %p ", connector->funcs);
-//        dbgprintf("detect %p\n", connector->funcs->detect);
 		connector->status = connector->funcs->detect(connector, true);
-//        dbgprintf("status %x\n", connector->status);
 	}
 
 	/* Re-enable polling in case the global poll config changed. */
@@ -152,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
 			connector->base.id, drm_get_connector_name(connector));
 		drm_mode_connector_update_edid_property(connector, NULL);
+		verbose_prune = false;
 		goto prune;
 	}
 
@@ -185,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	}
 
 prune:
-	drm_mode_prune_invalid(dev, &connector->modes, true);
+	drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
 
 	if (list_empty(&connector->modes))
 		return 0;
@@ -1007,13 +1006,21 @@ static void output_poll_execute(struct work_struct *work)
 			continue;
 
 		connector->status = connector->funcs->detect(connector, false);
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+		if (old_status != connector->status) {
+			const char *old, *new;
+
+			old = drm_get_connector_status_name(old_status);
+			new = drm_get_connector_status_name(connector->status);
+
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+				      "status updated from %s to %s\n",
 			      connector->base.id,
 			      drm_get_connector_name(connector),
-			      old_status, connector->status);
-		if (old_status != connector->status)
+				      old, new);
+
 			changed = true;
 		}
+	}
 
 	mutex_unlock(&dev->mode_config.mutex);
 
@@ -1085,10 +1092,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 		old_status = connector->status;
 
 		connector->status = connector->funcs->detect(connector, false);
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
 			      connector->base.id,
 			      drm_get_connector_name(connector),
-			      old_status, connector->status);
+			      drm_get_connector_status_name(old_status),
+			      drm_get_connector_status_name(connector->status));
 		if (old_status != connector->status)
 			changed = true;
 	}
@@ -31,6 +31,7 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/bug.h>
 #include <drm/drm_global.h>
 
 struct drm_global_item {
@@ -755,34 +755,36 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 EXPORT_SYMBOL(drm_mm_debug_table);
 
 #if defined(CONFIG_DEBUG_FS)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
+{
+	unsigned long hole_start, hole_end, hole_size;
+
+	if (entry->hole_follows) {
+		hole_start = drm_mm_hole_node_start(entry);
+		hole_end = drm_mm_hole_node_end(entry);
+		hole_size = hole_end - hole_start;
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+			hole_start, hole_end, hole_size);
+		return hole_size;
+	}
+
+	return 0;
+}
+
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
 	struct drm_mm_node *entry;
 	unsigned long total_used = 0, total_free = 0, total = 0;
-	unsigned long hole_start, hole_end, hole_size;
 
-	hole_start = drm_mm_hole_node_start(&mm->head_node);
-	hole_end = drm_mm_hole_node_end(&mm->head_node);
-	hole_size = hole_end - hole_start;
-	if (hole_size)
-		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-			hole_start, hole_end, hole_size);
-	total_free += hole_size;
+	total_free += drm_mm_dump_hole(m, &mm->head_node);
 
 	drm_mm_for_each_node(entry, mm) {
 		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
			entry->start, entry->start + entry->size,
			entry->size);
 		total_used += entry->size;
-		if (entry->hole_follows) {
-			hole_start = drm_mm_hole_node_start(entry);
-			hole_end = drm_mm_hole_node_end(entry);
-			hole_size = hole_end - hole_start;
-			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-				hole_start, hole_end, hole_size);
-			total_free += hole_size;
+		total_free += drm_mm_dump_hole(m, entry);
 		}
-	}
 	total = total_free + total_used;
 
 	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
@@ -661,8 +661,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
 		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
 	}
 
-//   gtt_size -= LFB_SIZE;
-
 	i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size);
 
 	ret = i915_gem_init_aliasing_ppgtt(dev);
@@ -312,6 +312,71 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 	return NULL;
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+					       u32 stolen_offset,
+					       u32 gtt_offset,
+					       u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct drm_mm_node *stolen;
+
+	if (dev_priv->mm.stolen_base == 0)
+		return NULL;
+
+	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
+			stolen_offset, gtt_offset, size);
+
+	/* KISS and expect everything to be page-aligned */
+	BUG_ON(stolen_offset & 4095);
+	BUG_ON(gtt_offset & 4095);
+	BUG_ON(size & 4095);
+
+	if (WARN_ON(size == 0))
+		return NULL;
+
+	stolen = drm_mm_create_block(&dev_priv->mm.stolen,
+				     stolen_offset, size,
+				     false);
+	if (stolen == NULL) {
+		DRM_DEBUG_KMS("failed to allocate stolen space\n");
+		return NULL;
+	}
+
+	obj = _i915_gem_object_create_stolen(dev, stolen);
+	if (obj == NULL) {
+		DRM_DEBUG_KMS("failed to allocate stolen object\n");
+		drm_mm_put_block(stolen);
+		return NULL;
+	}
+
+	/* To simplify the initialisation sequence between KMS and GTT,
+	 * we allow construction of the stolen object prior to
+	 * setting up the GTT space. The actual reservation will occur
+	 * later.
+	 */
+	if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
+		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+						     gtt_offset, size,
+						     false);
+		if (obj->gtt_space == NULL) {
+			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+			drm_gem_object_unreference(&obj->base);
+			return NULL;
+		}
+	} else
+		obj->gtt_space = I915_GTT_RESERVED;
+
+	obj->gtt_offset = gtt_offset;
+	obj->has_global_gtt_mapping = 1;
+
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	return obj;
+}
+
 void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
@@ -54,6 +54,7 @@ int i915_mask_update(struct drm_device *dev, void *data,
 static char log[256];
 
 struct workqueue_struct *system_wq;
+int driver_wq_state;
 
 int x86_clflush_size;
 unsigned int tsc_khz;
@@ -66,7 +67,10 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
     int err = 0;
 
     if(action != 1)
+    {
+        driver_wq_state = 0;
         return 0;
+    };
 
     if( GetService("DISPLAY") != 0 )
         return 0;
@@ -85,7 +89,7 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
             return 0;
         };
     }
-    dbgprintf(" i915 v3.9-rc8\n cmdline: %s\n", cmdline);
+    dbgprintf(" i915 v3.10\n cmdline: %s\n", cmdline);
 
     cpu_detect();
 //    dbgprintf("\ncache line size %d\n", x86_clflush_size);
@@ -106,7 +110,7 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
     dbgprintf("Set DISPLAY handler\n");
 
     struct drm_i915_private *dev_priv = main_device->dev_private;
+    driver_wq_state = 1;
     run_workqueue(dev_priv->wq);
 
     return err;
@@ -19,7 +19,7 @@ CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-b
 
 LIBPATH:= $(DRV_TOPDIR)/ddk
 
-LIBS:= -lddk -lcore
+LIBS:= -lddk -lcore -lgcc
 
 LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0\
           --file-alignment 512 --section-alignment 4096
@@ -42,11 +42,14 @@ HFILES:= $(DRV_INCLUDES)/linux/types.h \
 
 NAME_SRC= \
 	pci.c \
+	$(DRM_TOPDIR)/drm_cache.c \
 	$(DRM_TOPDIR)/drm_crtc.c \
 	$(DRM_TOPDIR)/drm_crtc_helper.c \
 	$(DRM_TOPDIR)/drm_dp_helper.c \
 	$(DRM_TOPDIR)/drm_edid.c \
 	$(DRM_TOPDIR)/drm_fb_helper.c \
+	$(DRM_TOPDIR)/drm_gem.c \
+	$(DRM_TOPDIR)/drm_global.c \
 	$(DRM_TOPDIR)/drm_irq.c \
 	$(DRM_TOPDIR)/drm_mm.c \
 	$(DRM_TOPDIR)/drm_modes.c \
@@ -66,6 +69,10 @@ NAME_SRC= \
 	radeon_clocks.c \
 	atom.c \
 	ni.c \
+	atombios_crtc.c \
+	atombios_dp.c \
+	atombios_encoders.c \
+	atombios_i2c.c \
 	radeon_agp.c \
 	radeon_asic.c \
 	radeon_atombios.c \
@@ -73,25 +80,22 @@ NAME_SRC= \
 	radeon_bios.c \
 	radeon_combios.c \
 	radeon_connectors.c \
-	atombios_crtc.c \
-	atombios_dp.c \
-	atombios_encoders.c \
-	atombios_i2c.c \
+	radeon_display.c \
 	radeon_encoders.c \
 	radeon_fence.c \
+	radeon_fb.c \
+	radeon_gart.c \
 	radeon_gem.c \
 	radeon_i2c.c \
 	radeon_irq_kms.c \
 	radeon_legacy_crtc.c \
 	radeon_legacy_encoders.c \
 	radeon_legacy_tv.c \
-	radeon_display.c \
-	radeon_gart.c \
-	radeon_ring.c \
 	radeon_object_kos.c \
+	radeon_pm.c \
+	radeon_ring.c \
 	radeon_sa.c \
 	radeon_semaphore.c \
-	radeon_pm.c \
 	r100.c \
 	r200.c \
 	r300.c \
@@ -106,12 +110,12 @@ NAME_SRC= \
 	rs600.c \
 	rs690.c \
 	rv770.c \
-	radeon_fb.c \
 	rdisplay.c \
 	rdisplay_kms.c \
 	cmdline.c \
 	si.c \
 	si_blit_shaders.c \
+	utils.c \
 	fwblob.asm
 
 FW_BINS= \
@@ -54,8 +54,6 @@ NAME_SRC= \
 	$(DRM_TOPDIR)/drm_stub.c \
 	$(DRM_TOPDIR)/i2c/i2c-core.c \
 	$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
-	bitmap.c \
-	hmm.c \
 	r700_vs.c \
 	radeon_device.c \
 	evergreen.c \
@@ -33,6 +33,7 @@ SECTIONS
     *(.debug$F)
     *(.drectve)
     *(.edata)
+    *(.eh_frame)
   }
 
   .idata ALIGN(__section_alignment__):
@@ -1236,6 +1236,8 @@ static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
 static void atom_index_iio(struct atom_context *ctx, int base)
 {
 	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+	if (!ctx->iio)
+		return;
 	while (CU8(base) == ATOM_IIO_START) {
 		ctx->iio[CU8(base + 1)] = base + 2;
 		base += 2;
@@ -1285,6 +1287,10 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
 	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
 	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
 	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+	if (!ctx->iio) {
+		atom_destroy(ctx);
+		return NULL;
+	}
 
 	str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
 	while (*str && ((*str == '\n') || (*str == '\r')))
@@ -1333,7 +1339,6 @@ int atom_asic_init(struct atom_context *ctx)
 
 void atom_destroy(struct atom_context *ctx)
 {
-	if (ctx->iio)
 	kfree(ctx->iio);
 	kfree(ctx);
 }
@@ -1387,10 +1392,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
 		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
 
 		DRM_DEBUG("atom firmware requested %08x %dkb\n",
-			  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
-			  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
 
-		usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
 	}
 	ctx->scratch_size_bytes = 0;
 	if (usage_bytes == 0)
@@ -458,6 +458,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;    //Input Parameter
+    ULONG ulClockParams;                 //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;    //Output Parameter
   };
   UCHAR   ucRefDiv;                      //Output Parameter
@@ -490,6 +491,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;    //Input Parameter
+    ULONG ulClockParams;                 //ULONG access for BE
    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;     //Output Parameter
   };
   UCHAR   ucRefDiv;                      //Output Parameter
@@ -252,8 +252,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 		radeon_crtc->enabled = true;
 		/* adjust pm to dpms changes BEFORE enabling crtcs */
 		radeon_pm_compute_clocks(rdev);
-		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
-			atombios_powergate_crtc(crtc, ATOM_DISABLE);
 		atombios_enable_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
 			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -271,8 +269,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
 		atombios_enable_crtc(crtc, ATOM_DISABLE);
 		radeon_crtc->enabled = false;
-		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
-			atombios_powergate_crtc(crtc, ATOM_ENABLE);
 		/* adjust pm to dpms changes AFTER disabling crtcs */
 		radeon_pm_compute_clocks(rdev);
 		break;
@@ -561,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		/* use frac fb div on APUs */
 		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
 			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+		/* use frac fb div on RS780/RS880 */
+		if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
 			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 	} else {
@@ -1812,12 +1811,9 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
 
 static void atombios_crtc_prepare(struct drm_crtc *crtc)
 {
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	radeon_crtc->in_mode_set = true;
-
 	/* disable crtc pair power gating before programming */
 	if (ASIC_IS_DCE6(rdev))
 		atombios_powergate_crtc(crtc, ATOM_DISABLE);
@@ -1828,11 +1824,8 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc)
 
 static void atombios_crtc_commit(struct drm_crtc *crtc)
 {
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
 	atombios_lock_crtc(crtc, ATOM_DISABLE);
-	radeon_crtc->in_mode_set = false;
 }
 
 static void atombios_crtc_disable(struct drm_crtc *crtc)
@@ -1844,6 +1837,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
 	int i;
 
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_ENABLE);
 
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (rdev->mode_info.crtcs[i] &&
@@ -450,8 +450,6 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 	u8 msg[DP_DPCD_SIZE];
 	int ret, i;
 
-	ENTER();
-
 	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
 				       DP_DPCD_SIZE, 0);
 	if (ret > 0) {
@@ -462,10 +460,9 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 		DRM_DEBUG_KMS("\n");
 
 		radeon_dp_probe_oui(radeon_connector);
-		LEAVE();
+
 		return true;
 	}
-	FAIL();
 	dig_connector->dpcd[0] = 0;
 	return false;
 }
@@ -667,6 +667,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
 int
 atombios_get_encoder_mode(struct drm_encoder *encoder)
 {
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
@@ -693,7 +695,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
 		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-		    radeon_audio)
+		    radeon_audio &&
+		    !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
 			return ATOM_ENCODER_MODE_HDMI;
 		else if (radeon_connector->use_digital)
 			return ATOM_ENCODER_MODE_DVI;
@@ -704,7 +707,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 	case DRM_MODE_CONNECTOR_HDMIA:
 	default:
 		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-		    radeon_audio)
+		    radeon_audio &&
+		    !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
 			return ATOM_ENCODER_MODE_HDMI;
 		else
 			return ATOM_ENCODER_MODE_DVI;
@@ -718,7 +722,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
 			return ATOM_ENCODER_MODE_DP;
 		else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-			 radeon_audio)
+			 radeon_audio &&
+			 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
 			return ATOM_ENCODER_MODE_HDMI;
 		else
 			return ATOM_ENCODER_MODE_DVI;
@@ -2150,13 +2155,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
 	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
-		r600_hdmi_enable(encoder);
-		if (ASIC_IS_DCE6(rdev))
-			; /* TODO (use pointers instead of if-s?) */
-		else if (ASIC_IS_DCE4(rdev))
-			evergreen_hdmi_setmode(encoder, adjusted_mode);
-		else
-			r600_hdmi_setmode(encoder, adjusted_mode);
+		if (rdev->asic->display.hdmi_enable)
+			radeon_hdmi_enable(rdev, encoder, true);
+		if (rdev->asic->display.hdmi_setmode)
+			radeon_hdmi_setmode(rdev, encoder, adjusted_mode);
 	}
 }
 
@@ -2413,8 +2415,10 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 
 disable_done:
 	if (radeon_encoder_is_digital(encoder)) {
-		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
-			r600_hdmi_disable(encoder);
+		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+			if (rdev->asic->display.hdmi_enable)
+				radeon_hdmi_enable(rdev, encoder, false);
+		}
 		dig = radeon_encoder->enc_priv;
 		dig->dig_encoder = -1;
 	}
@@ -5,9 +5,10 @@
 #include "hmm.h"
 #include "bitmap.h"
 
-//#define DRIVER_CAPS_0   HW_BIT_BLIT
+#define DRIVER_CAPS_0   HW_BIT_BLIT
 
-#define DRIVER_CAPS_0   0
+//#define DRIVER_CAPS_0   0
+
 #define DRIVER_CAPS_1   0
 
 struct context *context_map[256];
@@ -16,6 +17,7 @@ struct hmm bm_mm;
 
 extern struct drm_device *main_drm_device;
 
+#if 0
 
 void __attribute__((regparm(1))) destroy_bitmap(bitmap_t *bitmap)
 {
@@ -44,7 +46,6 @@ void __attribute__((regparm(1))) destroy_bitmap(bitmap_t *bitmap)
     __DestroyObject(bitmap);
 };
 
-#if 0
 static int bitmap_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
     int page_count;
@@ -110,7 +111,6 @@ struct io_call_10 /* SRV_CREATE_SURFACE */
     u32 format;    // reserved mbz
 };
 
-#endif
 
 int create_surface(struct drm_device *dev, struct io_call_10 *pbitmap)
 {
@@ -183,9 +183,11 @@ int create_surface(struct drm_device *dev, struct io_call_10 *pbitmap)
     if (unlikely(ret != 0))
         goto err3;
 
+#ifndef __TTM__
     ret = radeon_bo_user_map(obj, (void**)&uaddr);
     if (unlikely(ret != 0))
         goto err3;
+#endif
 
     bitmap->page_count = size/PAGE_SIZE;
     bitmap->max_count  = max_size/PAGE_SIZE;
@@ -482,4 +484,5 @@ err:
     return NULL;
 };
 
+#endif
File diff suppressed because it is too large.
@@ -668,10 +668,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     NULL, &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("evergreen failed to allocate shader\n");
 		return r;
 	}
 
 	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 	if (unlikely(r != 0))
@@ -724,7 +724,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
-//	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
 #endif
 
@@ -24,6 +24,7 @@
  * Authors: Christian König
  *          Rafał Miłecki
  */
+#include <linux/hdmi.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
@@ -53,80 +54,81 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
 	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
 }
 
-/*
- * calculate the crc for a given info frame
- */
-static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
-				 uint8_t versionNumber,
-				 uint8_t length,
-				 uint8_t *frame)
+static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
 {
-	int i;
-	frame[0] = packetType + versionNumber + length;
-	for (i = 1; i <= length; i++)
-		frame[0] += frame[i];
-	frame[0] = 0x100 - frame[0];
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	struct cea_sad *sads;
+	int i, sad_count;
+
+	static const u16 eld_reg_to_type[][2] = {
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+	};
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder)
+			radeon_connector = to_radeon_connector(connector);
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+	if (sad_count < 0) {
+		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+		return;
+	}
+	BUG_ON(!sads);
+
+	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+		u32 value = 0;
+		int j;
+
+		for (j = 0; j < sad_count; j++) {
+			struct cea_sad *sad = &sads[j];
+
+			if (sad->format == eld_reg_to_type[i][1]) {
+				value = MAX_CHANNELS(sad->channels) |
+					DESCRIPTOR_BYTE_2(sad->byte2) |
+					SUPPORTED_FREQUENCIES(sad->freq);
+				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+					value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+				break;
+			}
+		}
+		WREG32(eld_reg_to_type[i][0], value);
+	}
+
+	kfree(sads);
 }
 
 /*
  * build a HDMI Video Info Frame
  */
-static void evergreen_hdmi_videoinfoframe(
-	struct drm_encoder *encoder,
-	uint8_t color_format,
-	int active_information_present,
-	uint8_t active_format_aspect_ratio,
-	uint8_t scan_information,
-	uint8_t colorimetry,
-	uint8_t ex_colorimetry,
-	uint8_t quantization,
-	int ITC,
-	uint8_t picture_aspect_ratio,
-	uint8_t video_format_identification,
-	uint8_t pixel_repetition,
-	uint8_t non_uniform_picture_scaling,
-	uint8_t bar_info_data_valid,
-	uint16_t top_bar,
-	uint16_t bottom_bar,
-	uint16_t left_bar,
-	uint16_t right_bar
-)
+static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+						void *buffer, size_t size)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	uint32_t offset = dig->afmt->offset;
+	uint8_t *frame = buffer + 3;
 
-	uint8_t frame[14];
-
-	frame[0x0] = 0;
-	frame[0x1] =
-		(scan_information & 0x3) |
-		((bar_info_data_valid & 0x3) << 2) |
-		((active_information_present & 0x1) << 4) |
-		((color_format & 0x3) << 5);
-	frame[0x2] =
-		(active_format_aspect_ratio & 0xF) |
-		((picture_aspect_ratio & 0x3) << 4) |
-		((colorimetry & 0x3) << 6);
-	frame[0x3] =
-		(non_uniform_picture_scaling & 0x3) |
-		((quantization & 0x3) << 2) |
-		((ex_colorimetry & 0x7) << 4) |
-		((ITC & 0x1) << 7);
-	frame[0x4] = (video_format_identification & 0x7F);
-	frame[0x5] = (pixel_repetition & 0xF);
-	frame[0x6] = (top_bar & 0xFF);
-	frame[0x7] = (top_bar >> 8);
-	frame[0x8] = (bottom_bar & 0xFF);
-	frame[0x9] = (bottom_bar >> 8);
-	frame[0xA] = (left_bar & 0xFF);
-	frame[0xB] = (left_bar >> 8);
-	frame[0xC] = (right_bar & 0xFF);
-	frame[0xD] = (right_bar >> 8);
-
-	evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
 	/* Our header values (type, version, length) should be alright, Intel
 	 * is using the same. Checksum function also seems to be OK, it works
 	 * fine for audio infoframe. However calculated value is always lower
@@ -154,7 +156,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+	struct hdmi_avi_infoframe frame;
 	uint32_t offset;
+	ssize_t err;
 
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (!dig->afmt->enabled)
@@ -200,9 +205,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 
 	WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
 
-	evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-				      0, 0, 0, 0, 0, 0);
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
 	evergreen_hdmi_update_ACR(encoder, mode->clock);
 
 	/* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
@@ -223,8 +223,11 @@
 #define EVERGREEN_CRTC_STATUS                   0x6e8c
 #       define EVERGREEN_CRTC_V_BLANK           (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION          0x6e90
+#define EVERGREEN_CRTC_STATUS_HV_COUNT          0x6ea0
 #define EVERGREEN_MASTER_UPDATE_MODE            0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK              0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK            0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE            0x6ef8
 
 #define EVERGREEN_DC_GPIO_HPD_MASK              0x64b0
 #define EVERGREEN_DC_GPIO_HPD_A                 0x64b4
@@ -53,6 +53,43 @@
 #define RCU_IND_INDEX                           0x100
 #define RCU_IND_DATA                            0x104
 
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL                       0x718
+#       define UPLL_RESET_MASK                  0x00000001
+#       define UPLL_SLEEP_MASK                  0x00000002
+#       define UPLL_BYPASS_EN_MASK              0x00000004
+#       define UPLL_CTLREQ_MASK                 0x00000008
+#       define UPLL_REF_DIV_MASK                0x003F0000
+#       define UPLL_VCO_MODE_MASK               0x00000200
+#       define UPLL_CTLACK_MASK                 0x40000000
+#       define UPLL_CTLACK2_MASK                0x80000000
+#define CG_UPLL_FUNC_CNTL_2                     0x71c
+#       define UPLL_PDIV_A(x)                   ((x) << 0)
+#       define UPLL_PDIV_A_MASK                 0x0000007F
+#       define UPLL_PDIV_B(x)                   ((x) << 8)
+#       define UPLL_PDIV_B_MASK                 0x00007F00
+#       define VCLK_SRC_SEL(x)                  ((x) << 20)
+#       define VCLK_SRC_SEL_MASK                0x01F00000
+#       define DCLK_SRC_SEL(x)                  ((x) << 25)
+#       define DCLK_SRC_SEL_MASK                0x3E000000
+#define CG_UPLL_FUNC_CNTL_3                     0x720
+#       define UPLL_FB_DIV(x)                   ((x) << 0)
+#       define UPLL_FB_DIV_MASK                 0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4                     0x854
+#       define UPLL_SPARE_ISPARE9               0x00020000
+#define CG_UPLL_SPREAD_SPECTRUM                 0x79c
+#       define SSEN_MASK                        0x00000001
+
+/* fusion uvd clocks */
+#define CG_DCLK_CNTL                            0x610
+#       define DCLK_DIVIDER_MASK                0x7f
+#       define DCLK_DIR_CNTL_EN                 (1 << 8)
+#define CG_DCLK_STATUS                          0x614
+#       define DCLK_STATUS                      (1 << 0)
+#define CG_VCLK_CNTL                            0x618
+#define CG_VCLK_STATUS                          0x61c
+#define CG_SCRATCH1                             0x820
+
 #define GRBM_GFX_INDEX                          0x802C
 #define         INSTANCE_INDEX(x)               ((x) << 0)
 #define         SE_INDEX(x)                     ((x) << 16)
@@ -197,6 +234,7 @@
 #       define HDMI_MPEG_INFO_CONT              (1 << 9)
 #define HDMI_INFOFRAME_CONTROL1                 0x7048
 #       define HDMI_AVI_INFO_LINE(x)            (((x) & 0x3f) << 0)
+#       define HDMI_AVI_INFO_LINE_MASK          (0x3f << 0)
 #       define HDMI_AUDIO_INFO_LINE(x)          (((x) & 0x3f) << 8)
 #       define HDMI_MPEG_INFO_LINE(x)           (((x) & 0x3f) << 16)
 #define HDMI_GENERIC_PACKET_CONTROL             0x704c
@@ -729,6 +767,18 @@
 #define WAIT_UNTIL                              0x8040
 
 #define SRBM_STATUS                             0x0E50
+#define         RLC_RQ_PENDING                  (1 << 3)
+#define         GRBM_RQ_PENDING                 (1 << 5)
+#define         VMC_BUSY                        (1 << 8)
+#define         MCB_BUSY                        (1 << 9)
+#define         MCB_NON_DISPLAY_BUSY            (1 << 10)
+#define         MCC_BUSY                        (1 << 11)
+#define         MCD_BUSY                        (1 << 12)
+#define         SEM_BUSY                        (1 << 14)
+#define         RLC_BUSY                        (1 << 15)
+#define         IH_BUSY                         (1 << 17)
+#define SRBM_STATUS2                            0x0EC4
+#define         DMA_BUSY                        (1 << 5)
 #define SRBM_SOFT_RESET                         0x0E60
 #define         SRBM_SOFT_RESET_ALL_MASK        0x00FEEFA6
 #define         SOFT_RESET_BIF                  (1 << 1)
@@ -924,10 +974,13 @@
 #define CAYMAN_DMA1_CNTL                        0xd82c
 
 /* async DMA packets */
-#define DMA_PACKET(cmd, t, s, n)        ((((cmd) & 0xF) << 28) |        \
-                                         (((t) & 0x1) << 23) |          \
-                                         (((s) & 0x1) << 22) |          \
+#define DMA_PACKET(cmd, sub_cmd, n)     ((((cmd) & 0xF) << 28) |        \
+                                         (((sub_cmd) & 0xFF) << 20) |   \
                                          (((n) & 0xFFFFF) << 0))
+#define GET_DMA_CMD(h)                  (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h)                ((h) & 0x000fffff)
+#define GET_DMA_SUB_CMD(h)              (((h) & 0x0ff00000) >> 20)
 
 /* async DMA Packet types */
 #define DMA_PACKET_WRITE                        0x2
 #define DMA_PACKET_COPY                         0x3
@@ -977,19 +1030,20 @@
 #       define TARGET_LINK_SPEED_MASK           (0xf << 0)
 #       define SELECTABLE_DEEMPHASIS            (1 << 6)
 
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG                    0xef4c
+#define UVD_UDEC_DB_ADDR_CONFIG                 0xef50
+#define UVD_UDEC_DBW_ADDR_CONFIG                0xef54
+#define UVD_RBC_RB_RPTR                         0xf690
+#define UVD_RBC_RB_WPTR                         0xf694
+
 /*
  * PM4
  */
-#define PACKET_TYPE0                            0
-#define PACKET_TYPE1                            1
-#define PACKET_TYPE2                            2
-#define PACKET_TYPE3                            3
-
-#define CP_PACKET_GET_TYPE(h)                   (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h)                  (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h)                   (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h)                (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) |                         \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) |                  \
                          (((reg) >> 2) & 0xFFFF) |                      \
                          ((n) & 0x3FFF) << 16)
 #define CP_PACKET2                      0x80000000
@@ -998,7 +1052,7 @@
 
 #define PACKET2(v)      (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
 
-#define PACKET3(op, n)  ((PACKET_TYPE3 << 30) |                         \
+#define PACKET3(op, n)  ((RADEON_PACKET_TYPE3 << 30) |                  \
                          (((op) & 0xFF) << 8) |                         \
                          ((n) & 0x3FFF) << 16)
 
BIN drivers/video/drm/radeon/firmware/ARUBA_me.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/ARUBA_pfp.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/ARUBA_rlc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/BARTS_smc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/BONAIRE_ce.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/BONAIRE_mc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/BONAIRE_me.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/BONAIRE_mec.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/BONAIRE_pfp.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/BONAIRE_rlc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/BONAIRE_sdma.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/BONAIRE_uvd.bin (Normal file; binary file not shown)
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/CAICOS_smc.bin (Normal file; binary file not shown)
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/CAYMAN_smc.bin (Normal file; binary file not shown)
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/CEDAR_smc.bin (Normal file; binary file not shown)
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/CYPRESS_smc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/CYPRESS_uvd.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/HAINAN_ce.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/HAINAN_mc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/HAINAN_me.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/HAINAN_pfp.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/HAINAN_rlc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/HAINAN_smc.bin (Normal file; binary file not shown)
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/JUNIPER_smc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/KABINI_ce.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/KABINI_me.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/KABINI_mec.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/KABINI_pfp.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/KABINI_rlc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/KABINI_sdma.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/OLAND_ce.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/OLAND_mc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/OLAND_me.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/OLAND_pfp.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/OLAND_rlc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/OLAND_smc.bin (Normal file; binary file not shown)
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/PITCAIRN_smc.bin (Normal file; binary file not shown)
Binary file not shown.
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/REDWOOD_smc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/RV710_smc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/RV710_uvd.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/RV730_smc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/RV740_smc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/RV770_smc.bin (Normal file; binary file not shown)
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/SUMO_uvd.bin (Normal file; binary file not shown)
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/TAHITI_smc.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/TAHITI_uvd.bin (Normal file; binary file not shown)
BIN drivers/video/drm/radeon/firmware/TURKS_smc.bin (Normal file; binary file not shown)
Binary file not shown.
BIN drivers/video/drm/radeon/firmware/VERDE_smc.bin (Normal file; binary file not shown)
@@ -259,7 +259,7 @@ macro SI_code [arg]
 
 }
 
-SI_code TAHITI, PITCAIRN, VERDE
+SI_code TAHITI, PITCAIRN, VERDE, OLAND, HAINAN
 
 ___end_builtin_fw:
 
@@ -380,7 +380,7 @@ arg#_RLC_END:
 
 }
 
-SI_firmware TAHITI,PITCAIRN,VERDE
+SI_firmware TAHITI, PITCAIRN, VERDE, OLAND, HAINAN
 
 align 16
 R100CP_START:
@@ -34,6 +34,8 @@
 #include "ni_reg.h"
 #include "cayman_blit_shaders.h"
 
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
 extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
@ -76,6 +78,282 @@ MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
|
|||||||
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
|
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
|
||||||
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
|
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
|
||||||
|
|
||||||
|
|
||||||
|
static const u32 cayman_golden_registers2[] =
|
||||||
|
{
|
||||||
|
0x3e5c, 0xffffffff, 0x00000000,
|
||||||
|
0x3e48, 0xffffffff, 0x00000000,
|
||||||
|
0x3e4c, 0xffffffff, 0x00000000,
|
||||||
|
0x3e64, 0xffffffff, 0x00000000,
|
||||||
|
0x3e50, 0xffffffff, 0x00000000,
|
||||||
|
0x3e60, 0xffffffff, 0x00000000
|
||||||
|
};
|
||||||
|
|
||||||
|
static const u32 cayman_golden_registers[] =
|
||||||
|
{
|
||||||
|
0x5eb4, 0xffffffff, 0x00000002,
|
||||||
|
0x5e78, 0x8f311ff1, 0x001000f0,
|
||||||
|
0x3f90, 0xffff0000, 0xff000000,
|
||||||
|
0x9148, 0xffff0000, 0xff000000,
|
||||||
|
0x3f94, 0xffff0000, 0xff000000,
|
||||||
|
0x914c, 0xffff0000, 0xff000000,
|
||||||
|
0xc78, 0x00000080, 0x00000080,
|
||||||
|
0xbd4, 0x70073777, 0x00011003,
|
||||||
|
0xd02c, 0xbfffff1f, 0x08421000,
|
||||||
|
0xd0b8, 0x73773777, 0x02011003,
|
||||||
|
0x5bc0, 0x00200000, 0x50100000,
|
||||||
|
0x98f8, 0x33773777, 0x02011003,
|
||||||
|
0x98fc, 0xffffffff, 0x76541032,
|
||||||
|
0x7030, 0x31000311, 0x00000011,
|
||||||
|
0x2f48, 0x33773777, 0x42010001,
|
||||||
|
0x6b28, 0x00000010, 0x00000012,
|
||||||
|
0x7728, 0x00000010, 0x00000012,
|
||||||
|
0x10328, 0x00000010, 0x00000012,
|
||||||
|
0x10f28, 0x00000010, 0x00000012,
|
||||||
|
0x11b28, 0x00000010, 0x00000012,
|
||||||
|
0x12728, 0x00000010, 0x00000012,
|
||||||
|
0x240c, 0x000007ff, 0x00000000,
|
||||||
|
0x8a14, 0xf000001f, 0x00000007,
|
||||||
|
0x8b24, 0x3fff3fff, 0x00ff0fff,
|
||||||
|
0x8b10, 0x0000ff0f, 0x00000000,
|
||||||
|
0x28a4c, 0x07ffffff, 0x06000000,
|
||||||
|
0x10c, 0x00000001, 0x00010003,
|
||||||
|
0xa02c, 0xffffffff, 0x0000009b,
|
||||||
|
0x913c, 0x0000010f, 0x01000100,
|
||||||
|
0x8c04, 0xf8ff00ff, 0x40600060,
|
||||||
|
0x28350, 0x00000f01, 0x00000000,
|
||||||
|
0x9508, 0x3700001f, 0x00000002,
|
||||||
|
0x960c, 0xffffffff, 0x54763210,
|
||||||
|
0x88c4, 0x001f3ae3, 0x00000082,
|
||||||
|
0x88d0, 0xffffffff, 0x0f40df40,
|
||||||
|
0x88d4, 0x0000001f, 0x00000010,
|
||||||
|
0x8974, 0xffffffff, 0x00000000
|
||||||
|
};
|
||||||
|
|
||||||
|
static const u32 dvst_golden_registers2[] =
|
||||||
|
{
|
||||||
|
0x8f8, 0xffffffff, 0,
|
||||||
|
0x8fc, 0x00380000, 0,
|
||||||
|
0x8f8, 0xffffffff, 1,
|
||||||
|
0x8fc, 0x0e000000, 0
|
||||||
|
};
|
||||||
|
|
||||||
|
static const u32 dvst_golden_registers[] =
|
||||||
|
{
|
||||||
|
0x690, 0x3fff3fff, 0x20c00033,
|
||||||
|
0x918c, 0x0fff0fff, 0x00010006,
|
||||||
|
0x91a8, 0x0fff0fff, 0x00010006,
|
||||||
|
0x9150, 0xffffdfff, 0x6e944040,
|
||||||
|
0x917c, 0x0fff0fff, 0x00030002,
|
||||||
|
0x9198, 0x0fff0fff, 0x00030002,
|
||||||
|
0x915c, 0x0fff0fff, 0x00010000,
|
||||||
|
0x3f90, 0xffff0001, 0xff000000,
|
||||||
|
0x9178, 0x0fff0fff, 0x00070000,
|
||||||
|
0x9194, 0x0fff0fff, 0x00070000,
|
||||||
|
0x9148, 0xffff0001, 0xff000000,
|
||||||
|
0x9190, 0x0fff0fff, 0x00090008,
|
||||||
|
0x91ac, 0x0fff0fff, 0x00090008,
|
||||||
|
0x3f94, 0xffff0000, 0xff000000,
|
||||||
|
0x914c, 0xffff0000, 0xff000000,
|
||||||
|
0x929c, 0x00000fff, 0x00000001,
|
||||||
|
0x55e4, 0xff607fff, 0xfc000100,
|
||||||
|
0x8a18, 0xff000fff, 0x00000100,
|
||||||
|
0x8b28, 0xff000fff, 0x00000100,
|
||||||
|
0x9144, 0xfffc0fff, 0x00000100,
|
||||||
|
0x6ed8, 0x00010101, 0x00010000,
|
||||||
|
0x9830, 0xffffffff, 0x00000000,
|
||||||
|
0x9834, 0xf00fffff, 0x00000400,
|
||||||
|
0x9838, 0xfffffffe, 0x00000000,
|
||||||
|
0xd0c0, 0xff000fff, 0x00000100,
|
||||||
|
0xd02c, 0xbfffff1f, 0x08421000,
|
||||||
|
0xd0b8, 0x73773777, 0x12010001,
|
||||||
|
0x5bb0, 0x000000f0, 0x00000070,
|
||||||
|
0x98f8, 0x73773777, 0x12010001,
|
||||||
|
0x98fc, 0xffffffff, 0x00000010,
|
||||||
|
0x9b7c, 0x00ff0000, 0x00fc0000,
|
||||||
|
0x8030, 0x00001f0f, 0x0000100a,
|
||||||
|
0x2f48, 0x73773777, 0x12010001,
|
||||||
|
0x2408, 0x00030000, 0x000c007f,
|
||||||
|
0x8a14, 0xf000003f, 0x00000007,
|
||||||
|
0x8b24, 0x3fff3fff, 0x00ff0fff,
|
||||||
|
0x8b10, 0x0000ff0f, 0x00000000,
|
||||||
|
0x28a4c, 0x07ffffff, 0x06000000,
|
||||||
|
0x4d8, 0x00000fff, 0x00000100,
|
||||||
|
0xa008, 0xffffffff, 0x00010000,
|
||||||
|
0x913c, 0xffff03ff, 0x01000100,
|
||||||
|
0x8c00, 0x000000ff, 0x00000003,
|
||||||
|
0x8c04, 0xf8ff00ff, 0x40600060,
|
||||||
|
0x8cf0, 0x1fff1fff, 0x08e00410,
|
||||||
|
0x28350, 0x00000f01, 0x00000000,
|
||||||
|
0x9508, 0xf700071f, 0x00000002,
|
||||||
|
0x960c, 0xffffffff, 0x54763210,
|
||||||
|
0x20ef8, 0x01ff01ff, 0x00000002,
|
||||||
|
0x20e98, 0xfffffbff, 0x00200000,
|
||||||
|
0x2015c, 0xffffffff, 0x00000f40,
|
||||||
|
0x88c4, 0x001f3ae3, 0x00000082,
|
||||||
|
0x8978, 0x3fffffff, 0x04050140,
|
||||||
|
0x88d4, 0x0000001f, 0x00000010,
|
||||||
|
0x8974, 0xffffffff, 0x00000000
|
||||||
|
};
|
||||||
|
|
||||||
|
static const u32 scrapper_golden_registers[] =
|
||||||
|
{
|
||||||
|
0x690, 0x3fff3fff, 0x20c00033,
|
||||||
|
0x918c, 0x0fff0fff, 0x00010006,
|
||||||
|
0x918c, 0x0fff0fff, 0x00010006,
|
||||||
|
0x91a8, 0x0fff0fff, 0x00010006,
|
||||||
|
0x91a8, 0x0fff0fff, 0x00010006,
|
||||||
|
0x9150, 0xffffdfff, 0x6e944040,
|
||||||
|
0x9150, 0xffffdfff, 0x6e944040,
|
||||||
|
0x917c, 0x0fff0fff, 0x00030002,
|
||||||
|
0x917c, 0x0fff0fff, 0x00030002,
|
||||||
|
0x9198, 0x0fff0fff, 0x00030002,
|
||||||
|
0x9198, 0x0fff0fff, 0x00030002,
|
||||||
|
0x915c, 0x0fff0fff, 0x00010000,
|
||||||
|
0x915c, 0x0fff0fff, 0x00010000,
|
||||||
|
0x3f90, 0xffff0001, 0xff000000,
|
||||||
|
0x3f90, 0xffff0001, 0xff000000,
|
||||||
|
0x9178, 0x0fff0fff, 0x00070000,
|
||||||
|
0x9178, 0x0fff0fff, 0x00070000,
|
||||||
|
0x9194, 0x0fff0fff, 0x00070000,
|
||||||
|
0x9194, 0x0fff0fff, 0x00070000,
|
||||||
|
0x9148, 0xffff0001, 0xff000000,
|
||||||
|
0x9148, 0xffff0001, 0xff000000,
|
||||||
|
0x9190, 0x0fff0fff, 0x00090008,
|
||||||
|
0x9190, 0x0fff0fff, 0x00090008,
|
||||||
|
0x91ac, 0x0fff0fff, 0x00090008,
|
||||||
|
0x91ac, 0x0fff0fff, 0x00090008,
|
||||||
|
0x3f94, 0xffff0000, 0xff000000,
|
||||||
|
0x3f94, 0xffff0000, 0xff000000,
|
||||||
|
0x914c, 0xffff0000, 0xff000000,
|
||||||
|
0x914c, 0xffff0000, 0xff000000,
|
||||||
|
0x929c, 0x00000fff, 0x00000001,
|
||||||
|
0x929c, 0x00000fff, 0x00000001,
|
||||||
|
0x55e4, 0xff607fff, 0xfc000100,
|
||||||
|
0x8a18, 0xff000fff, 0x00000100,
|
||||||
|
0x8a18, 0xff000fff, 0x00000100,
|
||||||
|
0x8b28, 0xff000fff, 0x00000100,
|
||||||
|
0x8b28, 0xff000fff, 0x00000100,
|
||||||
|
0x9144, 0xfffc0fff, 0x00000100,
|
||||||
|
0x9144, 0xfffc0fff, 0x00000100,
|
||||||
|
0x6ed8, 0x00010101, 0x00010000,
|
||||||
|
0x9830, 0xffffffff, 0x00000000,
|
||||||
|
0x9830, 0xffffffff, 0x00000000,
|
||||||
|
0x9834, 0xf00fffff, 0x00000400,
|
||||||
|
0x9834, 0xf00fffff, 0x00000400,
|
||||||
|
0x9838, 0xfffffffe, 0x00000000,
|
||||||
|
0x9838, 0xfffffffe, 0x00000000,
|
||||||
|
0xd0c0, 0xff000fff, 0x00000100,
|
||||||
|
0xd02c, 0xbfffff1f, 0x08421000,
|
||||||
|
0xd02c, 0xbfffff1f, 0x08421000,
|
||||||
|
0xd0b8, 0x73773777, 0x12010001,
|
||||||
|
0xd0b8, 0x73773777, 0x12010001,
|
||||||
|
0x5bb0, 0x000000f0, 0x00000070,
|
||||||
|
0x98f8, 0x73773777, 0x12010001,
|
||||||
|
0x98f8, 0x73773777, 0x12010001,
|
||||||
|
0x98fc, 0xffffffff, 0x00000010,
|
||||||
|
0x98fc, 0xffffffff, 0x00000010,
|
||||||
|
0x9b7c, 0x00ff0000, 0x00fc0000,
|
||||||
|
0x9b7c, 0x00ff0000, 0x00fc0000,
|
||||||
|
0x8030, 0x00001f0f, 0x0000100a,
|
||||||
|
0x8030, 0x00001f0f, 0x0000100a,
|
||||||
|
0x2f48, 0x73773777, 0x12010001,
|
||||||
|
0x2f48, 0x73773777, 0x12010001,
|
||||||
|
0x2408, 0x00030000, 0x000c007f,
|
||||||
|
0x8a14, 0xf000003f, 0x00000007,
|
||||||
|
0x8a14, 0xf000003f, 0x00000007,
|
||||||
|
0x8b24, 0x3fff3fff, 0x00ff0fff,
|
||||||
|
0x8b24, 0x3fff3fff, 0x00ff0fff,
|
||||||
|
0x8b10, 0x0000ff0f, 0x00000000,
|
||||||
|
0x8b10, 0x0000ff0f, 0x00000000,
|
||||||
|
0x28a4c, 0x07ffffff, 0x06000000,
|
||||||
|
0x28a4c, 0x07ffffff, 0x06000000,
|
||||||
|
0x4d8, 0x00000fff, 0x00000100,
|
||||||
|
0x4d8, 0x00000fff, 0x00000100,
|
||||||
|
0xa008, 0xffffffff, 0x00010000,
|
||||||
|
0xa008, 0xffffffff, 0x00010000,
|
||||||
|
0x913c, 0xffff03ff, 0x01000100,
|
||||||
|
0x913c, 0xffff03ff, 0x01000100,
|
||||||
|
0x90e8, 0x001fffff, 0x010400c0,
|
||||||
|
0x8c00, 0x000000ff, 0x00000003,
|
||||||
|
0x8c00, 0x000000ff, 0x00000003,
|
||||||
|
0x8c04, 0xf8ff00ff, 0x40600060,
|
||||||
|
0x8c04, 0xf8ff00ff, 0x40600060,
|
||||||
|
0x8c30, 0x0000000f, 0x00040005,
|
||||||
|
0x8cf0, 0x1fff1fff, 0x08e00410,
|
||||||
|
0x8cf0, 0x1fff1fff, 0x08e00410,
|
||||||
|
0x900c, 0x00ffffff, 0x0017071f,
|
||||||
|
0x28350, 0x00000f01, 0x00000000,
|
||||||
|
0x28350, 0x00000f01, 0x00000000,
|
||||||
|
0x9508, 0xf700071f, 0x00000002,
|
||||||
|
0x9508, 0xf700071f, 0x00000002,
|
||||||
|
0x9688, 0x00300000, 0x0017000f,
|
||||||
|
0x960c, 0xffffffff, 0x54763210,
|
||||||
|
0x960c, 0xffffffff, 0x54763210,
|
||||||
|
0x20ef8, 0x01ff01ff, 0x00000002,
|
||||||
|
0x20e98, 0xfffffbff, 0x00200000,
|
||||||
|
0x2015c, 0xffffffff, 0x00000f40,
|
||||||
|
0x88c4, 0x001f3ae3, 0x00000082,
|
||||||
|
0x88c4, 0x001f3ae3, 0x00000082,
|
||||||
|
0x8978, 0x3fffffff, 0x04050140,
|
||||||
|
0x8978, 0x3fffffff, 0x04050140,
|
||||||
|
0x88d4, 0x0000001f, 0x00000010,
|
||||||
|
0x88d4, 0x0000001f, 0x00000010,
|
||||||
|
0x8974, 0xffffffff, 0x00000000,
|
||||||
|
0x8974, 0xffffffff, 0x00000000
|
||||||
|
};
|
||||||
|
|
||||||
|
static void ni_init_golden_registers(struct radeon_device *rdev)
|
||||||
|
{
|
||||||
|
switch (rdev->family) {
|
||||||
|
case CHIP_CAYMAN:
|
||||||
|
radeon_program_register_sequence(rdev,
|
||||||
|
cayman_golden_registers,
|
||||||
|
(const u32)ARRAY_SIZE(cayman_golden_registers));
|
||||||
|
radeon_program_register_sequence(rdev,
|
||||||
|
cayman_golden_registers2,
|
||||||
|
(const u32)ARRAY_SIZE(cayman_golden_registers2));
|
||||||
|
break;
|
||||||
|
case CHIP_ARUBA:
|
||||||
|
if ((rdev->pdev->device == 0x9900) ||
|
||||||
|
(rdev->pdev->device == 0x9901) ||
|
||||||
|
(rdev->pdev->device == 0x9903) ||
|
||||||
|
(rdev->pdev->device == 0x9904) ||
|
||||||
|
(rdev->pdev->device == 0x9905) ||
|
||||||
|
(rdev->pdev->device == 0x9906) ||
|
||||||
|
(rdev->pdev->device == 0x9907) ||
|
||||||
|
(rdev->pdev->device == 0x9908) ||
|
||||||
|
(rdev->pdev->device == 0x9909) ||
|
||||||
|
(rdev->pdev->device == 0x990A) ||
|
||||||
|
(rdev->pdev->device == 0x990B) ||
|
||||||
|
(rdev->pdev->device == 0x990C) ||
|
||||||
|
(rdev->pdev->device == 0x990D) ||
|
||||||
|
(rdev->pdev->device == 0x990E) ||
|
||||||
|
(rdev->pdev->device == 0x990F) ||
|
||||||
|
(rdev->pdev->device == 0x9910) ||
|
||||||
|
(rdev->pdev->device == 0x9913) ||
|
||||||
|
(rdev->pdev->device == 0x9917) ||
|
||||||
|
(rdev->pdev->device == 0x9918)) {
|
||||||
|
radeon_program_register_sequence(rdev,
|
||||||
|
dvst_golden_registers,
|
||||||
|
(const u32)ARRAY_SIZE(dvst_golden_registers));
|
||||||
|
radeon_program_register_sequence(rdev,
|
||||||
|
dvst_golden_registers2,
|
||||||
|
(const u32)ARRAY_SIZE(dvst_golden_registers2));
|
||||||
|
} else {
|
||||||
|
radeon_program_register_sequence(rdev,
|
||||||
|
scrapper_golden_registers,
|
||||||
|
(const u32)ARRAY_SIZE(scrapper_golden_registers));
|
||||||
|
radeon_program_register_sequence(rdev,
|
||||||
|
dvst_golden_registers2,
|
||||||
|
(const u32)ARRAY_SIZE(dvst_golden_registers2));
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#define BTC_IO_MC_REGS_SIZE 29
|
#define BTC_IO_MC_REGS_SIZE 29
|
||||||
|
|
||||||
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
|
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
|
||||||
@@ -466,21 +744,32 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		    (rdev->pdev->device == 0x9907) ||
 		    (rdev->pdev->device == 0x9908) ||
 		    (rdev->pdev->device == 0x9909) ||
+		    (rdev->pdev->device == 0x990B) ||
+		    (rdev->pdev->device == 0x990C) ||
+		    (rdev->pdev->device == 0x990F) ||
 		    (rdev->pdev->device == 0x9910) ||
-		    (rdev->pdev->device == 0x9917)) {
+		    (rdev->pdev->device == 0x9917) ||
+		    (rdev->pdev->device == 0x9999) ||
+		    (rdev->pdev->device == 0x999C)) {
 			rdev->config.cayman.max_simds_per_se = 6;
 			rdev->config.cayman.max_backends_per_se = 2;
 		} else if ((rdev->pdev->device == 0x9903) ||
 			   (rdev->pdev->device == 0x9904) ||
 			   (rdev->pdev->device == 0x990A) ||
+			   (rdev->pdev->device == 0x990D) ||
+			   (rdev->pdev->device == 0x990E) ||
 			   (rdev->pdev->device == 0x9913) ||
-			   (rdev->pdev->device == 0x9918)) {
+			   (rdev->pdev->device == 0x9918) ||
+			   (rdev->pdev->device == 0x999D)) {
 			rdev->config.cayman.max_simds_per_se = 4;
 			rdev->config.cayman.max_backends_per_se = 2;
 		} else if ((rdev->pdev->device == 0x9919) ||
 			   (rdev->pdev->device == 0x9990) ||
 			   (rdev->pdev->device == 0x9991) ||
 			   (rdev->pdev->device == 0x9994) ||
+			   (rdev->pdev->device == 0x9995) ||
+			   (rdev->pdev->device == 0x9996) ||
+			   (rdev->pdev->device == 0x999A) ||
 			   (rdev->pdev->device == 0x99A0)) {
 			rdev->config.cayman.max_simds_per_se = 3;
 			rdev->config.cayman.max_backends_per_se = 1;
@ -604,21 +893,45 @@ static void cayman_gpu_init(struct radeon_device *rdev)
|
|||||||
}
|
}
|
||||||
/* enabled rb are just the one not disabled :) */
|
/* enabled rb are just the one not disabled :) */
|
||||||
disabled_rb_mask = tmp;
|
disabled_rb_mask = tmp;
|
||||||
|
tmp = 0;
|
||||||
|
for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
|
||||||
|
tmp |= (1 << i);
|
||||||
|
/* if all the backends are disabled, fix it up here */
|
||||||
|
if ((disabled_rb_mask & tmp) == tmp) {
|
||||||
|
for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
|
||||||
|
disabled_rb_mask &= ~(1 << i);
|
||||||
|
}
|
||||||
|
|
||||||
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
|
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
|
||||||
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
|
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
|
||||||
|
|
||||||
WREG32(GB_ADDR_CONFIG, gb_addr_config);
|
WREG32(GB_ADDR_CONFIG, gb_addr_config);
|
||||||
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
|
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
|
||||||
|
if (ASIC_IS_DCE6(rdev))
|
||||||
|
WREG32(DMIF_ADDR_CALC, gb_addr_config);
|
||||||
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
|
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
|
||||||
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
|
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
|
||||||
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
|
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
|
||||||
|
WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
|
||||||
|
WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
|
||||||
|
WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
|
||||||
|
|
||||||
|
if ((rdev->config.cayman.max_backends_per_se == 1) &&
|
||||||
|
(rdev->flags & RADEON_IS_IGP)) {
|
||||||
|
if ((disabled_rb_mask & 3) == 1) {
|
||||||
|
/* RB0 disabled, RB1 enabled */
|
||||||
|
tmp = 0x11111111;
|
||||||
|
} else {
|
||||||
|
/* RB1 disabled, RB0 enabled */
|
||||||
|
tmp = 0x00000000;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
tmp = gb_addr_config & NUM_PIPES_MASK;
|
tmp = gb_addr_config & NUM_PIPES_MASK;
|
||||||
tmp = r6xx_remap_render_backend(rdev, tmp,
|
tmp = r6xx_remap_render_backend(rdev, tmp,
|
||||||
rdev->config.cayman.max_backends_per_se *
|
rdev->config.cayman.max_backends_per_se *
|
||||||
rdev->config.cayman.max_shader_engines,
|
rdev->config.cayman.max_shader_engines,
|
||||||
CAYMAN_MAX_BACKENDS, disabled_rb_mask);
|
CAYMAN_MAX_BACKENDS, disabled_rb_mask);
|
||||||
|
}
|
||||||
WREG32(GB_BACKEND_MAP, tmp);
|
WREG32(GB_BACKEND_MAP, tmp);
|
||||||
|
|
||||||
cgts_tcc_disable = 0xffff0000;
|
cgts_tcc_disable = 0xffff0000;
|
||||||
@ -876,7 +1189,7 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
|
|||||||
if (ring->rptr_save_reg) {
|
if (ring->rptr_save_reg) {
|
||||||
uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
|
uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
|
||||||
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
|
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
|
||||||
radeon_ring_write(ring, ((ring->rptr_save_reg -
|
radeon_ring_write(ring, ((ring->rptr_save_reg -
|
||||||
PACKET3_SET_CONFIG_REG_START) >> 2));
|
PACKET3_SET_CONFIG_REG_START) >> 2));
|
||||||
radeon_ring_write(ring, next_rptr);
|
radeon_ring_write(ring, next_rptr);
|
||||||
}
|
}
|
||||||
@ -888,7 +1201,7 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
|
|||||||
#endif
|
#endif
|
||||||
(ib->gpu_addr & 0xFFFFFFFC));
|
(ib->gpu_addr & 0xFFFFFFFC));
|
||||||
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
|
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
|
||||||
radeon_ring_write(ring, ib->length_dw |
|
radeon_ring_write(ring, ib->length_dw |
|
||||||
(ib->vm ? (ib->vm->id << 24) : 0));
|
(ib->vm ? (ib->vm->id << 24) : 0));
|
||||||
|
|
||||||
/* flush read cache over gart for this vmid */
|
/* flush read cache over gart for this vmid */
|
||||||
@@ -902,6 +1215,23 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_write(ring, 10); /* poll interval */
 }
 
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+			       struct radeon_ring *ring,
+			       struct radeon_semaphore *semaphore,
+			       bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+	radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+}
+
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
 {
 	if (enable)
@@ -1202,7 +1532,7 @@ void cayman_dma_stop(struct radeon_device *rdev)
 int cayman_dma_resume(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
-	u32 rb_cntl, dma_cntl;
+	u32 rb_cntl, dma_cntl, ib_cntl;
 	u32 rb_bufsz;
 	u32 reg_offset, wb_offset;
 	int i, r;
@@ -1251,7 +1581,11 @@ int cayman_dma_resume(struct radeon_device *rdev)
 		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
 
 		/* enable DMA IBs */
-		WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+		ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
 
 		dma_cntl = RREG32(DMA_CNTL + reg_offset);
 		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
@ -1292,114 +1626,96 @@ void cayman_dma_fini(struct radeon_device *rdev)
|
|||||||
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
|
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
|
static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||||
{
|
|
||||||
u32 grbm_reset = 0;
|
|
||||||
|
|
||||||
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
|
|
||||||
return;
|
|
||||||
|
|
||||||
dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
|
|
||||||
RREG32(GRBM_STATUS));
|
|
||||||
dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
|
|
||||||
RREG32(GRBM_STATUS_SE0));
|
|
||||||
dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
|
|
||||||
RREG32(GRBM_STATUS_SE1));
|
|
||||||
dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
|
|
||||||
RREG32(SRBM_STATUS));
|
|
||||||
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
|
|
||||||
RREG32(CP_STALLED_STAT1));
|
|
||||||
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
|
|
||||||
RREG32(CP_STALLED_STAT2));
|
|
||||||
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
|
|
||||||
RREG32(CP_BUSY_STAT));
|
|
||||||
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
|
|
||||||
RREG32(CP_STAT));
|
|
||||||
|
|
||||||
/* Disable CP parsing/prefetching */
|
|
||||||
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
|
|
||||||
|
|
||||||
/* reset all the gfx blocks */
|
|
||||||
grbm_reset = (SOFT_RESET_CP |
|
|
||||||
SOFT_RESET_CB |
|
|
||||||
SOFT_RESET_DB |
|
|
||||||
SOFT_RESET_GDS |
|
|
||||||
SOFT_RESET_PA |
|
|
||||||
SOFT_RESET_SC |
|
|
||||||
SOFT_RESET_SPI |
|
|
||||||
SOFT_RESET_SH |
|
|
||||||
SOFT_RESET_SX |
|
|
||||||
SOFT_RESET_TC |
|
|
||||||
SOFT_RESET_TA |
|
|
||||||
SOFT_RESET_VGT |
|
|
||||||
SOFT_RESET_IA);
|
|
||||||
|
|
||||||
dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
|
|
||||||
WREG32(GRBM_SOFT_RESET, grbm_reset);
|
|
||||||
(void)RREG32(GRBM_SOFT_RESET);
|
|
||||||
udelay(50);
|
|
||||||
WREG32(GRBM_SOFT_RESET, 0);
|
|
||||||
(void)RREG32(GRBM_SOFT_RESET);
|
|
||||||
|
|
||||||
dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
|
|
||||||
RREG32(GRBM_STATUS));
|
|
||||||
dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
|
|
||||||
RREG32(GRBM_STATUS_SE0));
|
|
||||||
dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
|
|
||||||
RREG32(GRBM_STATUS_SE1));
|
|
||||||
dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
|
|
||||||
RREG32(SRBM_STATUS));
|
|
||||||
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
|
|
||||||
RREG32(CP_STALLED_STAT1));
|
|
||||||
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
|
|
||||||
RREG32(CP_STALLED_STAT2));
|
|
||||||
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
|
|
||||||
RREG32(CP_BUSY_STAT));
|
|
||||||
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
|
|
||||||
RREG32(CP_STAT));
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
|
|
||||||
{
|
{
|
||||||
|
u32 reset_mask = 0;
|
||||||
u32 tmp;
|
u32 tmp;
|
||||||
|
|
||||||
if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
|
/* GRBM_STATUS */
|
||||||
return;
|
tmp = RREG32(GRBM_STATUS);
|
||||||
|
if (tmp & (PA_BUSY | SC_BUSY |
|
||||||
|
SH_BUSY | SX_BUSY |
|
||||||
|
TA_BUSY | VGT_BUSY |
|
||||||
|
DB_BUSY | CB_BUSY |
|
||||||
|
GDS_BUSY | SPI_BUSY |
|
||||||
|
IA_BUSY | IA_BUSY_NO_DMA))
|
||||||
|
reset_mask |= RADEON_RESET_GFX;
|
||||||
|
|
||||||
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
|
if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
|
||||||
RREG32(DMA_STATUS_REG));
|
CP_BUSY | CP_COHERENCY_BUSY))
|
||||||
|
reset_mask |= RADEON_RESET_CP;
|
||||||
|
|
||||||
/* dma0 */
|
if (tmp & GRBM_EE_BUSY)
|
||||||
tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
|
reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
|
||||||
tmp &= ~DMA_RB_ENABLE;
|
|
||||||
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
|
|
||||||
|
|
||||||
/* dma1 */
|
/* DMA_STATUS_REG 0 */
|
||||||
tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
|
tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
|
||||||
tmp &= ~DMA_RB_ENABLE;
|
if (!(tmp & DMA_IDLE))
|
||||||
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
|
reset_mask |= RADEON_RESET_DMA;
|
||||||
|
|
||||||
/* Reset dma */
|
/* DMA_STATUS_REG 1 */
|
||||||
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
|
tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
|
||||||
RREG32(SRBM_SOFT_RESET);
|
if (!(tmp & DMA_IDLE))
|
||||||
udelay(50);
|
reset_mask |= RADEON_RESET_DMA1;
|
||||||
WREG32(SRBM_SOFT_RESET, 0);
|
|
||||||
|
|
||||||
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
|
/* SRBM_STATUS2 */
|
||||||
RREG32(DMA_STATUS_REG));
|
tmp = RREG32(SRBM_STATUS2);
|
||||||
|
if (tmp & DMA_BUSY)
|
||||||
|
reset_mask |= RADEON_RESET_DMA;
|
||||||
|
|
||||||
|
if (tmp & DMA1_BUSY)
|
||||||
|
reset_mask |= RADEON_RESET_DMA1;
|
||||||
|
|
||||||
|
/* SRBM_STATUS */
|
||||||
|
tmp = RREG32(SRBM_STATUS);
|
||||||
|
if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
|
||||||
|
reset_mask |= RADEON_RESET_RLC;
|
||||||
|
|
||||||
|
if (tmp & IH_BUSY)
|
||||||
|
reset_mask |= RADEON_RESET_IH;
|
||||||
|
|
||||||
|
if (tmp & SEM_BUSY)
|
||||||
|
reset_mask |= RADEON_RESET_SEM;
|
||||||
|
|
||||||
|
if (tmp & GRBM_RQ_PENDING)
|
||||||
|
reset_mask |= RADEON_RESET_GRBM;
|
||||||
|
|
||||||
|
if (tmp & VMC_BUSY)
|
||||||
|
reset_mask |= RADEON_RESET_VMC;
|
||||||
|
|
||||||
|
if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
|
||||||
|
MCC_BUSY | MCD_BUSY))
|
||||||
|
reset_mask |= RADEON_RESET_MC;
|
||||||
|
|
||||||
|
if (evergreen_is_display_hung(rdev))
|
||||||
|
reset_mask |= RADEON_RESET_DISPLAY;
|
||||||
|
|
||||||
|
/* VM_L2_STATUS */
|
||||||
|
tmp = RREG32(VM_L2_STATUS);
|
||||||
|
if (tmp & L2_BUSY)
|
||||||
|
reset_mask |= RADEON_RESET_VMC;
|
||||||
|
|
||||||
|
/* Skip MC reset as it's mostly likely not hung, just busy */
|
||||||
|
if (reset_mask & RADEON_RESET_MC) {
|
||||||
|
DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
|
||||||
|
reset_mask &= ~RADEON_RESET_MC;
|
||||||
|
}
|
||||||
|
|
||||||
|
return reset_mask;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
|
static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
|
||||||
{
|
{
|
||||||
struct evergreen_mc_save save;
|
struct evergreen_mc_save save;
|
||||||
|
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
|
||||||
|
u32 tmp;
|
||||||
|
|
||||||
if (reset_mask == 0)
|
if (reset_mask == 0)
|
||||||
return 0;
|
return;
|
||||||
|
|
||||||
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
|
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
|
||||||
|
|
||||||
|
evergreen_print_gpu_status_regs(rdev);
|
||||||
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
|
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
|
||||||
RREG32(0x14F8));
|
RREG32(0x14F8));
|
||||||
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
|
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
|
||||||
@ -1409,29 +1725,158 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
|
|||||||
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
|
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
|
||||||
RREG32(0x14DC));
|
RREG32(0x14DC));
|
||||||
|
|
||||||
|
/* Disable CP parsing/prefetching */
|
||||||
|
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_DMA) {
|
||||||
|
/* dma0 */
|
||||||
|
tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
|
||||||
|
tmp &= ~DMA_RB_ENABLE;
|
||||||
|
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_DMA1) {
|
||||||
|
/* dma1 */
|
||||||
|
tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
|
||||||
|
tmp &= ~DMA_RB_ENABLE;
|
||||||
|
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
|
||||||
|
}
|
||||||
|
|
||||||
|
udelay(50);
|
||||||
|
|
||||||
evergreen_mc_stop(rdev, &save);
|
evergreen_mc_stop(rdev, &save);
|
||||||
if (evergreen_mc_wait_for_idle(rdev)) {
|
if (evergreen_mc_wait_for_idle(rdev)) {
|
||||||
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
|
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
|
if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
|
||||||
cayman_gpu_soft_reset_gfx(rdev);
|
grbm_soft_reset = SOFT_RESET_CB |
|
||||||
|
SOFT_RESET_DB |
|
||||||
|
SOFT_RESET_GDS |
|
||||||
|
SOFT_RESET_PA |
|
||||||
|
SOFT_RESET_SC |
|
||||||
|
SOFT_RESET_SPI |
|
||||||
|
SOFT_RESET_SH |
|
||||||
|
SOFT_RESET_SX |
|
||||||
|
SOFT_RESET_TC |
|
||||||
|
SOFT_RESET_TA |
|
||||||
|
SOFT_RESET_VGT |
|
||||||
|
SOFT_RESET_IA;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_CP) {
|
||||||
|
grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
|
||||||
|
|
||||||
|
srbm_soft_reset |= SOFT_RESET_GRBM;
|
||||||
|
}
|
||||||
|
|
||||||
if (reset_mask & RADEON_RESET_DMA)
|
if (reset_mask & RADEON_RESET_DMA)
|
||||||
cayman_gpu_soft_reset_dma(rdev);
|
srbm_soft_reset |= SOFT_RESET_DMA;
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_DMA1)
|
||||||
|
srbm_soft_reset |= SOFT_RESET_DMA1;
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_DISPLAY)
|
||||||
|
srbm_soft_reset |= SOFT_RESET_DC;
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_RLC)
|
||||||
|
srbm_soft_reset |= SOFT_RESET_RLC;
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_SEM)
|
||||||
|
srbm_soft_reset |= SOFT_RESET_SEM;
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_IH)
|
||||||
|
srbm_soft_reset |= SOFT_RESET_IH;
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_GRBM)
|
||||||
|
srbm_soft_reset |= SOFT_RESET_GRBM;
|
||||||
|
|
||||||
|
if (reset_mask & RADEON_RESET_VMC)
|
||||||
|
srbm_soft_reset |= SOFT_RESET_VMC;
|
||||||
|
|
||||||
|
if (!(rdev->flags & RADEON_IS_IGP)) {
|
||||||
|
if (reset_mask & RADEON_RESET_MC)
|
||||||
|
srbm_soft_reset |= SOFT_RESET_MC;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (grbm_soft_reset) {
|
||||||
|
tmp = RREG32(GRBM_SOFT_RESET);
|
||||||
|
tmp |= grbm_soft_reset;
|
||||||
|
dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
|
||||||
|
WREG32(GRBM_SOFT_RESET, tmp);
|
||||||
|
tmp = RREG32(GRBM_SOFT_RESET);
|
||||||
|
|
||||||
|
udelay(50);
|
||||||
|
|
||||||
|
tmp &= ~grbm_soft_reset;
|
||||||
|
WREG32(GRBM_SOFT_RESET, tmp);
|
||||||
|
tmp = RREG32(GRBM_SOFT_RESET);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (srbm_soft_reset) {
|
||||||
|
tmp = RREG32(SRBM_SOFT_RESET);
|
||||||
|
tmp |= srbm_soft_reset;
|
||||||
|
dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
|
||||||
|
WREG32(SRBM_SOFT_RESET, tmp);
|
||||||
|
tmp = RREG32(SRBM_SOFT_RESET);
|
||||||
|
|
||||||
|
udelay(50);
|
||||||
|
|
||||||
|
tmp &= ~srbm_soft_reset;
|
||||||
|
WREG32(SRBM_SOFT_RESET, tmp);
|
||||||
|
tmp = RREG32(SRBM_SOFT_RESET);
|
||||||
|
}
|
||||||
|
|
||||||
/* Wait a little for things to settle down */
|
/* Wait a little for things to settle down */
|
||||||
udelay(50);
|
udelay(50);
|
||||||
|
|
||||||
evergreen_mc_resume(rdev, &save);
|
evergreen_mc_resume(rdev, &save);
|
||||||
return 0;
|
udelay(50);
|
||||||
|
|
||||||
|
evergreen_print_gpu_status_regs(rdev);
|
||||||
}
|
}
|
||||||
|
|
||||||
int cayman_asic_reset(struct radeon_device *rdev)
|
int cayman_asic_reset(struct radeon_device *rdev)
|
||||||
{
|
{
|
||||||
return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
|
u32 reset_mask;
|
||||||
|
|
||||||
|
reset_mask = cayman_gpu_check_soft_reset(rdev);
|
||||||
|
|
||||||
|
if (reset_mask)
|
||||||
|
r600_set_bios_scratch_engine_hung(rdev, true);
|
||||||
|
|
||||||
|
cayman_gpu_soft_reset(rdev, reset_mask);
|
||||||
|
|
||||||
|
reset_mask = cayman_gpu_check_soft_reset(rdev);
|
||||||
|
|
||||||
|
if (!reset_mask)
|
||||||
|
r600_set_bios_scratch_engine_hung(rdev, false);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* cayman_gfx_is_lockup - Check if the GFX engine is locked up
|
||||||
|
*
|
||||||
|
* @rdev: radeon_device pointer
|
||||||
|
* @ring: radeon_ring structure holding ring information
|
||||||
|
*
|
||||||
|
* Check if the GFX engine is locked up.
|
||||||
|
* Returns true if the engine appears to be locked up, false if not.
|
||||||
|
*/
|
||||||
|
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||||
|
{
|
||||||
|
u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
|
||||||
|
|
||||||
|
if (!(reset_mask & (RADEON_RESET_GFX |
|
||||||
RADEON_RESET_COMPUTE |
|
RADEON_RESET_COMPUTE |
|
||||||
RADEON_RESET_DMA));
|
RADEON_RESET_CP))) {
|
||||||
|
radeon_ring_lockup_update(ring);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
/* force CP activities */
|
||||||
|
radeon_ring_force_activity(rdev, ring);
|
||||||
|
return radeon_ring_test_lockup(rdev, ring);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -1440,18 +1885,20 @@ int cayman_asic_reset(struct radeon_device *rdev)
|
|||||||
* @rdev: radeon_device pointer
|
* @rdev: radeon_device pointer
|
||||||
* @ring: radeon_ring structure holding ring information
|
* @ring: radeon_ring structure holding ring information
|
||||||
*
|
*
|
||||||
* Check if the async DMA engine is locked up (cayman-SI).
|
* Check if the async DMA engine is locked up.
|
||||||
* Returns true if the engine appears to be locked up, false if not.
|
* Returns true if the engine appears to be locked up, false if not.
|
||||||
*/
|
*/
|
||||||
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||||
{
|
{
|
||||||
u32 dma_status_reg;
|
u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
|
||||||
|
u32 mask;
|
||||||
|
|
||||||
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
|
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
|
||||||
dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
|
mask = RADEON_RESET_DMA;
|
||||||
else
|
else
|
||||||
dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
|
mask = RADEON_RESET_DMA1;
|
||||||
if (dma_status_reg & DMA_IDLE) {
|
|
||||||
|
if (!(reset_mask & mask)) {
|
||||||
radeon_ring_lockup_update(ring);
|
radeon_ring_lockup_update(ring);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@@ -1529,6 +1976,16 @@ static int cayman_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+//	r = rv770_uvd_resume(rdev);
+//	if (!r) {
+//		r = radeon_fence_driver_start_ring(rdev,
+//						   R600_RING_TYPE_UVD_INDEX);
+//		if (r)
+//			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+//	}
+//	if (r)
+//		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
 	if (r) {
 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
@@ -1554,6 +2011,12 @@ static int cayman_startup(struct radeon_device *rdev)
 	}
 
 	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
 	r = r600_irq_init(rdev);
 	if (r) {
 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -1595,11 +2058,31 @@ static int cayman_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	if (ring->ring_size) {
+		r = radeon_ring_init(rdev, ring, ring->ring_size,
+				     R600_WB_UVD_RPTR_OFFSET,
+				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+				     0, 0xfffff, RADEON_CP_PACKET2);
+		if (!r)
+			r = r600_uvd_init(rdev);
+		if (r)
+			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+	}
+
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 		return r;
 	}
 
+	r = radeon_vm_manager_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+		return r;
+	}
+
 	return 0;
 }
 
@@ -1641,6 +2124,8 @@ int cayman_init(struct radeon_device *rdev)
 		DRM_INFO("GPU not posted. posting now...\n");
 		atom_asic_init(rdev->mode_info.atom_context);
 	}
+	/* init golden registers */
+	ni_init_golden_registers(rdev);
 	/* Initialize scratch registers */
 	r600_scratch_init(rdev);
 	/* Initialize surface registers */
@@ -1660,10 +2145,6 @@ int cayman_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_irq_kms_init(rdev);
-	if (r)
-		return r;
-
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
 
@@ -1675,6 +2156,13 @@ int cayman_init(struct radeon_device *rdev)
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 64 * 1024);
 
+//	r = radeon_uvd_init(rdev);
+//	if (!r) {
+//		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+//		ring->ring_obj = NULL;
+//		r600_ring_init(rdev, ring, 4096);
+//	}
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@ -1748,19 +2236,21 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
|
|||||||
* cayman_vm_set_page - update the page tables using the CP
|
* cayman_vm_set_page - update the page tables using the CP
|
||||||
*
|
*
|
||||||
* @rdev: radeon_device pointer
|
* @rdev: radeon_device pointer
|
||||||
|
* @ib: indirect buffer to fill with commands
|
||||||
* @pe: addr of the page entry
|
* @pe: addr of the page entry
|
||||||
* @addr: dst addr to write into pe
|
* @addr: dst addr to write into pe
|
||||||
* @count: number of page entries to update
|
* @count: number of page entries to update
|
||||||
* @incr: increase next addr by incr bytes
|
* @incr: increase next addr by incr bytes
|
||||||
* @flags: access flags
|
* @flags: access flags
|
||||||
*
|
*
|
||||||
* Update the page tables using the CP (cayman-si).
|
* Update the page tables using the CP (cayman/TN).
|
||||||
*/
|
*/
|
||||||
void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
|
void cayman_vm_set_page(struct radeon_device *rdev,
|
||||||
|
struct radeon_ib *ib,
|
||||||
|
uint64_t pe,
|
||||||
uint64_t addr, unsigned count,
|
uint64_t addr, unsigned count,
|
||||||
uint32_t incr, uint32_t flags)
|
uint32_t incr, uint32_t flags)
|
||||||
{
|
{
|
||||||
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
|
|
||||||
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
|
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
|
||||||
uint64_t value;
|
uint64_t value;
|
||||||
unsigned ndw;
|
unsigned ndw;
|
||||||
@ -1771,9 +2261,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
|
|||||||
if (ndw > 0x3FFF)
|
if (ndw > 0x3FFF)
|
||||||
ndw = 0x3FFF;
|
ndw = 0x3FFF;
|
||||||
|
|
||||||
radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
|
ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
|
||||||
radeon_ring_write(ring, pe);
|
ib->ptr[ib->length_dw++] = pe;
|
||||||
radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
|
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||||
for (; ndw > 1; ndw -= 2, --count, pe += 8) {
|
for (; ndw > 1; ndw -= 2, --count, pe += 8) {
|
||||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||||
value = radeon_vm_map_gart(rdev, addr);
|
value = radeon_vm_map_gart(rdev, addr);
|
||||||
@ -1785,20 +2275,22 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
|
|||||||
}
|
}
|
||||||
addr += incr;
|
addr += incr;
|
||||||
value |= r600_flags;
|
value |= r600_flags;
|
||||||
radeon_ring_write(ring, value);
|
ib->ptr[ib->length_dw++] = value;
|
||||||
radeon_ring_write(ring, upper_32_bits(value));
|
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
if ((flags & RADEON_VM_PAGE_SYSTEM) ||
|
||||||
|
(count == 1)) {
|
||||||
while (count) {
|
while (count) {
|
||||||
ndw = count * 2;
|
ndw = count * 2;
|
||||||
if (ndw > 0xFFFFE)
|
if (ndw > 0xFFFFE)
|
||||||
ndw = 0xFFFFE;
|
ndw = 0xFFFFE;
|
||||||
|
|
||||||
/* for non-physically contiguous pages (system) */
|
/* for non-physically contiguous pages (system) */
|
||||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
|
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
|
||||||
radeon_ring_write(ring, pe);
|
ib->ptr[ib->length_dw++] = pe;
|
||||||
radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
|
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||||
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
|
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
|
||||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||||
value = radeon_vm_map_gart(rdev, addr);
|
value = radeon_vm_map_gart(rdev, addr);
|
||||||
@ -1810,10 +2302,39 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
|
|||||||
}
|
}
|
||||||
addr += incr;
|
addr += incr;
|
||||||
value |= r600_flags;
|
value |= r600_flags;
|
||||||
radeon_ring_write(ring, value);
|
ib->ptr[ib->length_dw++] = value;
|
||||||
radeon_ring_write(ring, upper_32_bits(value));
|
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
while (ib->length_dw & 0x7)
|
||||||
|
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
|
||||||
|
} else {
|
||||||
|
while (count) {
|
||||||
|
ndw = count * 2;
|
||||||
|
if (ndw > 0xFFFFE)
|
||||||
|
ndw = 0xFFFFE;
|
||||||
|
|
||||||
|
if (flags & RADEON_VM_PAGE_VALID)
|
||||||
|
value = addr;
|
||||||
|
else
|
||||||
|
value = 0;
|
||||||
|
/* for physically contiguous pages (vram) */
|
||||||
|
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
|
||||||
|
ib->ptr[ib->length_dw++] = pe; /* dst addr */
|
||||||
|
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||||
|
ib->ptr[ib->length_dw++] = r600_flags; /* mask */
|
||||||
|
ib->ptr[ib->length_dw++] = 0;
|
||||||
|
ib->ptr[ib->length_dw++] = value; /* value */
|
||||||
|
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||||
|
ib->ptr[ib->length_dw++] = incr; /* increment size */
|
||||||
|
ib->ptr[ib->length_dw++] = 0;
|
||||||
|
pe += ndw * 4;
|
||||||
|
addr += (ndw / 2) * incr;
|
||||||
|
count -= ndw / 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
while (ib->length_dw & 0x7)
|
||||||
|
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -45,10 +45,24 @@
 #define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
 
 #define DMIF_ADDR_CONFIG 0xBD4
 
+/* DCE6 only */
+#define DMIF_ADDR_CALC 0xC00
+
 #define SRBM_GFX_CNTL 0x0E44
 #define RINGID(x) (((x) & 0x3) << 0)
 #define VMID(x) (((x) & 0x7) << 0)
 #define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
+
 #define SRBM_SOFT_RESET 0x0E60
 #define SOFT_RESET_BIF (1 << 1)
@@ -65,9 +79,13 @@
 #define SOFT_RESET_VMC (1 << 17)
 #define SOFT_RESET_DMA (1 << 20)
 #define SOFT_RESET_TST (1 << 21)
 #define SOFT_RESET_REGBB (1 << 22)
 #define SOFT_RESET_ORB (1 << 23)
 
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
 #define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
 #define REQUEST_TYPE(x) (((x) & 0xf) << 0)
 #define RESPONSE_TYPE_MASK 0x000000F0
@ -471,19 +489,22 @@
|
|||||||
# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
|
# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
|
||||||
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
|
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* UVD
|
||||||
|
*/
|
||||||
|
#define UVD_SEMA_ADDR_LOW 0xEF00
|
||||||
|
#define UVD_SEMA_ADDR_HIGH 0xEF04
|
||||||
|
#define UVD_SEMA_CMD 0xEF08
|
||||||
|
#define UVD_UDEC_ADDR_CONFIG 0xEF4C
|
||||||
|
#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
|
||||||
|
#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
|
||||||
|
#define UVD_RBC_RB_RPTR 0xF690
|
||||||
|
#define UVD_RBC_RB_WPTR 0xF694
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* PM4
|
* PM4
|
||||||
*/
|
*/
|
||||||
#define PACKET_TYPE0 0
|
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
|
||||||
#define PACKET_TYPE1 1
|
|
||||||
#define PACKET_TYPE2 2
|
|
||||||
#define PACKET_TYPE3 3
|
|
||||||
|
|
||||||
#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
|
|
||||||
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
|
|
||||||
#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
|
|
||||||
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
|
|
||||||
#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
|
|
||||||
(((reg) >> 2) & 0xFFFF) | \
|
(((reg) >> 2) & 0xFFFF) | \
|
||||||
((n) & 0x3FFF) << 16)
|
((n) & 0x3FFF) << 16)
|
||||||
#define CP_PACKET2 0x80000000
|
#define CP_PACKET2 0x80000000
|
||||||
@ -492,7 +513,7 @@
|
|||||||
|
|
||||||
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
|
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
|
||||||
|
|
||||||
#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
|
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
|
||||||
(((op) & 0xFF) << 8) | \
|
(((op) & 0xFF) << 8) | \
|
||||||
((n) & 0x3FFF) << 16)
|
((n) & 0x3FFF) << 16)
|
||||||
|
|
||||||
@@ -663,6 +684,11 @@
 	 (((vmid) & 0xF) << 20) | \
 	 (((n) & 0xFFFFF) << 0))
 
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+				(1 << 26) | \
+				(1 << 21) | \
+				(((n) & 0xFFFFF) << 0))
+
 /* async DMA Packet types */
 #define DMA_PACKET_WRITE 0x2
 #define DMA_PACKET_COPY 0x3
@@ -1,5 +1,6 @@
 
 #include <linux/kernel.h>
+#include <linux/types.h>
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/mod_devicetable.h>
@@ -635,7 +636,9 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
 /* Create a virtual mapping cookie for an IO port range */
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
-	return (void __iomem *) port;
+	if (port > PIO_MASK)
+		return NULL;
+	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
 }
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
@@ -786,7 +789,8 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
 		start = (loff_t)0xC0000;
 		*size = 0x20000; /* cover C000:0 through E000:0 */
 	} else {
-		if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
+		if (res->flags &
+			(IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
 			*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
 			return (void __iomem *)(unsigned long)
 				pci_resource_start(pdev, PCI_ROM_RESOURCE);
@@ -841,15 +845,6 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
 	pci_disable_rom(pdev);
 }
 
-int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	dev->dma_mask = mask;
-
-	return 0;
-}
-
-
-
 static void __pci_set_master(struct pci_dev *dev, bool enable)
 {
 	u16 old_cmd, cmd;
@@ -67,6 +67,38 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  * and others in some cases.
  */
 
+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0) {
+		if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
+			return true;
+		else
+			return false;
+	} else {
+		if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
+			return true;
+		else
+			return false;
+	}
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 vline1, vline2;
+
+	if (crtc == 0) {
+		vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+		vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+	} else {
+		vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+		vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+	}
+	if (vline1 != vline2)
+		return true;
+	else
+		return false;
+}
+
 /**
  * r100_wait_for_vblank - vblank wait asic callback.
  *
@ -77,36 +109,33 @@ MODULE_FIRMWARE(FIRMWARE_R520);
|
|||||||
*/
|
*/
|
||||||
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
|
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
|
||||||
{
|
{
|
||||||
int i;
|
unsigned i = 0;
|
||||||
|
|
||||||
if (crtc >= rdev->num_crtc)
|
if (crtc >= rdev->num_crtc)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (crtc == 0) {
|
if (crtc == 0) {
|
||||||
if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
|
if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
|
||||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
return;
|
||||||
if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
|
} else {
|
||||||
|
if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* depending on when we hit vblank, we may be close to active; if so,
|
||||||
|
* wait for another frame.
|
||||||
|
*/
|
||||||
|
while (r100_is_in_vblank(rdev, crtc)) {
|
||||||
|
if (i++ % 100 == 0) {
|
||||||
|
if (!r100_is_counter_moving(rdev, crtc))
|
||||||
break;
|
break;
|
||||||
udelay(1);
|
|
||||||
}
|
|
||||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
|
||||||
if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
|
|
||||||
break;
|
|
||||||
udelay(1);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
|
while (!r100_is_in_vblank(rdev, crtc)) {
|
||||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
if (i++ % 100 == 0) {
|
||||||
if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
|
if (!r100_is_counter_moving(rdev, crtc))
|
||||||
break;
|
break;
|
||||||
udelay(1);
|
|
||||||
}
|
|
||||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
|
||||||
if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
|
|
||||||
break;
|
|
||||||
udelay(1);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -857,11 +886,11 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
 	struct radeon_cs_reloc *reloc;
 	u32 value;
 
-	r = r100_cs_packet_next_reloc(p, &reloc);
+	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 	if (r) {
 		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 			  idx, reg);
-		r100_cs_dump_packet(p, pkt);
+		radeon_cs_dump_packet(p, pkt);
 		return r;
 	}
 
@@ -875,7 +904,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
 	if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
 		if (reg == RADEON_SRC_PITCH_OFFSET) {
 			DRM_ERROR("Cannot src blit from microtiled surface\n");
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return -EINVAL;
 		}
 		tile_flags |= RADEON_DST_TILE_MICRO;
@@ -905,16 +934,16 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 	if (c > 16) {
 		DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
 			  pkt->opcode);
-		r100_cs_dump_packet(p, pkt);
+		radeon_cs_dump_packet(p, pkt);
 		return -EINVAL;
 	}
 	track->num_arrays = c;
 	for (i = 0; i < (c - 1); i+=2, idx+=3) {
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n",
 				  pkt->opcode);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		idx_value = radeon_get_ib_value(p, idx);
@@ -923,11 +952,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 		track->arrays[i + 0].esize = idx_value >> 8;
 		track->arrays[i + 0].robj = reloc->robj;
 		track->arrays[i + 0].esize &= 0x7F;
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n",
 				  pkt->opcode);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
@@ -936,11 +965,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 		track->arrays[i + 1].esize &= 0x7F;
 	}
 	if (c & 1) {
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n",
 				  pkt->opcode);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		idx_value = radeon_get_ib_value(p, idx);
@@ -1086,7 +1115,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	ib = p->ib.ptr;
 
 	/* parse the wait until */
-	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
+	r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
 	if (r)
 		return r;
 
@@ -1103,7 +1132,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	}
 
 	/* jump over the NOP */
-	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
 	if (r)
 		return r;
 
@@ -1113,7 +1142,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 
 	header = radeon_get_ib_value(p, h_idx);
 	crtc_id = radeon_get_ib_value(p, h_idx + 5);
-	reg = CP_PACKET0_GET_REG(header);
+	reg = R100_CP_PACKET0_GET_REG(header);
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1148,54 +1177,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	return 0;
 }
 
-/**
- * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
- * @parser:		parser structure holding parsing context.
- * @data:		pointer to relocation data
- * @offset_start:	starting offset
- * @offset_mask:	offset mask (to align start offset on)
- * @reloc:		reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
-			      struct radeon_cs_reloc **cs_reloc)
-{
-	struct radeon_cs_chunk *relocs_chunk;
-	struct radeon_cs_packet p3reloc;
-	unsigned idx;
-	int r;
-
-	if (p->chunk_relocs_idx == -1) {
-		DRM_ERROR("No relocation chunk !\n");
-		return -EINVAL;
-	}
-	*cs_reloc = NULL;
-	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
-	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
-	if (r) {
-		return r;
-	}
-	p->idx += p3reloc.count + 2;
-	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
-		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
-			  p3reloc.idx);
-		r100_cs_dump_packet(p, &p3reloc);
-		return -EINVAL;
-	}
-	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
-	if (idx >= relocs_chunk->length_dw) {
-		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
-			  idx, relocs_chunk->length_dw);
-		r100_cs_dump_packet(p, &p3reloc);
-		return -EINVAL;
-	}
-	/* FIXME: we assume reloc size is 4 dwords */
-	*cs_reloc = p->relocs_ptr[(idx / 4)];
-	return 0;
-}
-
 static int r100_get_vtx_size(uint32_t vtx_fmt)
 {
 	int vtx_size;
@@ -1273,7 +1254,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		break;
@@ -1286,11 +1267,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		break;
 	case RADEON_RB3D_DEPTHOFFSET:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->zb.robj = reloc->robj;
@@ -1299,11 +1280,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->cb[0].robj = reloc->robj;
@@ -1315,11 +1296,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	case RADEON_PP_TXOFFSET_1:
 	case RADEON_PP_TXOFFSET_2:
 		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1342,11 +1323,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	case RADEON_PP_CUBIC_OFFSET_T0_3:
 	case RADEON_PP_CUBIC_OFFSET_T0_4:
 		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->textures[0].cube_info[i].offset = idx_value;
@@ -1360,11 +1341,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	case RADEON_PP_CUBIC_OFFSET_T1_3:
 	case RADEON_PP_CUBIC_OFFSET_T1_4:
 		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->textures[1].cube_info[i].offset = idx_value;
@@ -1378,11 +1359,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	case RADEON_PP_CUBIC_OFFSET_T2_3:
 	case RADEON_PP_CUBIC_OFFSET_T2_4:
 		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->textures[2].cube_info[i].offset = idx_value;
@@ -1396,11 +1377,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1467,11 +1448,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1628,10 +1609,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 			return r;
 		break;
 	case PACKET3_INDX_BUFFER:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
@@ -1642,10 +1623,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 		break;
 	case 0x23:
 		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
@@ -1742,13 +1723,13 @@ int r100_cs_parse(struct radeon_cs_parser *p)
 	r100_cs_track_clear(p->rdev, track);
 	p->track = track;
 	do {
-		r = r100_cs_packet_parse(p, &pkt, p->idx);
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
 		if (r) {
 			return r;
 		}
 		p->idx += pkt.count + 2;
 		switch (pkt.type) {
-		case PACKET_TYPE0:
+		case RADEON_PACKET_TYPE0:
 			if (p->rdev->family >= CHIP_R200)
 				r = r100_cs_parse_packet0(p, &pkt,
 					p->rdev->config.r100.reg_safe_bm,
@@ -1760,9 +1741,9 @@ int r100_cs_parse(struct radeon_cs_parser *p)
 					p->rdev->config.r100.reg_safe_bm_size,
 					&r100_packet0_check);
 			break;
-		case PACKET_TYPE2:
+		case RADEON_PACKET_TYPE2:
 			break;
-		case PACKET_TYPE3:
+		case RADEON_PACKET_TYPE3:
 			r = r100_packet3_check(p, &pkt);
 			break;
 		default:
@@ -1770,9 +1751,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
 			       pkt.type);
 			return -EINVAL;
 		}
-		if (r) {
+		if (r)
 			return r;
-		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 	return 0;
 }
@@ -3593,6 +3573,12 @@ static int r100_startup(struct radeon_device *rdev)
 	}
 
 	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
 	r100_irq_set(rdev);
 	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
@@ -3689,9 +3675,6 @@ int r100_init(struct radeon_device *rdev)
 	r100_mc_init(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
-	if (r)
-		return r;
-	r = radeon_irq_kms_init(rdev);
 	if (r)
 		return r;
 	/* Memory manager */
@@ -64,17 +64,6 @@
 			 REG_SET(PACKET3_IT_OPCODE, (op)) | \
 			 REG_SET(PACKET3_COUNT, (n)))
 
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
 /* Registers */
 #define R_0000F0_RBBM_SOFT_RESET 0x0000F0
 #define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
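(Editorial note: the CP_PACKET_GET_* helpers removed above encode the layout of a 32-bit CP packet header: bits 31:30 hold the packet type, bits 29:16 the dword count, bit 15 the one-register-write flag, bits 15:8 the type-3 opcode, and for type-0 packets bits 12:0 a register dword index that is shifted left by 2 to form the byte offset. The small self-contained decoder below reuses exactly those shifts and masks for illustration; the helper names are hypothetical and are not the shared macros this commit switches the parsers to.)

/* cp_header_sketch.c - decode a Radeon CP packet header (illustration only) */
#include <stdint.h>
#include <stdio.h>

/* Field extraction mirroring the removed CP_PACKET_GET_* macros. */
static unsigned cp_type(uint32_t h)    { return (h >> 30) & 0x3;    }
static unsigned cp_count(uint32_t h)   { return (h >> 16) & 0x3fff; }
static unsigned cp_reg(uint32_t h)     { return (h & 0x1fff) << 2;  } /* type-0: byte offset */
static unsigned cp_one_reg(uint32_t h) { return (h >> 15) & 0x1;    }
static unsigned cp_opcode(uint32_t h)  { return (h >> 8)  & 0xff;   } /* type-3 only */

int main(void)
{
	/* Example: a type-3 NOP (opcode 0x10) covering 2 following dwords. */
	uint32_t header = 0xC0000000u | (2u << 16) | (0x10u << 8);

	printf("type=%u count=%u opcode=0x%02X\n",
	       cp_type(header), cp_count(header), cp_opcode(header));
	printf("interpreted as type-0 it would address reg 0x%04X (one_reg_wr=%u)\n",
	       cp_reg(header), cp_one_reg(header));
	return 0;
}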
@@ -165,7 +165,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		break;
@@ -178,11 +178,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		break;
 	case RADEON_RB3D_DEPTHOFFSET:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->zb.robj = reloc->robj;
@@ -191,11 +191,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->cb[0].robj = reloc->robj;
@@ -210,11 +210,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 	case R200_PP_TXOFFSET_4:
 	case R200_PP_TXOFFSET_5:
 		i = (reg - R200_PP_TXOFFSET_0) / 24;
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -263,11 +263,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 	case R200_PP_CUBIC_OFFSET_F5_5:
 		i = (reg - R200_PP_TXOFFSET_0) / 24;
 		face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->textures[i].cube_info[face - 1].offset = idx_value;
@@ -281,11 +281,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 
@@ -358,11 +358,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -618,7 +618,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		break;
@@ -633,11 +633,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	case R300_RB3D_COLOROFFSET2:
 	case R300_RB3D_COLOROFFSET3:
 		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->cb[i].robj = reloc->robj;
@@ -646,11 +646,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R300_ZB_DEPTHOFFSET:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->zb.robj = reloc->robj;
@@ -675,11 +675,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	case R300_TX_OFFSET_0+56:
 	case R300_TX_OFFSET_0+60:
 		i = (reg - R300_TX_OFFSET_0) >> 2;
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 
@@ -748,11 +748,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		/* RB3D_COLORPITCH2 */
 		/* RB3D_COLORPITCH3 */
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			r = r100_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 					  idx, reg);
-				r100_cs_dump_packet(p, pkt);
+				radeon_cs_dump_packet(p, pkt);
 				return r;
 			}
 
@@ -833,11 +833,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	case 0x4F24:
 		/* ZB_DEPTHPITCH */
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			r = r100_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 					  idx, reg);
-				r100_cs_dump_packet(p, pkt);
+				radeon_cs_dump_packet(p, pkt);
 				return r;
 			}
 
@@ -1048,11 +1048,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		track->tex_dirty = true;
 		break;
 	case R300_ZB_ZPASS_ADDR:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1090,11 +1090,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		track->cb_dirty = true;
 		break;
 	case R300_RB3D_AARESOLVE_OFFSET:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->aa.robj = reloc->robj;
@@ -1159,10 +1159,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
 			return r;
 		break;
 	case PACKET3_INDX_BUFFER:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
-			r100_cs_dump_packet(p, pkt);
+			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
@@ -1260,21 +1260,21 @@ int r300_cs_parse(struct radeon_cs_parser *p)
 	r100_cs_track_clear(p->rdev, track);
 	p->track = track;
 	do {
-		r = r100_cs_packet_parse(p, &pkt, p->idx);
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
 		if (r) {
 			return r;
 		}
 		p->idx += pkt.count + 2;
 		switch (pkt.type) {
-		case PACKET_TYPE0:
+		case RADEON_PACKET_TYPE0:
 			r = r100_cs_parse_packet0(p, &pkt,
 						  p->rdev->config.r300.reg_safe_bm,
 						  p->rdev->config.r300.reg_safe_bm_size,
 						  &r300_packet0_check);
 			break;
-		case PACKET_TYPE2:
+		case RADEON_PACKET_TYPE2:
 			break;
-		case PACKET_TYPE3:
+		case RADEON_PACKET_TYPE3:
 			r = r300_packet3_check(p, &pkt);
 			break;
 		default:
@@ -1387,6 +1387,12 @@ static int r300_startup(struct radeon_device *rdev)
 	}
 
 	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
 	r100_irq_set(rdev);
 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
@@ -1460,9 +1466,6 @@ int r300_init(struct radeon_device *rdev)
 	r300_mc_init(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
-	if (r)
-		return r;
-	r = radeon_irq_kms_init(rdev);
 	if (r)
 		return r;
 	/* Memory manager */
@@ -1,354 +1,343 @@
 /*
  * Copyright 2008 Advanced Micro Devices, Inc.
  * Copyright 2008 Red Hat Inc.
  * Copyright 2009 Jerome Glisse.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
  * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
  */
 #ifndef __R300D_H__
 #define __R300D_H__
 
 #define CP_PACKET0 0x00000000
 #define PACKET0_BASE_INDEX_SHIFT 0
 #define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
 #define PACKET0_COUNT_SHIFT 16
 #define PACKET0_COUNT_MASK (0x3fff << 16)
 #define CP_PACKET1 0x40000000
 #define CP_PACKET2 0x80000000
 #define PACKET2_PAD_SHIFT 0
 #define PACKET2_PAD_MASK (0x3fffffff << 0)
 #define CP_PACKET3 0xC0000000
 #define PACKET3_IT_OPCODE_SHIFT 8
 #define PACKET3_IT_OPCODE_MASK (0xff << 8)
 #define PACKET3_COUNT_SHIFT 16
 #define PACKET3_COUNT_MASK (0x3fff << 16)
 /* PACKET3 op code */
 #define PACKET3_NOP 0x10
 #define PACKET3_3D_DRAW_VBUF 0x28
 #define PACKET3_3D_DRAW_IMMD 0x29
 #define PACKET3_3D_DRAW_INDX 0x2A
 #define PACKET3_3D_LOAD_VBPNTR 0x2F
 #define PACKET3_3D_CLEAR_ZMASK 0x32
 #define PACKET3_INDX_BUFFER 0x33
 #define PACKET3_3D_DRAW_VBUF_2 0x34
 #define PACKET3_3D_DRAW_IMMD_2 0x35
 #define PACKET3_3D_DRAW_INDX_2 0x36
 #define PACKET3_3D_CLEAR_HIZ 0x37
 #define PACKET3_3D_CLEAR_CMASK 0x38
 #define PACKET3_BITBLT_MULTI 0x9B
 
 #define PACKET0(reg, n) (CP_PACKET0 | \
 			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
 			 REG_SET(PACKET0_COUNT, (n)))
 #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
 #define PACKET3(op, n) (CP_PACKET3 | \
 			 REG_SET(PACKET3_IT_OPCODE, (op)) | \
 			 REG_SET(PACKET3_COUNT, (n)))
 
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
 /* Registers */
 #define R_000148_MC_FB_LOCATION 0x000148
 #define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
 #define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
 #define C_000148_MC_FB_START 0xFFFF0000
 #define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
 #define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
 #define C_000148_MC_FB_TOP 0x0000FFFF
 #define R_00014C_MC_AGP_LOCATION 0x00014C
 #define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
 #define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
 #define C_00014C_MC_AGP_START 0xFFFF0000
 #define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
 #define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
 #define C_00014C_MC_AGP_TOP 0x0000FFFF
 #define R_00015C_AGP_BASE_2 0x00015C
 #define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
 #define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
 #define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0
 #define R_000170_AGP_BASE 0x000170
 #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
 #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
 #define C_000170_AGP_BASE_ADDR 0x00000000
 #define R_0007C0_CP_STAT 0x0007C0
 #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
 #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
 #define C_0007C0_MRU_BUSY 0xFFFFFFFE
 #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
 #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
 #define C_0007C0_MWU_BUSY 0xFFFFFFFD
 #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
 #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
 #define C_0007C0_RSIU_BUSY 0xFFFFFFFB
 #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
 #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
 #define C_0007C0_RCIU_BUSY 0xFFFFFFF7
 #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
 #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
 #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
 #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
 #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
 #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
 #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
 #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
 #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
 #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
 #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
 #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
 #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
 #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
 #define C_0007C0_CSI_BUSY 0xFFFFDFFF
 #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
 #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
 #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
 #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
 #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
 #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
 #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
 #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
 #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
 #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
 #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
 #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
 #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
 #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
 #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
 #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
 #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
 #define C_0007C0_CP_BUSY 0x7FFFFFFF
 #define R_000E40_RBBM_STATUS 0x000E40
 #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
 #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
 #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
 #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
 #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
 #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
 #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
 #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
 #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
 #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
 #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
 #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
 #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
 #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
 #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
 #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
 #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
 #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
 #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
 #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
 #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
 #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
 #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
 #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
 #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
 #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
 #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
 #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
 #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
 #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
 #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
 #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
 #define C_000E40_E2_BUSY 0xFFFDFFFF
 #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
 #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
 #define C_000E40_RB2D_BUSY 0xFFFBFFFF
 #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
 #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
 #define C_000E40_RB3D_BUSY 0xFFF7FFFF
 #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
 #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
 #define C_000E40_VAP_BUSY 0xFFEFFFFF
 #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
 #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
 #define C_000E40_RE_BUSY 0xFFDFFFFF
 #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
 #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
 #define C_000E40_TAM_BUSY 0xFFBFFFFF
 #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
 #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
 #define C_000E40_TDM_BUSY 0xFF7FFFFF
 #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
 #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
 #define C_000E40_PB_BUSY 0xFEFFFFFF
 #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
 #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
 #define C_000E40_TIM_BUSY 0xFDFFFFFF
 #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
 #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
 #define C_000E40_GA_BUSY 0xFBFFFFFF
 #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
 #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
 #define C_000E40_CBA2D_BUSY 0xF7FFFFFF
 #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
 #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
 #define C_000E40_GUI_ACTIVE 0x7FFFFFFF
 #define R_0000F0_RBBM_SOFT_RESET 0x0000F0
 #define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
 #define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
 #define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
 #define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
 #define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
 #define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
 #define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
 #define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
 #define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
 #define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
 #define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
 #define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
 #define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
 #define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
 #define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
 #define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
 #define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
 #define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
 #define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
 #define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
 #define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
 #define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
 #define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
 #define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
 #define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
 #define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
 #define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
 #define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
 #define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
 #define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
 #define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
 #define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
 #define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
 #define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
 #define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
 #define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
 #define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
 #define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
 #define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
 #define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
 #define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
 #define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
 #define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
 #define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
 #define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
 
 #define R_00000D_SCLK_CNTL 0x00000D
 #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
 #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
 #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
 #define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
 #define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
 #define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
 #define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
 #define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
 #define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
 #define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
 #define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
 #define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
 #define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
 #define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
 #define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
 #define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
 #define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
 #define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
 #define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
 #define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
 #define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
 #define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
 #define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
 #define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
 #define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
 #define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
 #define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
 #define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
|
#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
|
||||||
#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
|
#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
|
||||||
#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
|
#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
|
||||||
#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
|
#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
|
||||||
#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
|
#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
|
||||||
#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
|
#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
|
||||||
#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
|
#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
|
||||||
#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
|
#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
|
||||||
#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
|
#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
|
||||||
#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
|
#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
|
||||||
#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
|
#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
|
||||||
#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
|
#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
|
||||||
#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
|
#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
|
||||||
#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
|
#define C_00000D_FORCE_DISP2 0xFFFF7FFF
|
||||||
#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
|
#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
|
||||||
#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
|
#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
|
||||||
#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
|
#define C_00000D_FORCE_CP 0xFFFEFFFF
|
||||||
#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
|
#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
|
||||||
#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
|
#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
|
||||||
#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
|
#define C_00000D_FORCE_HDP 0xFFFDFFFF
|
||||||
#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
|
#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
|
||||||
#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
|
#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
|
||||||
#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
|
#define C_00000D_FORCE_DISP1 0xFFFBFFFF
|
||||||
#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
|
#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
|
||||||
#define C_00000D_FORCE_DISP2 0xFFFF7FFF
|
#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
|
||||||
#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
|
#define C_00000D_FORCE_TOP 0xFFF7FFFF
|
||||||
#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
|
#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
|
||||||
#define C_00000D_FORCE_CP 0xFFFEFFFF
|
#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
|
||||||
#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
|
#define C_00000D_FORCE_E2 0xFFEFFFFF
|
||||||
#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
|
#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
|
||||||
#define C_00000D_FORCE_HDP 0xFFFDFFFF
|
#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
|
||||||
#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
|
#define C_00000D_FORCE_SE 0xFFDFFFFF
|
||||||
#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
|
#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
|
||||||
#define C_00000D_FORCE_DISP1 0xFFFBFFFF
|
#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
|
||||||
#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
|
#define C_00000D_FORCE_IDCT 0xFFBFFFFF
|
||||||
#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
|
#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
|
||||||
#define C_00000D_FORCE_TOP 0xFFF7FFFF
|
#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
|
||||||
#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
|
#define C_00000D_FORCE_VIP 0xFF7FFFFF
|
||||||
#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
|
#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
|
||||||
#define C_00000D_FORCE_E2 0xFFEFFFFF
|
#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
|
||||||
#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
|
#define C_00000D_FORCE_RE 0xFEFFFFFF
|
||||||
#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
|
#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
|
||||||
#define C_00000D_FORCE_SE 0xFFDFFFFF
|
#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
|
||||||
#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
|
#define C_00000D_FORCE_PB 0xFDFFFFFF
|
||||||
#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
|
#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
|
||||||
#define C_00000D_FORCE_IDCT 0xFFBFFFFF
|
#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
|
||||||
#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
|
#define C_00000D_FORCE_TAM 0xFBFFFFFF
|
||||||
#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
|
#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
|
||||||
#define C_00000D_FORCE_VIP 0xFF7FFFFF
|
#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
|
||||||
#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
|
#define C_00000D_FORCE_TDM 0xF7FFFFFF
|
||||||
#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
|
#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
|
||||||
#define C_00000D_FORCE_RE 0xFEFFFFFF
|
#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
|
||||||
#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
|
#define C_00000D_FORCE_RB 0xEFFFFFFF
|
||||||
#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
|
#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
|
||||||
#define C_00000D_FORCE_PB 0xFDFFFFFF
|
#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
|
||||||
#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
|
#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
|
||||||
#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
|
#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
|
||||||
#define C_00000D_FORCE_TAM 0xFBFFFFFF
|
#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
|
||||||
#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
|
#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
|
||||||
#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
|
#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
|
||||||
#define C_00000D_FORCE_TDM 0xF7FFFFFF
|
#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
|
||||||
#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
|
#define C_00000D_FORCE_OV0 0x7FFFFFFF
|
||||||
#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
|
|
||||||
#define C_00000D_FORCE_RB 0xEFFFFFFF
|
#endif
|
||||||
#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
|
|
||||||
#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
|
|
||||||
#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
|
|
||||||
#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
|
|
||||||
#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
|
|
||||||
#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
|
|
||||||
#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
|
|
||||||
#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
|
|
||||||
#define C_00000D_FORCE_OV0 0x7FFFFFFF
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
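The S_/G_/C_ macros above follow the usual Radeon register-header pattern: S_ shifts a field value into position, G_ extracts it from a register word, and C_ is the AND mask that clears the field. A minimal standalone sketch of the read-modify-write idiom they support (the SCLK_SRC_SEL macros are taken from above; fake_reg, rreg32 and wreg32 are illustrative stand-ins for the driver's MMIO register and its RREG32/WREG32 accessors):

#include <stdint.h>
#include <stdio.h>

#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8

static uint32_t fake_reg = 0x12345676;        /* stand-in for the MMIO register */
static uint32_t rreg32(void)       { return fake_reg; }
static void     wreg32(uint32_t v) { fake_reg = v; }

int main(void)
{
    uint32_t tmp = rreg32();
    printf("old SCLK_SRC_SEL = %u\n", (unsigned)G_00000D_SCLK_SRC_SEL(tmp));
    tmp &= C_00000D_SCLK_SRC_SEL;             /* clear the field */
    tmp |= S_00000D_SCLK_SRC_SEL(3);          /* select a new clock source */
    wreg32(tmp);
    printf("new SCLK_SRC_SEL = %u\n", (unsigned)G_00000D_SCLK_SRC_SEL(rreg32()));
    return 0;
}
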
@ -36,6 +36,45 @@
|
|||||||
#include "r420d.h"
|
#include "r420d.h"
|
||||||
#include "r420_reg_safe.h"
|
#include "r420_reg_safe.h"
|
||||||
|
|
||||||
|
void r420_pm_init_profile(struct radeon_device *rdev)
|
||||||
|
{
|
||||||
|
/* default */
|
||||||
|
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
|
||||||
|
/* low sh */
|
||||||
|
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||||
|
/* mid sh */
|
||||||
|
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
|
||||||
|
/* high sh */
|
||||||
|
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
|
||||||
|
/* low mh */
|
||||||
|
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||||
|
/* mid mh */
|
||||||
|
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
|
||||||
|
/* high mh */
|
||||||
|
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
|
||||||
|
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
|
||||||
|
}
|
||||||
|
|
||||||
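r420_pm_init_profile() above only fills in a lookup table: for every power-management profile it records which power state index and clock mode index to use with the displays off and with the displays on. A hedged sketch of how such an entry could be consumed when a profile is applied (the struct and function names here are illustrative, not the driver's):

struct pm_profile_entry {
    int dpms_off_ps_idx, dpms_on_ps_idx;   /* power state indices */
    int dpms_off_cm_idx, dpms_on_cm_idx;   /* clock mode indices  */
};

/* pick the power state / clock mode pair for the current DPMS condition */
static void pm_profile_pick(const struct pm_profile_entry *e, int displays_on,
                            int *ps_idx, int *cm_idx)
{
    *ps_idx = displays_on ? e->dpms_on_ps_idx : e->dpms_off_ps_idx;
    *cm_idx = displays_on ? e->dpms_on_cm_idx : e->dpms_off_cm_idx;
}
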
static void r420_set_reg_safe(struct radeon_device *rdev)
|
static void r420_set_reg_safe(struct radeon_device *rdev)
|
||||||
{
|
{
|
||||||
rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
|
rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
|
||||||
@ -226,6 +265,12 @@ static int r420_startup(struct radeon_device *rdev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Enable IRQ */
|
/* Enable IRQ */
|
||||||
|
if (!rdev->irq.installed) {
|
||||||
|
r = radeon_irq_kms_init(rdev);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
r100_irq_set(rdev);
|
r100_irq_set(rdev);
|
||||||
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
|
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
|
||||||
/* 1M ring buffer */
|
/* 1M ring buffer */
|
||||||
@ -305,10 +350,6 @@ int r420_init(struct radeon_device *rdev)
|
|||||||
if (r) {
|
if (r) {
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
r = radeon_irq_kms_init(rdev);
|
|
||||||
if (r) {
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
/* Memory manager */
|
/* Memory manager */
|
||||||
r = radeon_bo_init(rdev);
|
r = radeon_bo_init(rdev);
|
||||||
if (r) {
|
if (r) {
|
||||||
|
File diff suppressed because it is too large
@ -194,6 +194,12 @@ static int r520_startup(struct radeon_device *rdev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Enable IRQ */
|
/* Enable IRQ */
|
||||||
|
if (!rdev->irq.installed) {
|
||||||
|
r = radeon_irq_kms_init(rdev);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
rs600_irq_set(rdev);
|
rs600_irq_set(rdev);
|
||||||
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
|
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
|
||||||
/* 1M ring buffer */
|
/* 1M ring buffer */
|
||||||
@ -267,9 +273,6 @@ int r520_init(struct radeon_device *rdev)
|
|||||||
rv515_debugfs(rdev);
|
rv515_debugfs(rdev);
|
||||||
/* Fence driver */
|
/* Fence driver */
|
||||||
r = radeon_fence_driver_init(rdev);
|
r = radeon_fence_driver_init(rdev);
|
||||||
if (r)
|
|
||||||
return r;
|
|
||||||
r = radeon_irq_kms_init(rdev);
|
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
/* Memory manager */
|
/* Memory manager */
|
||||||
|
File diff suppressed because it is too large
@ -57,10 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
|
|||||||
*/
|
*/
|
||||||
static int r600_audio_chipset_supported(struct radeon_device *rdev)
|
static int r600_audio_chipset_supported(struct radeon_device *rdev)
|
||||||
{
|
{
|
||||||
return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
|
return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
|
||||||
|| rdev->family == CHIP_RS600
|
|
||||||
|| rdev->family == CHIP_RS690
|
|
||||||
|| rdev->family == CHIP_RS740;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
struct r600_audio r600_audio_status(struct radeon_device *rdev)
|
struct r600_audio r600_audio_status(struct radeon_device *rdev)
|
||||||
@ -183,65 +180,6 @@ int r600_audio_init(struct radeon_device *rdev)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* attach the audio codec to the clock source of the encoder
|
|
||||||
*/
|
|
||||||
void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
|
|
||||||
{
|
|
||||||
struct drm_device *dev = encoder->dev;
|
|
||||||
struct radeon_device *rdev = dev->dev_private;
|
|
||||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
|
||||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
|
||||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
|
|
||||||
int base_rate = 48000;
|
|
||||||
|
|
||||||
switch (radeon_encoder->encoder_id) {
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
|
||||||
WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
|
|
||||||
break;
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
|
||||||
WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
|
|
||||||
radeon_encoder->encoder_id);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ASIC_IS_DCE4(rdev)) {
|
|
||||||
/* TODO: other PLLs? */
|
|
||||||
WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
|
|
||||||
WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
|
|
||||||
WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
|
|
||||||
|
|
||||||
/* Select DTO source */
|
|
||||||
WREG32(0x5ac, radeon_crtc->crtc_id);
|
|
||||||
} else {
|
|
||||||
switch (dig->dig_encoder) {
|
|
||||||
case 0:
|
|
||||||
WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
|
|
||||||
WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
|
|
||||||
WREG32(R600_AUDIO_CLK_SRCSEL, 0);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 1:
|
|
||||||
WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
|
|
||||||
WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
|
|
||||||
WREG32(R600_AUDIO_CLK_SRCSEL, 1);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
dev_err(rdev->dev,
|
|
||||||
"Unsupported DIG on encoder 0x%02X\n",
|
|
||||||
radeon_encoder->encoder_id);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* release the audio timer
|
* release the audio timer
|
||||||
* TODO: How to do this correctly on SMP systems?
|
* TODO: How to do this correctly on SMP systems?
|
||||||
|
@ -23,6 +23,7 @@
|
|||||||
*
|
*
|
||||||
* Authors: Christian König
|
* Authors: Christian König
|
||||||
*/
|
*/
|
||||||
|
#include <linux/hdmi.h>
|
||||||
#include <drm/drmP.h>
|
#include <drm/drmP.h>
|
||||||
#include <drm/radeon_drm.h>
|
#include <drm/radeon_drm.h>
|
||||||
#include "radeon.h"
|
#include "radeon.h"
|
||||||
@ -120,80 +121,19 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
|
|||||||
WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
|
WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* calculate the crc for a given info frame
|
|
||||||
*/
|
|
||||||
static void r600_hdmi_infoframe_checksum(uint8_t packetType,
|
|
||||||
uint8_t versionNumber,
|
|
||||||
uint8_t length,
|
|
||||||
uint8_t *frame)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
frame[0] = packetType + versionNumber + length;
|
|
||||||
for (i = 1; i <= length; i++)
|
|
||||||
frame[0] += frame[i];
|
|
||||||
frame[0] = 0x100 - frame[0];
|
|
||||||
}
|
|
||||||
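The checksum the removed r600_hdmi_infoframe_checksum() computed is the standard HDMI infoframe checksum: the byte that makes the three header bytes plus the payload sum to zero modulo 256. A standalone version of the same calculation (the function name is illustrative):

#include <stdint.h>

/* Returns the checksum byte for an infoframe: header (type, version, length)
 * plus payload must sum to 0 modulo 256 once the checksum is included. */
static uint8_t infoframe_checksum(uint8_t type, uint8_t version,
                                  uint8_t length, const uint8_t *payload)
{
    unsigned int sum = type + version + length;
    unsigned int i;

    for (i = 0; i < length; i++)
        sum += payload[i];
    return (uint8_t)(0x100 - (sum & 0xff));
}
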
|
|
||||||
/*
|
/*
|
||||||
* build a HDMI Video Info Frame
|
* build a HDMI Video Info Frame
|
||||||
*/
|
*/
|
||||||
static void r600_hdmi_videoinfoframe(
|
static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
|
||||||
struct drm_encoder *encoder,
|
void *buffer, size_t size)
|
||||||
enum r600_hdmi_color_format color_format,
|
|
||||||
int active_information_present,
|
|
||||||
uint8_t active_format_aspect_ratio,
|
|
||||||
uint8_t scan_information,
|
|
||||||
uint8_t colorimetry,
|
|
||||||
uint8_t ex_colorimetry,
|
|
||||||
uint8_t quantization,
|
|
||||||
int ITC,
|
|
||||||
uint8_t picture_aspect_ratio,
|
|
||||||
uint8_t video_format_identification,
|
|
||||||
uint8_t pixel_repetition,
|
|
||||||
uint8_t non_uniform_picture_scaling,
|
|
||||||
uint8_t bar_info_data_valid,
|
|
||||||
uint16_t top_bar,
|
|
||||||
uint16_t bottom_bar,
|
|
||||||
uint16_t left_bar,
|
|
||||||
uint16_t right_bar
|
|
||||||
)
|
|
||||||
{
|
{
|
||||||
struct drm_device *dev = encoder->dev;
|
struct drm_device *dev = encoder->dev;
|
||||||
struct radeon_device *rdev = dev->dev_private;
|
struct radeon_device *rdev = dev->dev_private;
|
||||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||||
uint32_t offset = dig->afmt->offset;
|
uint32_t offset = dig->afmt->offset;
|
||||||
|
uint8_t *frame = buffer + 3;
|
||||||
|
|
||||||
uint8_t frame[14];
|
|
||||||
|
|
||||||
frame[0x0] = 0;
|
|
||||||
frame[0x1] =
|
|
||||||
(scan_information & 0x3) |
|
|
||||||
((bar_info_data_valid & 0x3) << 2) |
|
|
||||||
((active_information_present & 0x1) << 4) |
|
|
||||||
((color_format & 0x3) << 5);
|
|
||||||
frame[0x2] =
|
|
||||||
(active_format_aspect_ratio & 0xF) |
|
|
||||||
((picture_aspect_ratio & 0x3) << 4) |
|
|
||||||
((colorimetry & 0x3) << 6);
|
|
||||||
frame[0x3] =
|
|
||||||
(non_uniform_picture_scaling & 0x3) |
|
|
||||||
((quantization & 0x3) << 2) |
|
|
||||||
((ex_colorimetry & 0x7) << 4) |
|
|
||||||
((ITC & 0x1) << 7);
|
|
||||||
frame[0x4] = (video_format_identification & 0x7F);
|
|
||||||
frame[0x5] = (pixel_repetition & 0xF);
|
|
||||||
frame[0x6] = (top_bar & 0xFF);
|
|
||||||
frame[0x7] = (top_bar >> 8);
|
|
||||||
frame[0x8] = (bottom_bar & 0xFF);
|
|
||||||
frame[0x9] = (bottom_bar >> 8);
|
|
||||||
frame[0xA] = (left_bar & 0xFF);
|
|
||||||
frame[0xB] = (left_bar >> 8);
|
|
||||||
frame[0xC] = (right_bar & 0xFF);
|
|
||||||
frame[0xD] = (right_bar >> 8);
|
|
||||||
|
|
||||||
r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
|
|
||||||
/* Our header values (type, version, length) should be alright, Intel
|
/* Our header values (type, version, length) should be alright, Intel
|
||||||
* is using the same. Checksum function also seems to be OK, it works
|
* is using the same. Checksum function also seems to be OK, it works
|
||||||
* fine for audio infoframe. However calculated value is always lower
|
* fine for audio infoframe. However calculated value is always lower
|
||||||
@ -215,39 +155,15 @@ static void r600_hdmi_videoinfoframe(
|
|||||||
/*
|
/*
|
||||||
* build a Audio Info Frame
|
* build a Audio Info Frame
|
||||||
*/
|
*/
|
||||||
static void r600_hdmi_audioinfoframe(
|
static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
|
||||||
struct drm_encoder *encoder,
|
const void *buffer, size_t size)
|
||||||
uint8_t channel_count,
|
|
||||||
uint8_t coding_type,
|
|
||||||
uint8_t sample_size,
|
|
||||||
uint8_t sample_frequency,
|
|
||||||
uint8_t format,
|
|
||||||
uint8_t channel_allocation,
|
|
||||||
uint8_t level_shift,
|
|
||||||
int downmix_inhibit
|
|
||||||
)
|
|
||||||
{
|
{
|
||||||
struct drm_device *dev = encoder->dev;
|
struct drm_device *dev = encoder->dev;
|
||||||
struct radeon_device *rdev = dev->dev_private;
|
struct radeon_device *rdev = dev->dev_private;
|
||||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||||
uint32_t offset = dig->afmt->offset;
|
uint32_t offset = dig->afmt->offset;
|
||||||
|
const u8 *frame = buffer + 3;
|
||||||
uint8_t frame[11];
|
|
||||||
|
|
||||||
frame[0x0] = 0;
|
|
||||||
frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
|
|
||||||
frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
|
|
||||||
frame[0x3] = format;
|
|
||||||
frame[0x4] = channel_allocation;
|
|
||||||
frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
|
|
||||||
frame[0x6] = 0;
|
|
||||||
frame[0x7] = 0;
|
|
||||||
frame[0x8] = 0;
|
|
||||||
frame[0x9] = 0;
|
|
||||||
frame[0xA] = 0;
|
|
||||||
|
|
||||||
r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
|
|
||||||
|
|
||||||
WREG32(HDMI0_AUDIO_INFO0 + offset,
|
WREG32(HDMI0_AUDIO_INFO0 + offset,
|
||||||
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
|
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
|
||||||
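The HDMI0_AUDIO_INFO0 write above shows how the infoframe bytes reach the hardware: four consecutive bytes are packed, least-significant byte first, into one 32-bit register word. A tiny standalone helper with the same packing (the name is illustrative):

#include <stdint.h>

/* pack four consecutive infoframe bytes into one little-endian register word */
static uint32_t pack_infoframe_word(const uint8_t *frame)
{
    return (uint32_t)frame[0] | ((uint32_t)frame[1] << 8) |
           ((uint32_t)frame[2] << 16) | ((uint32_t)frame[3] << 24);
}
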
@ -310,6 +226,38 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
|
|||||||
value, ~HDMI0_AUDIO_TEST_EN);
|
value, ~HDMI0_AUDIO_TEST_EN);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
|
||||||
|
{
|
||||||
|
struct drm_device *dev = encoder->dev;
|
||||||
|
struct radeon_device *rdev = dev->dev_private;
|
||||||
|
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||||
|
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||||
|
u32 base_rate = 24000;
|
||||||
|
|
||||||
|
if (!dig || !dig->afmt)
|
||||||
|
return;
|
||||||
|
|
||||||
|
/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
|
||||||
|
* doesn't matter which one you use. Just use the first one.
|
||||||
|
*/
|
||||||
|
/* XXX two dtos; generally use dto0 for hdmi */
|
||||||
|
/* Express [24MHz / target pixel clock] as an exact rational
|
||||||
|
* number (a ratio of two integers). DCCG_AUDIO_DTOx_PHASE
|
||||||
|
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||||
|
*/
|
||||||
|
if (ASIC_IS_DCE3(rdev)) {
|
||||||
|
/* according to the reg specs, this should be DCE3.2 only, but in
|
||||||
|
* practice it seems to cover DCE3.0 as well.
|
||||||
|
*/
|
||||||
|
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
|
||||||
|
WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
|
||||||
|
WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
|
||||||
|
} else {
|
||||||
|
/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
|
||||||
|
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
|
||||||
|
AUDIO_DTO_MODULE(clock / 10));
|
||||||
|
}
|
||||||
|
}
|
||||||
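The DTO setup above programs the ratio between the 24 MHz audio reference and the pixel clock: on DCE3 parts PHASE and MODULE are written separately (both scaled by 100), while on older parts the two values go, divided by 10, into the packed AUDIO_DTO register. A standalone sketch of the arithmetic, using an illustrative 74.25 MHz pixel clock:

#include <stdio.h>

int main(void)
{
    unsigned int base_rate = 24000;   /* 24 MHz audio reference, in kHz */
    unsigned int clock = 74250;       /* illustrative pixel clock, in kHz */

    /* DCE3+: separate phase/module registers, both scaled by 100 */
    unsigned int phase  = base_rate * 100;
    unsigned int module = clock * 100;
    printf("DTO0_PHASE=%u DTO0_MODULE=%u (ratio %.6f)\n",
           phase, module, (double)phase / module);

    /* older parts: packed AUDIO_DTO register, values divided by 10 */
    printf("AUDIO_DTO phase=%u module=%u\n", base_rate / 10, clock / 10);
    return 0;
}
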
|
|
||||||
/*
|
/*
|
||||||
* update the info frames with the data from the current display mode
|
* update the info frames with the data from the current display mode
|
||||||
@ -320,7 +268,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
|
|||||||
struct radeon_device *rdev = dev->dev_private;
|
struct radeon_device *rdev = dev->dev_private;
|
||||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||||
|
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
|
||||||
|
struct hdmi_avi_infoframe frame;
|
||||||
uint32_t offset;
|
uint32_t offset;
|
||||||
|
ssize_t err;
|
||||||
|
|
||||||
/* Silent, r600_hdmi_enable will raise WARN for us */
|
/* Silent, r600_hdmi_enable will raise WARN for us */
|
||||||
if (!dig->afmt->enabled)
|
if (!dig->afmt->enabled)
|
||||||
@ -371,9 +322,19 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
|
|||||||
|
|
||||||
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
|
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
|
||||||
|
|
||||||
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
|
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
|
||||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
if (err < 0) {
|
||||||
|
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
|
||||||
|
if (err < 0) {
|
||||||
|
DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
|
||||||
r600_hdmi_update_ACR(encoder, mode->clock);
|
r600_hdmi_update_ACR(encoder, mode->clock);
|
||||||
|
|
||||||
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
|
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
|
||||||
@ -396,8 +357,11 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
|
|||||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||||
struct r600_audio audio = r600_audio_status(rdev);
|
struct r600_audio audio = r600_audio_status(rdev);
|
||||||
|
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
|
||||||
|
struct hdmi_audio_infoframe frame;
|
||||||
uint32_t offset;
|
uint32_t offset;
|
||||||
uint32_t iec;
|
uint32_t iec;
|
||||||
|
ssize_t err;
|
||||||
|
|
||||||
if (!dig->afmt || !dig->afmt->enabled)
|
if (!dig->afmt || !dig->afmt->enabled)
|
||||||
return;
|
return;
|
||||||
@ -463,9 +427,21 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
|
|||||||
iec |= 0x5 << 16;
|
iec |= 0x5 << 16;
|
||||||
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
|
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
|
||||||
|
|
||||||
r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
|
err = hdmi_audio_infoframe_init(&frame);
|
||||||
0);
|
if (err < 0) {
|
||||||
|
DRM_ERROR("failed to setup audio infoframe\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
frame.channels = audio.channels;
|
||||||
|
|
||||||
|
err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
|
||||||
|
if (err < 0) {
|
||||||
|
DRM_ERROR("failed to pack audio infoframe\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
|
||||||
r600_hdmi_audio_workaround(encoder);
|
r600_hdmi_audio_workaround(encoder);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@ -473,42 +449,51 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
|
|||||||
/*
|
/*
|
||||||
* enable the HDMI engine
|
* enable the HDMI engine
|
||||||
*/
|
*/
|
||||||
void r600_hdmi_enable(struct drm_encoder *encoder)
|
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = encoder->dev;
|
struct drm_device *dev = encoder->dev;
|
||||||
struct radeon_device *rdev = dev->dev_private;
|
struct radeon_device *rdev = dev->dev_private;
|
||||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||||
uint32_t offset;
|
u32 hdmi = HDMI0_ERROR_ACK;
|
||||||
u32 hdmi;
|
|
||||||
|
|
||||||
if (ASIC_IS_DCE6(rdev))
|
|
||||||
return;
|
|
||||||
|
|
||||||
/* Silent, r600_hdmi_enable will raise WARN for us */
|
/* Silent, r600_hdmi_enable will raise WARN for us */
|
||||||
if (dig->afmt->enabled)
|
if (enable && dig->afmt->enabled)
|
||||||
|
return;
|
||||||
|
if (!enable && !dig->afmt->enabled)
|
||||||
return;
|
return;
|
||||||
offset = dig->afmt->offset;
|
|
||||||
|
|
||||||
/* Older chipsets require setting HDMI and routing manually */
|
/* Older chipsets require setting HDMI and routing manually */
|
||||||
if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
|
if (!ASIC_IS_DCE3(rdev)) {
|
||||||
hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
|
if (enable)
|
||||||
|
hdmi |= HDMI0_ENABLE;
|
||||||
switch (radeon_encoder->encoder_id) {
|
switch (radeon_encoder->encoder_id) {
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
||||||
WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
|
if (enable) {
|
||||||
~AVIVO_TMDSA_CNTL_HDMI_EN);
|
WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
|
||||||
hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
|
hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
|
||||||
|
} else {
|
||||||
|
WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
||||||
WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
|
if (enable) {
|
||||||
~AVIVO_LVTMA_CNTL_HDMI_EN);
|
WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
|
||||||
hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
|
hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
|
||||||
|
} else {
|
||||||
|
WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_DDI:
|
case ENCODER_OBJECT_ID_INTERNAL_DDI:
|
||||||
WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
|
if (enable) {
|
||||||
|
WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
|
||||||
hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
|
hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
|
||||||
|
} else {
|
||||||
|
WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
|
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
|
||||||
|
if (enable)
|
||||||
hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
|
hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
@ -516,72 +501,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
|
|||||||
radeon_encoder->encoder_id);
|
radeon_encoder->encoder_id);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
WREG32(HDMI0_CONTROL + offset, hdmi);
|
WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (rdev->irq.installed) {
|
if (rdev->irq.installed) {
|
||||||
/* if irq is available use it */
|
/* if irq is available use it */
|
||||||
// radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
|
/* XXX: shouldn't need this on any asics. Double check DCE2/3 */
|
||||||
|
// if (enable)
|
||||||
|
// radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
|
||||||
|
// else
|
||||||
|
// radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
|
||||||
}
|
}
|
||||||
|
|
||||||
dig->afmt->enabled = true;
|
dig->afmt->enabled = enable;
|
||||||
|
|
||||||
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
|
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
|
||||||
offset, radeon_encoder->encoder_id);
|
enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* disable the HDMI engine
|
|
||||||
*/
|
|
||||||
void r600_hdmi_disable(struct drm_encoder *encoder)
|
|
||||||
{
|
|
||||||
struct drm_device *dev = encoder->dev;
|
|
||||||
struct radeon_device *rdev = dev->dev_private;
|
|
||||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
|
||||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
|
||||||
uint32_t offset;
|
|
||||||
|
|
||||||
if (ASIC_IS_DCE6(rdev))
|
|
||||||
return;
|
|
||||||
|
|
||||||
/* Called for ATOM_ENCODER_MODE_HDMI only */
|
|
||||||
if (!dig || !dig->afmt) {
|
|
||||||
WARN_ON(1);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (!dig->afmt->enabled)
|
|
||||||
return;
|
|
||||||
offset = dig->afmt->offset;
|
|
||||||
|
|
||||||
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
|
|
||||||
offset, radeon_encoder->encoder_id);
|
|
||||||
|
|
||||||
/* disable irq */
|
|
||||||
// radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
|
|
||||||
|
|
||||||
/* Older chipsets not handled by AtomBIOS */
|
|
||||||
if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
|
|
||||||
switch (radeon_encoder->encoder_id) {
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
|
||||||
WREG32_P(AVIVO_TMDSA_CNTL, 0,
|
|
||||||
~AVIVO_TMDSA_CNTL_HDMI_EN);
|
|
||||||
break;
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
|
||||||
WREG32_P(AVIVO_LVTMA_CNTL, 0,
|
|
||||||
~AVIVO_LVTMA_CNTL_HDMI_EN);
|
|
||||||
break;
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_DDI:
|
|
||||||
WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
|
|
||||||
break;
|
|
||||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
|
|
||||||
radeon_encoder->encoder_id);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
|
|
||||||
}
|
|
||||||
|
|
||||||
dig->afmt->enabled = false;
|
|
||||||
}
|
|
||||||
|
File diff suppressed because it is too large
@ -102,7 +102,7 @@ extern int radeon_hw_i2c;
|
|||||||
extern int radeon_pcie_gen2;
|
extern int radeon_pcie_gen2;
|
||||||
extern int radeon_msi;
|
extern int radeon_msi;
|
||||||
extern int radeon_lockup_timeout;
|
extern int radeon_lockup_timeout;
|
||||||
|
extern int radeon_fastfb;
|
||||||
|
|
||||||
|
|
||||||
typedef struct pm_message {
|
typedef struct pm_message {
|
||||||
@ -124,10 +124,10 @@ static inline u32 ioread32(const volatile void __iomem *addr)
|
|||||||
return in32((u32)addr);
|
return in32((u32)addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void iowrite32(uint32_t b, volatile void __iomem *addr)
|
//static inline void iowrite32(uint32_t b, volatile void __iomem *addr)
|
||||||
{
|
//{
|
||||||
out32((u32)addr, b);
|
// out32((u32)addr, b);
|
||||||
}
|
//}
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -143,7 +143,7 @@ static inline void iowrite32(uint32_t b, volatile void __iomem *addr)
|
|||||||
#define RADEON_BIOS_NUM_SCRATCH 8
|
#define RADEON_BIOS_NUM_SCRATCH 8
|
||||||
|
|
||||||
/* max number of rings */
|
/* max number of rings */
|
||||||
#define RADEON_NUM_RINGS 5
|
#define RADEON_NUM_RINGS 6
|
||||||
|
|
||||||
/* fence seq are set to this number when signaled */
|
/* fence seq are set to this number when signaled */
|
||||||
#define RADEON_FENCE_SIGNALED_SEQ 0LL
|
#define RADEON_FENCE_SIGNALED_SEQ 0LL
|
||||||
@ -161,6 +161,9 @@ static inline void iowrite32(uint32_t b, volatile void __iomem *addr)
|
|||||||
/* cayman add a second async dma ring */
|
/* cayman add a second async dma ring */
|
||||||
#define CAYMAN_RING_TYPE_DMA1_INDEX 4
|
#define CAYMAN_RING_TYPE_DMA1_INDEX 4
|
||||||
|
|
||||||
|
/* R600+ */
|
||||||
|
#define R600_RING_TYPE_UVD_INDEX 5
|
||||||
|
|
||||||
/* hardcode those limit for now */
|
/* hardcode those limit for now */
|
||||||
#define RADEON_VA_IB_OFFSET (1 << 20)
|
#define RADEON_VA_IB_OFFSET (1 << 20)
|
||||||
#define RADEON_VA_RESERVED_SIZE (8 << 20)
|
#define RADEON_VA_RESERVED_SIZE (8 << 20)
|
||||||
@ -170,6 +173,15 @@ static inline void iowrite32(uint32_t b, volatile void __iomem *addr)
|
|||||||
#define RADEON_RESET_GFX (1 << 0)
|
#define RADEON_RESET_GFX (1 << 0)
|
||||||
#define RADEON_RESET_COMPUTE (1 << 1)
|
#define RADEON_RESET_COMPUTE (1 << 1)
|
||||||
#define RADEON_RESET_DMA (1 << 2)
|
#define RADEON_RESET_DMA (1 << 2)
|
||||||
|
#define RADEON_RESET_CP (1 << 3)
|
||||||
|
#define RADEON_RESET_GRBM (1 << 4)
|
||||||
|
#define RADEON_RESET_DMA1 (1 << 5)
|
||||||
|
#define RADEON_RESET_RLC (1 << 6)
|
||||||
|
#define RADEON_RESET_SEM (1 << 7)
|
||||||
|
#define RADEON_RESET_IH (1 << 8)
|
||||||
|
#define RADEON_RESET_VMC (1 << 9)
|
||||||
|
#define RADEON_RESET_MC (1 << 10)
|
||||||
|
#define RADEON_RESET_DISPLAY (1 << 11)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Errata workarounds.
|
* Errata workarounds.
|
||||||
@ -227,6 +239,11 @@ void radeon_pm_suspend(struct radeon_device *rdev);
|
|||||||
void radeon_pm_resume(struct radeon_device *rdev);
|
void radeon_pm_resume(struct radeon_device *rdev);
|
||||||
void radeon_combios_get_power_modes(struct radeon_device *rdev);
|
void radeon_combios_get_power_modes(struct radeon_device *rdev);
|
||||||
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
|
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
|
||||||
|
int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
|
||||||
|
u8 clock_type,
|
||||||
|
u32 clock,
|
||||||
|
bool strobe_mode,
|
||||||
|
struct atom_clock_dividers *dividers);
|
||||||
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
|
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
|
||||||
void rs690_pm_info(struct radeon_device *rdev);
|
void rs690_pm_info(struct radeon_device *rdev);
|
||||||
extern int rv6xx_get_temp(struct radeon_device *rdev);
|
extern int rv6xx_get_temp(struct radeon_device *rdev);
|
||||||
@ -329,7 +346,7 @@ struct radeon_surface_reg {
|
|||||||
*/
|
*/
|
||||||
struct radeon_mman {
|
struct radeon_mman {
|
||||||
struct ttm_bo_global_ref bo_global_ref;
|
struct ttm_bo_global_ref bo_global_ref;
|
||||||
// struct drm_global_reference mem_global_ref;
|
struct drm_global_reference mem_global_ref;
|
||||||
struct ttm_bo_device bdev;
|
struct ttm_bo_device bdev;
|
||||||
bool mem_global_referenced;
|
bool mem_global_referenced;
|
||||||
bool initialized;
|
bool initialized;
|
||||||
@ -358,7 +375,7 @@ struct radeon_bo {
|
|||||||
struct list_head list;
|
struct list_head list;
|
||||||
/* Protected by tbo.reserved */
|
/* Protected by tbo.reserved */
|
||||||
u32 placements[3];
|
u32 placements[3];
|
||||||
u32 busy_placements[3];
|
u32 domain;
|
||||||
struct ttm_placement placement;
|
struct ttm_placement placement;
|
||||||
struct ttm_buffer_object tbo;
|
struct ttm_buffer_object tbo;
|
||||||
struct ttm_bo_kmap_obj kmap;
|
struct ttm_bo_kmap_obj kmap;
|
||||||
@ -377,8 +394,7 @@ struct radeon_bo {
|
|||||||
struct radeon_device *rdev;
|
struct radeon_device *rdev;
|
||||||
struct drm_gem_object gem_base;
|
struct drm_gem_object gem_base;
|
||||||
|
|
||||||
u32 domain;
|
struct ttm_bo_kmap_obj dma_buf_vmap;
|
||||||
int vmapping_count;
|
|
||||||
};
|
};
|
||||||
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
|
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
|
||||||
|
|
||||||
@ -390,6 +406,8 @@ struct radeon_bo_list {
|
|||||||
u32 tiling_flags;
|
u32 tiling_flags;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
int radeon_gem_debugfs_init(struct radeon_device *rdev);
|
||||||
|
|
||||||
/* sub-allocation manager, it has to be protected by another lock.
|
/* sub-allocation manager, it has to be protected by another lock.
|
||||||
* By conception this is an helper for other part of the driver
|
* By conception this is an helper for other part of the driver
|
||||||
* like the indirect buffer or semaphore, which both have their
|
* like the indirect buffer or semaphore, which both have their
|
||||||
@ -545,6 +563,7 @@ struct radeon_mc {
|
|||||||
bool vram_is_ddr;
|
bool vram_is_ddr;
|
||||||
bool igp_sideport_enabled;
|
bool igp_sideport_enabled;
|
||||||
u64 gtt_base_align;
|
u64 gtt_base_align;
|
||||||
|
u64 mc_mask;
|
||||||
};
|
};
|
||||||
|
|
||||||
bool radeon_combios_sideport_present(struct radeon_device *rdev);
|
bool radeon_combios_sideport_present(struct radeon_device *rdev);
|
||||||
@ -678,6 +697,8 @@ struct radeon_ring {
|
|||||||
u32 ptr_reg_mask;
|
u32 ptr_reg_mask;
|
||||||
u32 nop;
|
u32 nop;
|
||||||
u32 idx;
|
u32 idx;
|
||||||
|
u64 last_semaphore_signal_addr;
|
||||||
|
u64 last_semaphore_wait_addr;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -794,6 +815,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
|
|||||||
struct radeon_ib *ib, struct radeon_vm *vm,
|
struct radeon_ib *ib, struct radeon_vm *vm,
|
||||||
unsigned size);
|
unsigned size);
|
||||||
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
|
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
|
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
|
||||||
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
|
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
|
||||||
struct radeon_ib *const_ib);
|
struct radeon_ib *const_ib);
|
||||||
int radeon_ib_pool_init(struct radeon_device *rdev);
|
int radeon_ib_pool_init(struct radeon_device *rdev);
|
||||||
@ -932,6 +954,7 @@ struct radeon_wb {
|
|||||||
#define R600_WB_DMA_RPTR_OFFSET 1792
|
#define R600_WB_DMA_RPTR_OFFSET 1792
|
||||||
#define R600_WB_IH_WPTR_OFFSET 2048
|
#define R600_WB_IH_WPTR_OFFSET 2048
|
||||||
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
|
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
|
||||||
|
#define R600_WB_UVD_RPTR_OFFSET 2560
|
||||||
#define R600_WB_EVENT_OFFSET 3072
|
#define R600_WB_EVENT_OFFSET 3072
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -1132,6 +1155,46 @@ struct radeon_pm {
|
|||||||
int radeon_pm_get_type_index(struct radeon_device *rdev,
|
int radeon_pm_get_type_index(struct radeon_device *rdev,
|
||||||
enum radeon_pm_state_type ps_type,
|
enum radeon_pm_state_type ps_type,
|
||||||
int instance);
|
int instance);
|
||||||
|
/*
|
||||||
|
* UVD
|
||||||
|
*/
|
||||||
|
#define RADEON_MAX_UVD_HANDLES 10
|
||||||
|
#define RADEON_UVD_STACK_SIZE (1024*1024)
|
||||||
|
#define RADEON_UVD_HEAP_SIZE (1024*1024)
|
||||||
|
|
||||||
|
struct radeon_uvd {
|
||||||
|
struct radeon_bo *vcpu_bo;
|
||||||
|
void *cpu_addr;
|
||||||
|
uint64_t gpu_addr;
|
||||||
|
atomic_t handles[RADEON_MAX_UVD_HANDLES];
|
||||||
|
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
|
||||||
|
struct delayed_work idle_work;
|
||||||
|
};
|
||||||
|
|
||||||
|
int radeon_uvd_init(struct radeon_device *rdev);
|
||||||
|
void radeon_uvd_fini(struct radeon_device *rdev);
|
||||||
|
int radeon_uvd_suspend(struct radeon_device *rdev);
|
||||||
|
int radeon_uvd_resume(struct radeon_device *rdev);
|
||||||
|
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
|
||||||
|
uint32_t handle, struct radeon_fence **fence);
|
||||||
|
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
|
||||||
|
uint32_t handle, struct radeon_fence **fence);
|
||||||
|
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
|
||||||
|
void radeon_uvd_free_handles(struct radeon_device *rdev,
|
||||||
|
struct drm_file *filp);
|
||||||
|
int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
|
||||||
|
void radeon_uvd_note_usage(struct radeon_device *rdev);
|
||||||
|
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
|
||||||
|
unsigned vclk, unsigned dclk,
|
||||||
|
unsigned vco_min, unsigned vco_max,
|
||||||
|
unsigned fb_factor, unsigned fb_mask,
|
||||||
|
unsigned pd_min, unsigned pd_max,
|
||||||
|
unsigned pd_even,
|
||||||
|
unsigned *optimal_fb_div,
|
||||||
|
unsigned *optimal_vclk_div,
|
||||||
|
unsigned *optimal_dclk_div);
|
||||||
|
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
|
||||||
|
unsigned cg_upll_func_cntl);
|
||||||
|
|
||||||
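The radeon_uvd structure above keeps a small fixed array of session handles plus the owning file per handle. A hedged, user-space model of how such a pool can be claimed without a lock, using compare-and-swap on a zero (free) slot; this only illustrates the idea and is not the driver's implementation:

#include <stdatomic.h>
#include <stddef.h>

#define MAX_HANDLES 10

/* slot value 0 means free; anything else is the handle stored in that slot */
static atomic_uint handles[MAX_HANDLES];

static int claim_handle(unsigned int handle)
{
    size_t i;

    for (i = 0; i < MAX_HANDLES; i++) {
        unsigned int expected = 0;
        if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
            return (int)i;          /* claimed slot i */
    }
    return -1;                      /* no free slot */
}
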
struct r600_audio {
|
struct r600_audio {
|
||||||
int channels;
|
int channels;
|
||||||
@ -1161,6 +1224,10 @@ struct radeon_asic {
|
|||||||
bool (*gui_idle)(struct radeon_device *rdev);
|
bool (*gui_idle)(struct radeon_device *rdev);
|
||||||
/* wait for mc_idle */
|
/* wait for mc_idle */
|
||||||
int (*mc_wait_for_idle)(struct radeon_device *rdev);
|
int (*mc_wait_for_idle)(struct radeon_device *rdev);
|
||||||
|
/* get the reference clock */
|
||||||
|
u32 (*get_xclk)(struct radeon_device *rdev);
|
||||||
|
/* get the gpu clock counter */
|
||||||
|
uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
|
||||||
/* gart */
|
/* gart */
|
||||||
struct {
|
struct {
|
||||||
void (*tlb_flush)(struct radeon_device *rdev);
|
void (*tlb_flush)(struct radeon_device *rdev);
|
||||||
@ -1171,7 +1238,9 @@ struct radeon_asic {
|
|||||||
void (*fini)(struct radeon_device *rdev);
|
void (*fini)(struct radeon_device *rdev);
|
||||||
|
|
||||||
u32 pt_ring_index;
|
u32 pt_ring_index;
|
||||||
void (*set_page)(struct radeon_device *rdev, uint64_t pe,
|
void (*set_page)(struct radeon_device *rdev,
|
||||||
|
struct radeon_ib *ib,
|
||||||
|
uint64_t pe,
|
||||||
uint64_t addr, unsigned count,
|
uint64_t addr, unsigned count,
|
||||||
uint32_t incr, uint32_t flags);
|
uint32_t incr, uint32_t flags);
|
||||||
} vm;
|
} vm;
|
||||||
@ -1206,6 +1275,9 @@ struct radeon_asic {
|
|||||||
void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
|
void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
|
||||||
/* get backlight level */
|
/* get backlight level */
|
||||||
u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
|
u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
|
||||||
|
/* audio callbacks */
|
||||||
|
void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
|
||||||
|
void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
|
||||||
} display;
|
} display;
|
||||||
/* copy functions for bo handling */
|
/* copy functions for bo handling */
|
||||||
struct {
|
struct {
|
||||||
@ -1258,6 +1330,7 @@ struct radeon_asic {
|
|||||||
int (*get_pcie_lanes)(struct radeon_device *rdev);
|
int (*get_pcie_lanes)(struct radeon_device *rdev);
|
||||||
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
|
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
|
||||||
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
|
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
|
||||||
|
int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
|
||||||
} pm;
|
} pm;
|
||||||
/* pageflipping */
|
/* pageflipping */
|
||||||
struct {
|
struct {
|
||||||
@ -1420,6 +1493,7 @@ struct si_asic {
|
|||||||
unsigned multi_gpu_tile_size;
|
unsigned multi_gpu_tile_size;
|
||||||
|
|
||||||
unsigned tile_config;
|
unsigned tile_config;
|
||||||
|
uint32_t tile_mode_array[32];
|
||||||
};
|
};
|
||||||
|
|
||||||
union radeon_asic_config {
|
union radeon_asic_config {
|
||||||
@ -1505,6 +1579,7 @@ struct radeon_device {
|
|||||||
struct radeon_asic *asic;
|
struct radeon_asic *asic;
|
||||||
struct radeon_gem gem;
|
struct radeon_gem gem;
|
||||||
struct radeon_pm pm;
|
struct radeon_pm pm;
|
||||||
|
struct radeon_uvd uvd;
|
||||||
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
|
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
|
||||||
struct radeon_wb wb;
|
struct radeon_wb wb;
|
||||||
struct radeon_dummy_page dummy_page;
|
struct radeon_dummy_page dummy_page;
|
||||||
@ -1512,12 +1587,14 @@ struct radeon_device {
|
|||||||
bool suspend;
|
bool suspend;
|
||||||
bool need_dma32;
|
bool need_dma32;
|
||||||
bool accel_working;
|
bool accel_working;
|
||||||
|
bool fastfb_working; /* IGP feature*/
|
||||||
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
|
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
|
||||||
const struct firmware *me_fw; /* all family ME firmware */
|
const struct firmware *me_fw; /* all family ME firmware */
|
||||||
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
|
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
|
||||||
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
|
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
|
||||||
const struct firmware *mc_fw; /* NI MC firmware */
|
const struct firmware *mc_fw; /* NI MC firmware */
|
||||||
const struct firmware *ce_fw; /* SI CE firmware */
|
const struct firmware *ce_fw; /* SI CE firmware */
|
||||||
|
const struct firmware *uvd_fw; /* UVD firmware */
|
||||||
struct r600_blit r600_blit;
|
struct r600_blit r600_blit;
|
||||||
struct r600_vram_scratch vram_scratch;
|
struct r600_vram_scratch vram_scratch;
|
||||||
int msi_enabled; /* msi enabled */
|
int msi_enabled; /* msi enabled */
|
||||||
@ -1528,6 +1605,7 @@ struct radeon_device {
|
|||||||
int num_crtc; /* number of crtcs */
|
int num_crtc; /* number of crtcs */
|
||||||
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
|
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
|
||||||
bool audio_enabled;
|
bool audio_enabled;
|
||||||
|
bool has_uvd;
|
||||||
// struct r600_audio audio_status; /* audio stuff */
|
// struct r600_audio audio_status; /* audio stuff */
|
||||||
// struct notifier_block acpi_nb;
|
// struct notifier_block acpi_nb;
|
||||||
/* only one userspace can use Hyperz features or CMASK at a time */
|
/* only one userspace can use Hyperz features or CMASK at a time */
|
||||||
@ -1585,8 +1663,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
|
|||||||
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
|
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
|
||||||
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
|
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
|
||||||
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
|
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
|
||||||
#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
|
#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
|
||||||
#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
|
#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
|
||||||
#define WREG32_P(reg, val, mask) \
|
#define WREG32_P(reg, val, mask) \
|
||||||
do { \
|
do { \
|
||||||
uint32_t tmp_ = RREG32(reg); \
|
uint32_t tmp_ = RREG32(reg); \
|
||||||
@ -1594,6 +1672,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
|
|||||||
tmp_ |= ((val) & ~(mask)); \
|
tmp_ |= ((val) & ~(mask)); \
|
||||||
WREG32(reg, tmp_); \
|
WREG32(reg, tmp_); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
|
||||||
|
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
|
||||||
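WREG32_AND and WREG32_OR are thin wrappers over the read-modify-write macro WREG32_P defined just above: read the register, keep only the bits in the mask, then OR in the new value. A minimal user-space model of the same semantics, with a plain variable standing in for the MMIO register (the macro shapes are simplified and the register argument is dropped):

#include <stdint.h>
#include <assert.h>

static uint32_t reg;                 /* stand-in for an MMIO register */
#define RREG32()        (reg)
#define WREG32(v)       (reg = (v))
#define WREG32_P(val, mask) do { uint32_t t = RREG32(); t &= (mask); t |= ((val) & ~(mask)); WREG32(t); } while (0)
#define WREG32_AND(and) WREG32_P(0, (and))
#define WREG32_OR(or)   WREG32_P((or), ~(or))

int main(void)
{
    reg = 0x000000F0;
    WREG32_OR(0x00000001);     /* set bit 0, leave the rest untouched   */
    WREG32_AND(~0x00000010);   /* clear bit 4, leave the rest untouched */
    assert(reg == 0x000000E1);
    return 0;
}
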
#define WREG32_PLL_P(reg, val, mask) \
|
#define WREG32_PLL_P(reg, val, mask) \
|
||||||
do { \
|
do { \
|
||||||
uint32_t tmp_ = RREG32_PLL(reg); \
|
uint32_t tmp_ = RREG32_PLL(reg); \
|
||||||
@ -1668,6 +1748,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
|
|||||||
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
|
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
|
||||||
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
|
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
|
||||||
(rdev->flags & RADEON_IS_IGP))
|
(rdev->flags & RADEON_IS_IGP))
|
||||||
|
#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
|
||||||
|
#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* BIOS helpers.
|
* BIOS helpers.
|
||||||
@@ -1712,7 +1794,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
@@ -1725,6 +1807,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
 #define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
 #define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
+#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
+#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
 #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
 #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1740,6 +1824,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
+#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
 #define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
@@ -1758,10 +1843,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
 #define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
 #define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
+#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
+#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
 
 /* Common functions */
 /* AGP */
 extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
 extern void radeon_agp_disable(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1784,6 +1872,9 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
 extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
+extern void radeon_program_register_sequence(struct radeon_device *rdev,
+const u32 *registers,
+const u32 array_size);
 
 /*
  * vm
@@ -1856,9 +1947,6 @@ struct radeon_hdmi_acr {
 
 extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
 
-extern void r600_hdmi_enable(struct drm_encoder *encoder);
-extern void r600_hdmi_disable(struct drm_encoder *encoder);
-extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
 u32 tiling_pipe_num,
 u32 max_rb_num,
@@ -1869,8 +1957,6 @@ extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
  * evergreen functions used by radeon_encoder.c
  */
 
-extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-
 extern int ni_init_microcode(struct radeon_device *rdev);
 extern int ni_mc_load_microcode(struct radeon_device *rdev);
 
@@ -1896,7 +1982,8 @@ bool set_mode(struct drm_device *dev, struct drm_connector *connector,
 videomode_t *mode, bool strict);
 
 
+#ifndef __TTM__
 #define radeon_ttm_set_active_vram_size(a, b)
+#endif
 
 #endif
@@ -122,6 +122,10 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
 rdev->mc_rreg = &rs600_mc_rreg;
 rdev->mc_wreg = &rs600_mc_wreg;
 }
+if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+rdev->mc_rreg = &rs780_mc_rreg;
+rdev->mc_wreg = &rs780_mc_wreg;
+}
 if (rdev->family >= CHIP_R600) {
 rdev->pciep_rreg = &r600_pciep_rreg;
 rdev->pciep_wreg = &r600_pciep_wreg;
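The accessor selection above pairs with the WREG32_MC/RREG32_MC and RREG32_PCIE_PORT/WREG32_PCIE_PORT macros from radeon.h: every MC or PCIE-port register access is routed through function pointers picked once per family at init time. A small sketch of the dispatch pattern (the wrapper functions below are illustrative only, not part of the patch):

    /* Illustrative only: what the RREG32_MC/WREG32_MC macros boil down to. */
    static inline u32 radeon_mc_read(struct radeon_device *rdev, u32 reg)
    {
            return rdev->mc_rreg(rdev, reg);   /* rs600_mc_rreg, rs780_mc_rreg, ... */
    }

    static inline void radeon_mc_write(struct radeon_device *rdev, u32 reg, u32 v)
    {
            rdev->mc_wreg(rdev, reg, v);       /* chosen in radeon_register_accessor_init() */
    }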
@@ -934,6 +938,8 @@ static struct radeon_asic r600_asic = {
 // .ioctl_wait_idle = r600_ioctl_wait_idle,
 .gui_idle = &r600_gui_idle,
 .mc_wait_for_idle = &r600_mc_wait_for_idle,
+.get_xclk = &r600_get_xclk,
+.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 .gart = {
 .tlb_flush = &r600_pcie_gart_tlb_flush,
 .set_page = &rs600_gart_set_page,
@@ -946,7 +952,7 @@ static struct radeon_asic r600_asic = {
 // .cs_parse = &r600_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &r600_gpu_is_lockup,
+.is_lockup = &r600_gfx_is_lockup,
 },
 [R600_RING_TYPE_DMA_INDEX] = {
 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1018,6 +1024,8 @@ static struct radeon_asic rs780_asic = {
 // .ioctl_wait_idle = r600_ioctl_wait_idle,
 .gui_idle = &r600_gui_idle,
 .mc_wait_for_idle = &r600_mc_wait_for_idle,
+.get_xclk = &r600_get_xclk,
+.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 .gart = {
 .tlb_flush = &r600_pcie_gart_tlb_flush,
 .set_page = &rs600_gart_set_page,
@@ -1030,7 +1038,7 @@ static struct radeon_asic rs780_asic = {
 // .cs_parse = &r600_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &r600_gpu_is_lockup,
+.is_lockup = &r600_gfx_is_lockup,
 },
 [R600_RING_TYPE_DMA_INDEX] = {
 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1102,6 +1110,8 @@ static struct radeon_asic rv770_asic = {
 // .ioctl_wait_idle = r600_ioctl_wait_idle,
 .gui_idle = &r600_gui_idle,
 .mc_wait_for_idle = &r600_mc_wait_for_idle,
+.get_xclk = &rv770_get_xclk,
+.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 .gart = {
 .tlb_flush = &r600_pcie_gart_tlb_flush,
 .set_page = &rs600_gart_set_page,
@@ -1114,7 +1124,7 @@ static struct radeon_asic rv770_asic = {
 // .cs_parse = &r600_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &r600_gpu_is_lockup,
+.is_lockup = &r600_gfx_is_lockup,
 },
 [R600_RING_TYPE_DMA_INDEX] = {
 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1124,6 +1134,15 @@ static struct radeon_asic rv770_asic = {
 .ring_test = &r600_dma_ring_test,
 .ib_test = &r600_dma_ib_test,
 .is_lockup = &r600_dma_is_lockup,
+},
+[R600_RING_TYPE_UVD_INDEX] = {
+// .ib_execute = &r600_uvd_ib_execute,
+// .emit_fence = &r600_uvd_fence_emit,
+// .emit_semaphore = &r600_uvd_semaphore_emit,
+// .cs_parse = &radeon_uvd_cs_parse,
+// .ring_test = &r600_uvd_ring_test,
+// .ib_test = &r600_uvd_ib_test,
+// .is_lockup = &radeon_ring_test_lockup,
 }
 },
 .irq = {
@@ -1140,9 +1159,9 @@ static struct radeon_asic rv770_asic = {
 .copy = {
 .blit = &r600_copy_blit,
 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-.dma = &r600_copy_dma,
+.dma = &rv770_copy_dma,
 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-.copy = &r600_copy_dma,
+.copy = &rv770_copy_dma,
 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 },
 .surface = {
@@ -1168,6 +1187,7 @@ static struct radeon_asic rv770_asic = {
 // .get_pcie_lanes = &r600_get_pcie_lanes,
 // .set_pcie_lanes = &r600_set_pcie_lanes,
 // .set_clock_gating = &radeon_atom_set_clock_gating,
+.set_uvd_clocks = &rv770_set_uvd_clocks,
 },
 .pflip = {
 // .pre_page_flip = &rs600_pre_page_flip,
@@ -1186,6 +1206,8 @@ static struct radeon_asic evergreen_asic = {
 // .ioctl_wait_idle = r600_ioctl_wait_idle,
 .gui_idle = &r600_gui_idle,
 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+.get_xclk = &rv770_get_xclk,
+.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 .gart = {
 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
 .set_page = &rs600_gart_set_page,
@@ -1198,7 +1220,7 @@ static struct radeon_asic evergreen_asic = {
 // .cs_parse = &evergreen_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &evergreen_gpu_is_lockup,
+.is_lockup = &evergreen_gfx_is_lockup,
 },
 [R600_RING_TYPE_DMA_INDEX] = {
 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1207,7 +1229,16 @@ static struct radeon_asic evergreen_asic = {
 // .cs_parse = &evergreen_dma_cs_parse,
 .ring_test = &r600_dma_ring_test,
 .ib_test = &r600_dma_ib_test,
-.is_lockup = &r600_dma_is_lockup,
+.is_lockup = &evergreen_dma_is_lockup,
+},
+[R600_RING_TYPE_UVD_INDEX] = {
+// .ib_execute = &r600_uvd_ib_execute,
+// .emit_fence = &r600_uvd_fence_emit,
+// .emit_semaphore = &r600_uvd_semaphore_emit,
+// .cs_parse = &radeon_uvd_cs_parse,
+// .ring_test = &r600_uvd_ring_test,
+// .ib_test = &r600_uvd_ib_test,
+// .is_lockup = &radeon_ring_test_lockup,
 }
 },
 .irq = {
@@ -1252,6 +1283,7 @@ static struct radeon_asic evergreen_asic = {
 // .get_pcie_lanes = &r600_get_pcie_lanes,
 // .set_pcie_lanes = &r600_set_pcie_lanes,
 // .set_clock_gating = NULL,
+.set_uvd_clocks = &evergreen_set_uvd_clocks,
 },
 .pflip = {
 // .pre_page_flip = &evergreen_pre_page_flip,
@@ -1270,6 +1302,8 @@ static struct radeon_asic sumo_asic = {
 // .ioctl_wait_idle = r600_ioctl_wait_idle,
 .gui_idle = &r600_gui_idle,
 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+.get_xclk = &r600_get_xclk,
+.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 .gart = {
 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
 .set_page = &rs600_gart_set_page,
@@ -1282,7 +1316,7 @@ static struct radeon_asic sumo_asic = {
 // .cs_parse = &evergreen_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &evergreen_gpu_is_lockup,
+.is_lockup = &evergreen_gfx_is_lockup,
 },
 [R600_RING_TYPE_DMA_INDEX] = {
 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1291,7 +1325,16 @@ static struct radeon_asic sumo_asic = {
 // .cs_parse = &evergreen_dma_cs_parse,
 .ring_test = &r600_dma_ring_test,
 .ib_test = &r600_dma_ib_test,
-.is_lockup = &r600_dma_is_lockup,
+.is_lockup = &evergreen_dma_is_lockup,
+},
+[R600_RING_TYPE_UVD_INDEX] = {
+// .ib_execute = &r600_uvd_ib_execute,
+// .emit_fence = &r600_uvd_fence_emit,
+// .emit_semaphore = &r600_uvd_semaphore_emit,
+// .cs_parse = &radeon_uvd_cs_parse,
+// .ring_test = &r600_uvd_ring_test,
+// .ib_test = &r600_uvd_ib_test,
+// .is_lockup = &radeon_ring_test_lockup,
 }
 },
 .irq = {
@@ -1336,6 +1379,7 @@ static struct radeon_asic sumo_asic = {
 .get_pcie_lanes = NULL,
 .set_pcie_lanes = NULL,
 .set_clock_gating = NULL,
+.set_uvd_clocks = &sumo_set_uvd_clocks,
 },
 .pflip = {
 // .pre_page_flip = &evergreen_pre_page_flip,
@@ -1354,6 +1398,8 @@ static struct radeon_asic btc_asic = {
 // .ioctl_wait_idle = r600_ioctl_wait_idle,
 .gui_idle = &r600_gui_idle,
 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+.get_xclk = &rv770_get_xclk,
+.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 .gart = {
 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
 .set_page = &rs600_gart_set_page,
@@ -1366,7 +1412,7 @@ static struct radeon_asic btc_asic = {
 // .cs_parse = &evergreen_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &evergreen_gpu_is_lockup,
+.is_lockup = &evergreen_gfx_is_lockup,
 },
 [R600_RING_TYPE_DMA_INDEX] = {
 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1375,7 +1421,16 @@ static struct radeon_asic btc_asic = {
 // .cs_parse = &evergreen_dma_cs_parse,
 .ring_test = &r600_dma_ring_test,
 .ib_test = &r600_dma_ib_test,
-.is_lockup = &r600_dma_is_lockup,
+.is_lockup = &evergreen_dma_is_lockup,
+},
+[R600_RING_TYPE_UVD_INDEX] = {
+// .ib_execute = &r600_uvd_ib_execute,
+// .emit_fence = &r600_uvd_fence_emit,
+// .emit_semaphore = &r600_uvd_semaphore_emit,
+// .cs_parse = &radeon_uvd_cs_parse,
+// .ring_test = &r600_uvd_ring_test,
+// .ib_test = &r600_uvd_ib_test,
+// .is_lockup = &radeon_ring_test_lockup,
 }
 },
 .irq = {
@@ -1420,6 +1475,7 @@ static struct radeon_asic btc_asic = {
 .get_pcie_lanes = NULL,
 .set_pcie_lanes = NULL,
 .set_clock_gating = NULL,
+.set_uvd_clocks = &evergreen_set_uvd_clocks,
 },
 .pflip = {
 // .pre_page_flip = &evergreen_pre_page_flip,
@@ -1438,6 +1494,8 @@ static struct radeon_asic cayman_asic = {
 // .ioctl_wait_idle = r600_ioctl_wait_idle,
 .gui_idle = &r600_gui_idle,
 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+.get_xclk = &rv770_get_xclk,
+.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 .gart = {
 .tlb_flush = &cayman_pcie_gart_tlb_flush,
 .set_page = &rs600_gart_set_page,
@@ -1457,7 +1515,7 @@ static struct radeon_asic cayman_asic = {
 // .cs_parse = &evergreen_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &evergreen_gpu_is_lockup,
+.is_lockup = &cayman_gfx_is_lockup,
 .vm_flush = &cayman_vm_flush,
 },
 [CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1468,7 +1526,7 @@ static struct radeon_asic cayman_asic = {
 // .cs_parse = &evergreen_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &evergreen_gpu_is_lockup,
+.is_lockup = &cayman_gfx_is_lockup,
 .vm_flush = &cayman_vm_flush,
 },
 [CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1479,7 +1537,7 @@ static struct radeon_asic cayman_asic = {
 // .cs_parse = &evergreen_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &evergreen_gpu_is_lockup,
+.is_lockup = &cayman_gfx_is_lockup,
 .vm_flush = &cayman_vm_flush,
 },
 [R600_RING_TYPE_DMA_INDEX] = {
@@ -1503,6 +1561,15 @@ static struct radeon_asic cayman_asic = {
 .ib_test = &r600_dma_ib_test,
 .is_lockup = &cayman_dma_is_lockup,
 .vm_flush = &cayman_dma_vm_flush,
+},
+[R600_RING_TYPE_UVD_INDEX] = {
+// .ib_execute = &r600_uvd_ib_execute,
+// .emit_fence = &r600_uvd_fence_emit,
+// .emit_semaphore = &cayman_uvd_semaphore_emit,
+// .cs_parse = &radeon_uvd_cs_parse,
+// .ring_test = &r600_uvd_ring_test,
+// .ib_test = &r600_uvd_ib_test,
+// .is_lockup = &radeon_ring_test_lockup,
 }
 },
 .irq = {
@@ -1547,6 +1614,7 @@ static struct radeon_asic cayman_asic = {
 .get_pcie_lanes = NULL,
 .set_pcie_lanes = NULL,
 .set_clock_gating = NULL,
+.set_uvd_clocks = &evergreen_set_uvd_clocks,
 },
 .pflip = {
 // .pre_page_flip = &evergreen_pre_page_flip,
@@ -1565,6 +1633,8 @@ static struct radeon_asic trinity_asic = {
 // .ioctl_wait_idle = r600_ioctl_wait_idle,
 .gui_idle = &r600_gui_idle,
 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+.get_xclk = &r600_get_xclk,
+.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 .gart = {
 .tlb_flush = &cayman_pcie_gart_tlb_flush,
 .set_page = &rs600_gart_set_page,
@@ -1584,7 +1654,7 @@ static struct radeon_asic trinity_asic = {
 // .cs_parse = &evergreen_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &evergreen_gpu_is_lockup,
+.is_lockup = &cayman_gfx_is_lockup,
 .vm_flush = &cayman_vm_flush,
 },
 [CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1595,7 +1665,7 @@ static struct radeon_asic trinity_asic = {
 // .cs_parse = &evergreen_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &evergreen_gpu_is_lockup,
+.is_lockup = &cayman_gfx_is_lockup,
 .vm_flush = &cayman_vm_flush,
 },
 [CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1606,7 +1676,7 @@ static struct radeon_asic trinity_asic = {
 // .cs_parse = &evergreen_cs_parse,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &evergreen_gpu_is_lockup,
+.is_lockup = &cayman_gfx_is_lockup,
 .vm_flush = &cayman_vm_flush,
 },
 [R600_RING_TYPE_DMA_INDEX] = {
@@ -1630,6 +1700,15 @@ static struct radeon_asic trinity_asic = {
 .ib_test = &r600_dma_ib_test,
 .is_lockup = &cayman_dma_is_lockup,
 .vm_flush = &cayman_dma_vm_flush,
+},
+[R600_RING_TYPE_UVD_INDEX] = {
+// .ib_execute = &r600_uvd_ib_execute,
+// .emit_fence = &r600_uvd_fence_emit,
+// .emit_semaphore = &cayman_uvd_semaphore_emit,
+// .cs_parse = &radeon_uvd_cs_parse,
+// .ring_test = &r600_uvd_ring_test,
+// .ib_test = &r600_uvd_ib_test,
+// .is_lockup = &radeon_ring_test_lockup,
 }
 },
 .irq = {
@@ -1674,6 +1753,7 @@ static struct radeon_asic trinity_asic = {
 .get_pcie_lanes = NULL,
 .set_pcie_lanes = NULL,
 .set_clock_gating = NULL,
+.set_uvd_clocks = &sumo_set_uvd_clocks,
 },
 .pflip = {
 // .pre_page_flip = &evergreen_pre_page_flip,
@@ -1692,6 +1772,8 @@ static struct radeon_asic si_asic = {
 // .ioctl_wait_idle = r600_ioctl_wait_idle,
 .gui_idle = &r600_gui_idle,
 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+.get_xclk = &si_get_xclk,
+.get_gpu_clock_counter = &si_get_gpu_clock_counter,
 .gart = {
 .tlb_flush = &si_pcie_gart_tlb_flush,
 .set_page = &rs600_gart_set_page,
@@ -1711,7 +1793,7 @@ static struct radeon_asic si_asic = {
 .cs_parse = NULL,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &si_gpu_is_lockup,
+.is_lockup = &si_gfx_is_lockup,
 .vm_flush = &si_vm_flush,
 },
 [CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1722,7 +1804,7 @@ static struct radeon_asic si_asic = {
 .cs_parse = NULL,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &si_gpu_is_lockup,
+.is_lockup = &si_gfx_is_lockup,
 .vm_flush = &si_vm_flush,
 },
 [CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1733,7 +1815,7 @@ static struct radeon_asic si_asic = {
 .cs_parse = NULL,
 .ring_test = &r600_ring_test,
 .ib_test = &r600_ib_test,
-.is_lockup = &si_gpu_is_lockup,
+.is_lockup = &si_gfx_is_lockup,
 .vm_flush = &si_vm_flush,
 },
 [R600_RING_TYPE_DMA_INDEX] = {
@@ -1744,7 +1826,7 @@ static struct radeon_asic si_asic = {
 .cs_parse = NULL,
 .ring_test = &r600_dma_ring_test,
 .ib_test = &r600_dma_ib_test,
-.is_lockup = &cayman_dma_is_lockup,
+.is_lockup = &si_dma_is_lockup,
 .vm_flush = &si_dma_vm_flush,
 },
 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
@@ -1755,8 +1837,17 @@ static struct radeon_asic si_asic = {
 .cs_parse = NULL,
 .ring_test = &r600_dma_ring_test,
 .ib_test = &r600_dma_ib_test,
-.is_lockup = &cayman_dma_is_lockup,
+.is_lockup = &si_dma_is_lockup,
 .vm_flush = &si_dma_vm_flush,
+},
+[R600_RING_TYPE_UVD_INDEX] = {
+// .ib_execute = &r600_uvd_ib_execute,
+// .emit_fence = &r600_uvd_fence_emit,
+// .emit_semaphore = &cayman_uvd_semaphore_emit,
+// .cs_parse = &radeon_uvd_cs_parse,
+// .ring_test = &r600_uvd_ring_test,
+// .ib_test = &r600_uvd_ib_test,
+// .is_lockup = &radeon_ring_test_lockup,
 }
 },
 .irq = {
@@ -1801,6 +1892,7 @@ static struct radeon_asic si_asic = {
 .get_pcie_lanes = NULL,
 .set_pcie_lanes = NULL,
 .set_clock_gating = NULL,
+// .set_uvd_clocks = &si_set_uvd_clocks,
 },
 .pflip = {
 // .pre_page_flip = &evergreen_pre_page_flip,
@@ -1829,6 +1921,8 @@ int radeon_asic_init(struct radeon_device *rdev)
 else
 rdev->num_crtc = 2;
 
+rdev->has_uvd = false;
+
 switch (rdev->family) {
 case CHIP_R100:
 case CHIP_RV100:
@@ -1893,16 +1987,22 @@ int radeon_asic_init(struct radeon_device *rdev)
 case CHIP_RV635:
 case CHIP_RV670:
 rdev->asic = &r600_asic;
+if (rdev->family == CHIP_R600)
+rdev->has_uvd = false;
+else
+rdev->has_uvd = true;
 break;
 case CHIP_RS780:
 case CHIP_RS880:
 rdev->asic = &rs780_asic;
+rdev->has_uvd = true;
 break;
 case CHIP_RV770:
 case CHIP_RV730:
 case CHIP_RV710:
 case CHIP_RV740:
 rdev->asic = &rv770_asic;
+rdev->has_uvd = true;
 break;
 case CHIP_CEDAR:
 case CHIP_REDWOOD:
@@ -1915,11 +2015,13 @@ int radeon_asic_init(struct radeon_device *rdev)
 else
 rdev->num_crtc = 6;
 rdev->asic = &evergreen_asic;
+rdev->has_uvd = true;
 break;
 case CHIP_PALM:
 case CHIP_SUMO:
 case CHIP_SUMO2:
 rdev->asic = &sumo_asic;
+rdev->has_uvd = true;
 break;
 case CHIP_BARTS:
 case CHIP_TURKS:
@@ -1930,23 +2032,37 @@ int radeon_asic_init(struct radeon_device *rdev)
 else
 rdev->num_crtc = 6;
 rdev->asic = &btc_asic;
+rdev->has_uvd = true;
 break;
 case CHIP_CAYMAN:
 rdev->asic = &cayman_asic;
 /* set num crtcs */
 rdev->num_crtc = 6;
+rdev->has_uvd = true;
 break;
 case CHIP_ARUBA:
 rdev->asic = &trinity_asic;
 /* set num crtcs */
 rdev->num_crtc = 4;
+rdev->has_uvd = true;
 break;
 case CHIP_TAHITI:
 case CHIP_PITCAIRN:
 case CHIP_VERDE:
+case CHIP_OLAND:
+case CHIP_HAINAN:
 rdev->asic = &si_asic;
 /* set num crtcs */
+if (rdev->family == CHIP_HAINAN)
+rdev->num_crtc = 0;
+else if (rdev->family == CHIP_OLAND)
+rdev->num_crtc = 2;
+else
 rdev->num_crtc = 6;
+if (rdev->family == CHIP_HAINAN)
+rdev->has_uvd = false;
+else
+rdev->has_uvd = true;
 break;
 default:
 /* FIXME: not supported yet */
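The switch above now records UVD availability per family in rdev->has_uvd instead of leaving it implicit in the ASIC structs. A hedged sketch of how later init code can key off that flag (radeon_uvd_init() is an assumed entry point, not part of this hunk; error handling is trimmed):

    /* Sketch: gate UVD bring-up on the flag set in radeon_asic_init(). */
    if (rdev->has_uvd) {
            r = radeon_uvd_init(rdev);      /* assumed helper */
            if (r)
                    dev_err(rdev->dev, "UVD init failed (%d), continuing without UVD\n", r);
    }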
@ -319,7 +319,7 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
|
|||||||
bool emit_wait);
|
bool emit_wait);
|
||||||
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
|
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
|
||||||
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
|
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
|
||||||
int r600_asic_reset(struct radeon_device *rdev);
|
int r600_asic_reset(struct radeon_device *rdev);
|
||||||
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
|
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
|
||||||
uint32_t tiling_flags, uint32_t pitch,
|
uint32_t tiling_flags, uint32_t pitch,
|
||||||
@ -330,6 +330,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
|
|||||||
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
|
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
|
||||||
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
|
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
|
||||||
|
int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
|
||||||
int r600_copy_blit(struct radeon_device *rdev,
|
int r600_copy_blit(struct radeon_device *rdev,
|
||||||
uint64_t src_offset, uint64_t dst_offset,
|
uint64_t src_offset, uint64_t dst_offset,
|
||||||
unsigned num_gpu_pages, struct radeon_fence **fence);
|
unsigned num_gpu_pages, struct radeon_fence **fence);
|
||||||
@ -346,6 +347,8 @@ extern bool r600_gui_idle(struct radeon_device *rdev);
|
|||||||
extern void r600_pm_misc(struct radeon_device *rdev);
|
extern void r600_pm_misc(struct radeon_device *rdev);
|
||||||
extern void r600_pm_init_profile(struct radeon_device *rdev);
|
extern void r600_pm_init_profile(struct radeon_device *rdev);
|
||||||
extern void rs780_pm_init_profile(struct radeon_device *rdev);
|
extern void rs780_pm_init_profile(struct radeon_device *rdev);
|
||||||
|
extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
|
||||||
|
extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
|
||||||
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
|
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
|
||||||
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
|
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
|
||||||
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
|
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
|
||||||
@ -373,11 +376,12 @@ void r600_disable_interrupts(struct radeon_device *rdev);
|
|||||||
void r600_rlc_stop(struct radeon_device *rdev);
|
void r600_rlc_stop(struct radeon_device *rdev);
|
||||||
/* r600 audio */
|
/* r600 audio */
|
||||||
int r600_audio_init(struct radeon_device *rdev);
|
int r600_audio_init(struct radeon_device *rdev);
|
||||||
void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
|
|
||||||
struct r600_audio r600_audio_status(struct radeon_device *rdev);
|
struct r600_audio r600_audio_status(struct radeon_device *rdev);
|
||||||
void r600_audio_fini(struct radeon_device *rdev);
|
void r600_audio_fini(struct radeon_device *rdev);
|
||||||
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
|
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
|
||||||
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
|
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
|
||||||
|
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
|
||||||
|
void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
|
||||||
/* r600 blit */
|
/* r600 blit */
|
||||||
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
|
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
|
||||||
struct radeon_fence **fence, struct radeon_sa_bo **vb,
|
struct radeon_fence **fence, struct radeon_sa_bo **vb,
|
||||||
@ -389,7 +393,21 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
|
|||||||
unsigned num_gpu_pages,
|
unsigned num_gpu_pages,
|
||||||
struct radeon_sa_bo *vb);
|
struct radeon_sa_bo *vb);
|
||||||
int r600_mc_wait_for_idle(struct radeon_device *rdev);
|
int r600_mc_wait_for_idle(struct radeon_device *rdev);
|
||||||
uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
|
u32 r600_get_xclk(struct radeon_device *rdev);
|
||||||
|
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
|
||||||
|
|
||||||
|
/* uvd */
|
||||||
|
int r600_uvd_init(struct radeon_device *rdev);
|
||||||
|
int r600_uvd_rbc_start(struct radeon_device *rdev);
|
||||||
|
void r600_uvd_rbc_stop(struct radeon_device *rdev);
|
||||||
|
int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
|
||||||
|
void r600_uvd_fence_emit(struct radeon_device *rdev,
|
||||||
|
struct radeon_fence *fence);
|
||||||
|
void r600_uvd_semaphore_emit(struct radeon_device *rdev,
|
||||||
|
struct radeon_ring *ring,
|
||||||
|
struct radeon_semaphore *semaphore,
|
||||||
|
bool emit_wait);
|
||||||
|
void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* rv770,rv730,rv710,rv740
|
* rv770,rv730,rv710,rv740
|
||||||
@ -407,6 +425,9 @@ int rv770_copy_dma(struct radeon_device *rdev,
|
|||||||
uint64_t src_offset, uint64_t dst_offset,
|
uint64_t src_offset, uint64_t dst_offset,
|
||||||
unsigned num_gpu_pages,
|
unsigned num_gpu_pages,
|
||||||
struct radeon_fence **fence);
|
struct radeon_fence **fence);
|
||||||
|
u32 rv770_get_xclk(struct radeon_device *rdev);
|
||||||
|
int rv770_uvd_resume(struct radeon_device *rdev);
|
||||||
|
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* evergreen
|
* evergreen
|
||||||
@ -422,7 +443,8 @@ int evergreen_init(struct radeon_device *rdev);
|
|||||||
void evergreen_fini(struct radeon_device *rdev);
|
void evergreen_fini(struct radeon_device *rdev);
|
||||||
int evergreen_suspend(struct radeon_device *rdev);
|
int evergreen_suspend(struct radeon_device *rdev);
|
||||||
int evergreen_resume(struct radeon_device *rdev);
|
int evergreen_resume(struct radeon_device *rdev);
|
||||||
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
|
bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
|
||||||
|
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
|
||||||
int evergreen_asic_reset(struct radeon_device *rdev);
|
int evergreen_asic_reset(struct radeon_device *rdev);
|
||||||
void evergreen_bandwidth_update(struct radeon_device *rdev);
|
void evergreen_bandwidth_update(struct radeon_device *rdev);
|
||||||
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
@ -441,6 +463,8 @@ extern void evergreen_pm_prepare(struct radeon_device *rdev);
|
|||||||
extern void evergreen_pm_finish(struct radeon_device *rdev);
|
extern void evergreen_pm_finish(struct radeon_device *rdev);
|
||||||
extern void sumo_pm_init_profile(struct radeon_device *rdev);
|
extern void sumo_pm_init_profile(struct radeon_device *rdev);
|
||||||
extern void btc_pm_init_profile(struct radeon_device *rdev);
|
extern void btc_pm_init_profile(struct radeon_device *rdev);
|
||||||
|
int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
|
||||||
|
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
|
||||||
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
|
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
|
||||||
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
|
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
|
||||||
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
|
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
|
||||||
@ -456,12 +480,18 @@ int evergreen_copy_dma(struct radeon_device *rdev,
|
|||||||
uint64_t src_offset, uint64_t dst_offset,
|
uint64_t src_offset, uint64_t dst_offset,
|
||||||
unsigned num_gpu_pages,
|
unsigned num_gpu_pages,
|
||||||
struct radeon_fence **fence);
|
struct radeon_fence **fence);
|
||||||
|
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
|
||||||
|
void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* cayman
|
* cayman
|
||||||
*/
|
*/
|
||||||
void cayman_fence_ring_emit(struct radeon_device *rdev,
|
void cayman_fence_ring_emit(struct radeon_device *rdev,
|
||||||
struct radeon_fence *fence);
|
struct radeon_fence *fence);
|
||||||
|
void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
|
||||||
|
struct radeon_ring *ring,
|
||||||
|
struct radeon_semaphore *semaphore,
|
||||||
|
bool emit_wait);
|
||||||
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
|
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
|
||||||
int cayman_init(struct radeon_device *rdev);
|
int cayman_init(struct radeon_device *rdev);
|
||||||
void cayman_fini(struct radeon_device *rdev);
|
void cayman_fini(struct radeon_device *rdev);
|
||||||
@ -473,13 +503,16 @@ int cayman_vm_init(struct radeon_device *rdev);
|
|||||||
void cayman_vm_fini(struct radeon_device *rdev);
|
void cayman_vm_fini(struct radeon_device *rdev);
|
||||||
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
||||||
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
|
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
|
||||||
void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
|
void cayman_vm_set_page(struct radeon_device *rdev,
|
||||||
|
struct radeon_ib *ib,
|
||||||
|
uint64_t pe,
|
||||||
uint64_t addr, unsigned count,
|
uint64_t addr, unsigned count,
|
||||||
uint32_t incr, uint32_t flags);
|
uint32_t incr, uint32_t flags);
|
||||||
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
|
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
|
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
|
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
|
||||||
struct radeon_ib *ib);
|
struct radeon_ib *ib);
|
||||||
|
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
|
||||||
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
|
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
|
||||||
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
||||||
|
|
||||||
@ -496,23 +529,28 @@ int si_init(struct radeon_device *rdev);
|
|||||||
void si_fini(struct radeon_device *rdev);
|
void si_fini(struct radeon_device *rdev);
|
||||||
int si_suspend(struct radeon_device *rdev);
|
int si_suspend(struct radeon_device *rdev);
|
||||||
int si_resume(struct radeon_device *rdev);
|
int si_resume(struct radeon_device *rdev);
|
||||||
bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
|
bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
|
||||||
|
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
|
||||||
int si_asic_reset(struct radeon_device *rdev);
|
int si_asic_reset(struct radeon_device *rdev);
|
||||||
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
int si_irq_set(struct radeon_device *rdev);
|
int si_irq_set(struct radeon_device *rdev);
|
||||||
int si_irq_process(struct radeon_device *rdev);
|
int si_irq_process(struct radeon_device *rdev);
|
||||||
int si_vm_init(struct radeon_device *rdev);
|
int si_vm_init(struct radeon_device *rdev);
|
||||||
void si_vm_fini(struct radeon_device *rdev);
|
void si_vm_fini(struct radeon_device *rdev);
|
||||||
void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
|
void si_vm_set_page(struct radeon_device *rdev,
|
||||||
|
struct radeon_ib *ib,
|
||||||
|
uint64_t pe,
|
||||||
uint64_t addr, unsigned count,
|
uint64_t addr, unsigned count,
|
||||||
uint32_t incr, uint32_t flags);
|
uint32_t incr, uint32_t flags);
|
||||||
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
||||||
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
|
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
uint64_t si_get_gpu_clock(struct radeon_device *rdev);
|
|
||||||
int si_copy_dma(struct radeon_device *rdev,
|
int si_copy_dma(struct radeon_device *rdev,
|
||||||
uint64_t src_offset, uint64_t dst_offset,
|
uint64_t src_offset, uint64_t dst_offset,
|
||||||
unsigned num_gpu_pages,
|
unsigned num_gpu_pages,
|
||||||
struct radeon_fence **fence);
|
struct radeon_fence **fence);
|
||||||
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
||||||
|
u32 si_get_xclk(struct radeon_device *rdev);
|
||||||
|
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
|
||||||
|
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
|
|||||||
num_modes = power_info->info.ucNumOfPowerModeEntries;
|
num_modes = power_info->info.ucNumOfPowerModeEntries;
|
||||||
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
|
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
|
||||||
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
|
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
|
||||||
|
if (num_modes == 0)
|
||||||
|
return state_index;
|
||||||
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
|
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
|
||||||
if (!rdev->pm.power_state)
|
if (!rdev->pm.power_state)
|
||||||
return state_index;
|
return state_index;
|
||||||
@ -2307,7 +2309,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
|
|||||||
rdev->pm.default_power_state_index = state_index;
|
rdev->pm.default_power_state_index = state_index;
|
||||||
rdev->pm.power_state[state_index].default_clock_mode =
|
rdev->pm.power_state[state_index].default_clock_mode =
|
||||||
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
|
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
|
||||||
if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
|
if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
|
||||||
/* NI chips post without MC ucode, so default clocks are strobe mode only */
|
/* NI chips post without MC ucode, so default clocks are strobe mode only */
|
||||||
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
|
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
|
||||||
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
|
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
|
||||||
@ -2345,7 +2347,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
|
|||||||
sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
|
sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
|
||||||
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
|
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
|
||||||
}
|
}
|
||||||
} else if (ASIC_IS_DCE6(rdev)) {
|
} else if (rdev->family >= CHIP_TAHITI) {
|
||||||
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
|
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
|
||||||
sclk |= clock_info->si.ucEngineClockHigh << 16;
|
sclk |= clock_info->si.ucEngineClockHigh << 16;
|
||||||
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
|
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
|
||||||
@ -2358,7 +2360,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
|
|||||||
le16_to_cpu(clock_info->si.usVDDC);
|
le16_to_cpu(clock_info->si.usVDDC);
|
||||||
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
|
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
|
||||||
le16_to_cpu(clock_info->si.usVDDCI);
|
le16_to_cpu(clock_info->si.usVDDCI);
|
||||||
} else if (ASIC_IS_DCE4(rdev)) {
|
} else if (rdev->family >= CHIP_CEDAR) {
|
||||||
sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
|
sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
|
||||||
sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
|
sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
|
||||||
mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
|
mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
|
||||||
@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
|
|||||||
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
|
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
|
||||||
|
|
||||||
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
|
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
|
||||||
|
if (power_info->pplib.ucNumStates == 0)
|
||||||
|
return state_index;
|
||||||
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
|
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
|
||||||
power_info->pplib.ucNumStates, GFP_KERNEL);
|
power_info->pplib.ucNumStates, GFP_KERNEL);
|
||||||
if (!rdev->pm.power_state)
|
if (!rdev->pm.power_state)
|
||||||
@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
|
|||||||
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
|
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
|
||||||
u16 data_offset;
|
u16 data_offset;
|
||||||
u8 frev, crev;
|
u8 frev, crev;
|
||||||
|
u8 *power_state_offset;
|
||||||
|
|
||||||
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
|
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
|
||||||
&frev, &crev, &data_offset))
|
&frev, &crev, &data_offset))
|
||||||
@@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
     non_clock_info_array = (struct _NonClockInfoArray *)
         (mode_info->atom_context->bios + data_offset +
          le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+    if (state_array->ucNumEntries == 0)
+        return state_index;
     rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
                                    state_array->ucNumEntries, GFP_KERNEL);
     if (!rdev->pm.power_state)
         return state_index;
+    power_state_offset = (u8 *)state_array->states;
     for (i = 0; i < state_array->ucNumEntries; i++) {
         mode_index = 0;
-        power_state = (union pplib_power_state *)&state_array->states[i];
-        /* XXX this might be an inagua bug... */
-        non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+        power_state = (union pplib_power_state *)power_state_offset;
+        non_clock_array_index = power_state->v2.nonClockInfoIndex;
         non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
             &non_clock_info_array->nonClockInfo[non_clock_array_index];
         rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
         if (power_state->v2.ucNumDPMLevels) {
             for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
                 clock_array_index = power_state->v2.clockInfoIndex[j];
-                /* XXX this might be an inagua bug... */
-                if (clock_array_index >= clock_info_array->ucNumEntries)
-                    continue;
                 clock_info = (union pplib_clock_info *)
                     &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
                 valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
                                                            non_clock_info);
             state_index++;
         }
+        power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
     }
     /* if multiple clock modes, mark the lowest as no display */
     for (i = 0; i < state_index; i++) {
@@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
         default:
             break;
         }
-    } else {
+    }
+
+    if (state_index == 0) {
         rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
         if (rdev->pm.power_state) {
             rdev->pm.power_state[0].clock_info =
@@ -2654,6 +2661,111 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
         rdev->pm.current_vddc = 0;
 }

+union get_clock_dividers {
+    struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
+    struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
+    struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
+    struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
+    struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
+};
+
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+                                   u8 clock_type,
+                                   u32 clock,
+                                   bool strobe_mode,
+                                   struct atom_clock_dividers *dividers)
+{
+    union get_clock_dividers args;
+    int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
+    u8 frev, crev;
+
+    memset(&args, 0, sizeof(args));
+    memset(dividers, 0, sizeof(struct atom_clock_dividers));
+
+    if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+        return -EINVAL;
+
+    switch (crev) {
+    case 1:
+        /* r4xx, r5xx */
+        args.v1.ucAction = clock_type;
+        args.v1.ulClock = cpu_to_le32(clock);    /* 10 khz */
+
+        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+        dividers->post_div = args.v1.ucPostDiv;
+        dividers->fb_div = args.v1.ucFbDiv;
+        dividers->enable_post_div = true;
+        break;
+    case 2:
+    case 3:
+        /* r6xx, r7xx, evergreen, ni */
+        if (rdev->family <= CHIP_RV770) {
+            args.v2.ucAction = clock_type;
+            args.v2.ulClock = cpu_to_le32(clock);    /* 10 khz */
+
+            atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+            dividers->post_div = args.v2.ucPostDiv;
+            dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
+            dividers->ref_div = args.v2.ucAction;
+            if (rdev->family == CHIP_RV770) {
+                dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
+                    true : false;
+                dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
+            } else
+                dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
+        } else {
+            if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+                args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+                atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+                dividers->post_div = args.v3.ucPostDiv;
+                dividers->enable_post_div = (args.v3.ucCntlFlag &
+                                             ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+                dividers->enable_dithen = (args.v3.ucCntlFlag &
+                                           ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+                dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+                dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+                dividers->ref_div = args.v3.ucRefDiv;
+                dividers->vco_mode = (args.v3.ucCntlFlag &
+                                      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+            } else {
+                args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+                if (strobe_mode)
+                    args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+                atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+                dividers->post_div = args.v5.ucPostDiv;
+                dividers->enable_post_div = (args.v5.ucCntlFlag &
+                                             ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+                dividers->enable_dithen = (args.v5.ucCntlFlag &
+                                           ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+                dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+                dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+                dividers->ref_div = args.v5.ucRefDiv;
+                dividers->vco_mode = (args.v5.ucCntlFlag &
+                                      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+            }
+        }
+        break;
+    case 4:
+        /* fusion */
+        args.v4.ulClock = cpu_to_le32(clock);    /* 10 khz */
+
+        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+        dividers->post_div = args.v4.ucPostDiv;
+        dividers->real_clock = le32_to_cpu(args.v4.ulClock);
+        break;
+    default:
+        return -EINVAL;
+    }
+    return 0;
+}
+
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
 {
     DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
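A quick usage sketch, not part of this commit: a clock-programming path could query the newly added radeon_atom_get_clock_dividers() roughly as below. setup_engine_pll is a hypothetical caller name; COMPUTE_ENGINE_PLL_PARAM and struct atom_clock_dividers already exist in the radeon/atombios headers, and the clock argument is in 10 kHz units as in the function above.

/*
 * Hypothetical caller (illustration only): ask the ATOM
 * ComputeMemoryEnginePLL table for engine-PLL dividers, then read the
 * values the table filled in.
 */
static int setup_engine_pll(struct radeon_device *rdev, u32 sclk_10khz)
{
    struct atom_clock_dividers dividers;
    int ret;

    ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                         sclk_10khz, false, &dividers);
    if (ret)
        return ret;

    /* dividers.ref_div, dividers.fb_div and dividers.post_div now hold
     * the values computed by the ATOM table and could be programmed
     * into the PLL registers by the caller. */
    return 0;
}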
Some files were not shown because too many files have changed in this diff.