RC11.01 R600 irq handler
git-svn-id: svn://kolibrios.org@2004 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
parent 22a5a068ee
commit e2c16e815b
@@ -886,9 +886,6 @@ int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
    total_objects += dev->mode_config.num_connector;
    total_objects += dev->mode_config.num_encoder;

    if (total_objects == 0)
        return -EINVAL;

    group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
    if (!group->id_list)
        return -ENOMEM;
@@ -184,9 +184,9 @@ drm_edid_block_valid(u8 *raw_edid)

bad:
    if (raw_edid) {
        DRM_ERROR("Raw EDID:\n");
        printk(KERN_ERR "Raw EDID:\n");
//      print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
//      printk("\n");
        printk(KERN_ERR "\n");
    }
    return 0;
}
@@ -258,6 +258,17 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
    return ret == 2 ? 0 : -1;
}

static bool drm_edid_is_zero(u8 *in_edid, int length)
{
    int i;
    u32 *raw_edid = (u32 *)in_edid;

    for (i = 0; i < length / 4; i++)
        if (*(raw_edid + i) != 0)
            return false;
    return true;
}

static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
@@ -274,6 +285,10 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
            goto out;
        if (drm_edid_block_valid(block))
            break;
        if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
            connector->null_edid_counter++;
            goto carp;
        }
    }
    if (i == 4)
        goto carp;
@@ -924,7 +924,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)

    /* clean out all the encoder/crtc combos */
    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
//      encoder->crtc = NULL;
        encoder->crtc = NULL;
    }

    crtcs = kcalloc(dev->mode_config.num_connector,
@@ -55,20 +55,23 @@ NAME_SRC= \
    radeon_device.c \
    evergreen.c \
    evergreen_blit_shaders.c \
    cayman_blit_shaders.c \
    radeon_clocks.c \
    radeon_i2c.c \
    atom.c \
    ni.c \
    radeon_gem.c \
    radeon_atombios.c \
    radeon_agp.c \
    radeon_asic.c \
    radeon_atombios.c \
    radeon_bios.c \
    radeon_combios.c \
    radeon_connectors.c \
    atombios_crtc.c \
    atombios_dp.c \
    radeon_encoders.c \
    radeon_connectors.c \
    radeon_bios.c \
    radeon_combios.c \
    radeon_fence.c \
    radeon_gem.c \
    radeon_i2c.c \
    radeon_irq_kms.c \
    radeon_legacy_crtc.c \
    radeon_legacy_encoders.c \
    radeon_legacy_tv.c \
@@ -55,20 +55,23 @@ NAME_SRC= \
    radeon_device.c \
    evergreen.c \
    evergreen_blit_shaders.c \
    cayman_blit_shaders.c \
    radeon_clocks.c \
    radeon_i2c.c \
    atom.c \
    ni.c \
    radeon_gem.c \
    radeon_atombios.c \
    radeon_agp.c \
    radeon_asic.c \
    radeon_atombios.c \
    radeon_bios.c \
    radeon_combios.c \
    radeon_connectors.c \
    atombios_crtc.c \
    atombios_dp.c \
    radeon_encoders.c \
    radeon_connectors.c \
    radeon_bios.c \
    radeon_combios.c \
    radeon_fence.c \
    radeon_gem.c \
    radeon_i2c.c \
    radeon_irq_kms.c \
    radeon_legacy_crtc.c \
    radeon_legacy_encoders.c \
    radeon_legacy_tv.c \
drivers/video/drm/radeon/cayman_blit_shaders.c (new file, 373 lines)
@@ -0,0 +1,373 @@
/*
|
||||
* Copyright 2010 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Alex Deucher <alexander.deucher@amd.com>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
/*
|
||||
* evergreen cards need to use the 3D engine to blit data which requires
|
||||
* quite a bit of hw state setup. Rather than pull the whole 3D driver
|
||||
* (which normally generates the 3D state) into the DRM, we opt to use
|
||||
* statically generated state tables. The regsiter state and shaders
|
||||
* were hand generated to support blitting functionality. See the 3D
|
||||
* driver or documentation for descriptions of the registers and
|
||||
* shader instructions.
|
||||
*/
|
||||
|
||||
const u32 cayman_default_state[] =
|
||||
{
|
||||
0xc0066900,
|
||||
0x00000000,
|
||||
0x00000060, /* DB_RENDER_CONTROL */
|
||||
0x00000000, /* DB_COUNT_CONTROL */
|
||||
0x00000000, /* DB_DEPTH_VIEW */
|
||||
0x0000002a, /* DB_RENDER_OVERRIDE */
|
||||
0x00000000, /* DB_RENDER_OVERRIDE2 */
|
||||
0x00000000, /* DB_HTILE_DATA_BASE */
|
||||
|
||||
0xc0026900,
|
||||
0x0000000a,
|
||||
0x00000000, /* DB_STENCIL_CLEAR */
|
||||
0x00000000, /* DB_DEPTH_CLEAR */
|
||||
|
||||
0xc0036900,
|
||||
0x0000000f,
|
||||
0x00000000, /* DB_DEPTH_INFO */
|
||||
0x00000000, /* DB_Z_INFO */
|
||||
0x00000000, /* DB_STENCIL_INFO */
|
||||
|
||||
0xc0016900,
|
||||
0x00000080,
|
||||
0x00000000, /* PA_SC_WINDOW_OFFSET */
|
||||
|
||||
0xc00d6900,
|
||||
0x00000083,
|
||||
0x0000ffff, /* PA_SC_CLIPRECT_RULE */
|
||||
0x00000000, /* PA_SC_CLIPRECT_0_TL */
|
||||
0x20002000, /* PA_SC_CLIPRECT_0_BR */
|
||||
0x00000000,
|
||||
0x20002000,
|
||||
0x00000000,
|
||||
0x20002000,
|
||||
0x00000000,
|
||||
0x20002000,
|
||||
0xaaaaaaaa, /* PA_SC_EDGERULE */
|
||||
0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
|
||||
0x0000000f, /* CB_TARGET_MASK */
|
||||
0x0000000f, /* CB_SHADER_MASK */
|
||||
|
||||
0xc0226900,
|
||||
0x00000094,
|
||||
0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
|
||||
0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x00000000, /* PA_SC_VPORT_ZMIN_0 */
|
||||
0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
|
||||
|
||||
0xc0016900,
|
||||
0x000000d4,
|
||||
0x00000000, /* SX_MISC */
|
||||
|
||||
0xc0026900,
|
||||
0x000000d9,
|
||||
0x00000000, /* CP_RINGID */
|
||||
0x00000000, /* CP_VMID */
|
||||
|
||||
0xc0096900,
|
||||
0x00000100,
|
||||
0x00ffffff, /* VGT_MAX_VTX_INDX */
|
||||
0x00000000, /* VGT_MIN_VTX_INDX */
|
||||
0x00000000, /* VGT_INDX_OFFSET */
|
||||
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
|
||||
0x00000000, /* SX_ALPHA_TEST_CONTROL */
|
||||
0x00000000, /* CB_BLEND_RED */
|
||||
0x00000000, /* CB_BLEND_GREEN */
|
||||
0x00000000, /* CB_BLEND_BLUE */
|
||||
0x00000000, /* CB_BLEND_ALPHA */
|
||||
|
||||
0xc0016900,
|
||||
0x00000187,
|
||||
0x00000100, /* SPI_VS_OUT_ID_0 */
|
||||
|
||||
0xc0026900,
|
||||
0x00000191,
|
||||
0x00000100, /* SPI_PS_INPUT_CNTL_0 */
|
||||
0x00000101, /* SPI_PS_INPUT_CNTL_1 */
|
||||
|
||||
0xc0016900,
|
||||
0x000001b1,
|
||||
0x00000000, /* SPI_VS_OUT_CONFIG */
|
||||
|
||||
0xc0106900,
|
||||
0x000001b3,
|
||||
0x20000001, /* SPI_PS_IN_CONTROL_0 */
|
||||
0x00000000, /* SPI_PS_IN_CONTROL_1 */
|
||||
0x00000000, /* SPI_INTERP_CONTROL_0 */
|
||||
0x00000000, /* SPI_INPUT_Z */
|
||||
0x00000000, /* SPI_FOG_CNTL */
|
||||
0x00100000, /* SPI_BARYC_CNTL */
|
||||
0x00000000, /* SPI_PS_IN_CONTROL_2 */
|
||||
0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
|
||||
0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
|
||||
0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
|
||||
0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
|
||||
0x00000000, /* SPI_GPR_MGMT */
|
||||
0x00000000, /* SPI_LDS_MGMT */
|
||||
0x00000000, /* SPI_STACK_MGMT */
|
||||
0x00000000, /* SPI_WAVE_MGMT_1 */
|
||||
0x00000000, /* SPI_WAVE_MGMT_2 */
|
||||
|
||||
0xc0016900,
|
||||
0x000001e0,
|
||||
0x00000000, /* CB_BLEND0_CONTROL */
|
||||
|
||||
0xc00e6900,
|
||||
0x00000200,
|
||||
0x00000000, /* DB_DEPTH_CONTROL */
|
||||
0x00000000, /* DB_EQAA */
|
||||
0x00cc0010, /* CB_COLOR_CONTROL */
|
||||
0x00000210, /* DB_SHADER_CONTROL */
|
||||
0x00010000, /* PA_CL_CLIP_CNTL */
|
||||
0x00000004, /* PA_SU_SC_MODE_CNTL */
|
||||
0x00000100, /* PA_CL_VTE_CNTL */
|
||||
0x00000000, /* PA_CL_VS_OUT_CNTL */
|
||||
0x00000000, /* PA_CL_NANINF_CNTL */
|
||||
0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
|
||||
0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
|
||||
0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
|
||||
0x00000000, /* */
|
||||
0x00000000, /* */
|
||||
|
||||
0xc0026900,
|
||||
0x00000229,
|
||||
0x00000000, /* SQ_PGM_START_FS */
|
||||
0x00000000,
|
||||
|
||||
0xc0016900,
|
||||
0x0000023b,
|
||||
0x00000000, /* SQ_LDS_ALLOC_PS */
|
||||
|
||||
0xc0066900,
|
||||
0x00000240,
|
||||
0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
||||
0xc0046900,
|
||||
0x00000247,
|
||||
0x00000000, /* SQ_GS_VERT_ITEMSIZE */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
||||
0xc0116900,
|
||||
0x00000280,
|
||||
0x00000000, /* PA_SU_POINT_SIZE */
|
||||
0x00000000, /* PA_SU_POINT_MINMAX */
|
||||
0x00000008, /* PA_SU_LINE_CNTL */
|
||||
0x00000000, /* PA_SC_LINE_STIPPLE */
|
||||
0x00000000, /* VGT_OUTPUT_PATH_CNTL */
|
||||
0x00000000, /* VGT_HOS_CNTL */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000, /* VGT_GS_MODE */
|
||||
|
||||
0xc0026900,
|
||||
0x00000292,
|
||||
0x00000000, /* PA_SC_MODE_CNTL_0 */
|
||||
0x00000000, /* PA_SC_MODE_CNTL_1 */
|
||||
|
||||
0xc0016900,
|
||||
0x000002a1,
|
||||
0x00000000, /* VGT_PRIMITIVEID_EN */
|
||||
|
||||
0xc0016900,
|
||||
0x000002a5,
|
||||
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
|
||||
|
||||
0xc0026900,
|
||||
0x000002a8,
|
||||
0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
|
||||
0x00000000,
|
||||
|
||||
0xc0026900,
|
||||
0x000002ad,
|
||||
0x00000000, /* VGT_REUSE_OFF */
|
||||
0x00000000,
|
||||
|
||||
0xc0016900,
|
||||
0x000002d5,
|
||||
0x00000000, /* VGT_SHADER_STAGES_EN */
|
||||
|
||||
0xc0016900,
|
||||
0x000002dc,
|
||||
0x0000aa00, /* DB_ALPHA_TO_MASK */
|
||||
|
||||
0xc0066900,
|
||||
0x000002de,
|
||||
0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
||||
0xc0026900,
|
||||
0x000002e5,
|
||||
0x00000000, /* VGT_STRMOUT_CONFIG */
|
||||
0x00000000,
|
||||
|
||||
0xc01b6900,
|
||||
0x000002f5,
|
||||
0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
|
||||
0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
|
||||
0x00000000, /* PA_SC_LINE_CNTL */
|
||||
0x00000000, /* PA_SC_AA_CONFIG */
|
||||
0x00000005, /* PA_SU_VTX_CNTL */
|
||||
0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
|
||||
0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
|
||||
0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
|
||||
0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
|
||||
0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
|
||||
0xffffffff,
|
||||
|
||||
0xc0026900,
|
||||
0x00000316,
|
||||
0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
|
||||
0x00000010, /* */
|
||||
};
|
||||
|
||||
const u32 cayman_vs[] =
|
||||
{
|
||||
0x00000004,
|
||||
0x80400400,
|
||||
0x0000a03c,
|
||||
0x95000688,
|
||||
0x00004000,
|
||||
0x15000688,
|
||||
0x00000000,
|
||||
0x88000000,
|
||||
0x04000000,
|
||||
0x67961001,
|
||||
#ifdef __BIG_ENDIAN
|
||||
0x00020000,
|
||||
#else
|
||||
0x00000000,
|
||||
#endif
|
||||
0x00000000,
|
||||
0x04000000,
|
||||
0x67961000,
|
||||
#ifdef __BIG_ENDIAN
|
||||
0x00020008,
|
||||
#else
|
||||
0x00000008,
|
||||
#endif
|
||||
0x00000000,
|
||||
};
|
||||
|
||||
const u32 cayman_ps[] =
|
||||
{
|
||||
0x00000004,
|
||||
0xa00c0000,
|
||||
0x00000008,
|
||||
0x80400000,
|
||||
0x00000000,
|
||||
0x95000688,
|
||||
0x00000000,
|
||||
0x88000000,
|
||||
0x00380400,
|
||||
0x00146b10,
|
||||
0x00380000,
|
||||
0x20146b10,
|
||||
0x00380400,
|
||||
0x40146b00,
|
||||
0x80380000,
|
||||
0x60146b00,
|
||||
0x00000010,
|
||||
0x000d1000,
|
||||
0xb0800000,
|
||||
0x00000000,
|
||||
};
|
||||
|
||||
const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
|
||||
const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
|
||||
const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
|
drivers/video/drm/radeon/cayman_blit_shaders.h (new file, 35 lines)
@@ -0,0 +1,35 @@
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef CAYMAN_BLIT_SHADERS_H
#define CAYMAN_BLIT_SHADERS_H

extern const u32 cayman_ps[];
extern const u32 cayman_vs[];
extern const u32 cayman_default_state[];

extern const u32 cayman_ps_size, cayman_vs_size;
extern const u32 cayman_default_size;

#endif
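The tables exported by this header are consumed by the blit code, which streams the canned register state and the VS/PS programs to the command processor before a copy. Below is a minimal sketch of that pattern using only the radeon_ring_write() helper that appears elsewhere in this commit; the function name and the bare loops (no ring locking, no packet framing) are illustrative assumptions, not the driver's actual state-emission routine.

/* Hypothetical illustration: push the canned Cayman blit state and shader
 * programs into the CP ring.  Sizes are in dwords (ARRAY_SIZE of the tables). */
static void example_emit_cayman_blit_state(struct radeon_device *rdev)
{
    u32 i;

    for (i = 0; i < cayman_default_size; i++)
        radeon_ring_write(rdev, cayman_default_state[i]);   /* register state */
    for (i = 0; i < cayman_vs_size; i++)
        radeon_ring_write(rdev, cayman_vs[i]);              /* vertex shader */
    for (i = 0; i < cayman_ps_size; i++)
        radeon_ring_write(rdev, cayman_ps[i]);              /* pixel shader */
}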
@@ -281,7 +281,6 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
    }
}

#if 0
void evergreen_hpd_init(struct radeon_device *rdev)
{
    struct drm_device *dev = rdev->ddev;
@@ -320,10 +319,12 @@ void evergreen_hpd_init(struct radeon_device *rdev)
            break;
        }
    }
    if (rdev->irq.installed)
        evergreen_irq_set(rdev);
//  if (rdev->irq.installed)
//      evergreen_irq_set(rdev);
}

#if 0

void evergreen_hpd_fini(struct radeon_device *rdev)
{
    struct drm_device *dev = rdev->ddev;
@@ -2240,6 +2241,9 @@ int evergreen_mc_init(struct radeon_device *rdev)

    /* Get VRAM informations */
    rdev->mc.vram_is_ddr = true;
    if (rdev->flags & RADEON_IS_IGP)
        tmp = RREG32(FUS_MC_ARB_RAMCFG);
    else
        tmp = RREG32(MC_ARB_RAMCFG);
    if (tmp & CHANSIZE_OVERRIDE) {
        chansize = 16;
@@ -466,7 +466,7 @@
#define IH_RB_WPTR_ADDR_LO                0x3e14
#define IH_CNTL                           0x3e18
#       define ENABLE_INTR                (1 << 0)
#       define IH_MC_SWAP(x)              ((x) << 2)
#       define IH_MC_SWAP(x)              ((x) << 1)
#       define IH_MC_SWAP_NONE            0
#       define IH_MC_SWAP_16BIT           1
#       define IH_MC_SWAP_32BIT           2
@@ -547,7 +547,7 @@
#       define LB_D5_VBLANK_INTERRUPT     (1 << 3)
#       define DC_HPD5_INTERRUPT          (1 << 17)
#       define DC_HPD5_RX_INTERRUPT       (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE5   0x6050
#define DISP_INTERRUPT_STATUS_CONTINUE5   0x6150
#       define LB_D6_VLINE_INTERRUPT      (1 << 2)
#       define LB_D6_VBLANK_INTERRUPT     (1 << 3)
#       define DC_HPD6_INTERRUPT          (1 << 17)
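These IH (interrupt handler) ring defines are what the r600_irq_init()/r600_enable_interrupts() code added later in this commit programs; the first hunk also changes the IH_MC_SWAP shift. A minimal sketch of how the bits combine, mirroring that code (illustration only; the real path also configures IH_RB_CNTL and the ring base/write-pointer registers):

/* Illustration: read-modify-write IH_CNTL to turn the interrupt ring on,
 * requesting 32-bit byte swapping only on big-endian hosts. */
static void example_enable_ih(struct radeon_device *rdev)
{
    u32 ih_cntl = RREG32(IH_CNTL);

#ifdef __BIG_ENDIAN
    ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);  /* uses the corrected (x) << 1 encoding */
#endif
    ih_cntl |= ENABLE_INTR;                   /* bit 0 enables the IH */
    WREG32(IH_CNTL, ih_cntl);
    rdev->ih.enabled = true;
}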
@ -114,11 +114,29 @@ ___start_builtin_fw:
|
||||
dd SUMOME_START
|
||||
dd (SUMOME_END - SUMOME_START)
|
||||
|
||||
|
||||
dd FIRMWARE_SUMO2_ME
|
||||
dd SUMO2ME_START
|
||||
dd (SUMO2ME_END - SUMO2ME_START)
|
||||
|
||||
|
||||
macro ni_code [arg]
|
||||
{
|
||||
dd FIRMWARE_#arg#_ME
|
||||
dd arg#ME_START
|
||||
dd (arg#ME_END - arg#ME_START)
|
||||
|
||||
dd FIRMWARE_#arg#_PFP
|
||||
dd arg#PFP_START
|
||||
dd (arg#PFP_END - arg#PFP_START)
|
||||
|
||||
dd FIRMWARE_#arg#_MC
|
||||
dd arg#MC_START
|
||||
dd (arg#MC_END - arg#MC_START)
|
||||
|
||||
}
|
||||
|
||||
ni_code BARTS, TURKS, CAICOS, CAYMAN
|
||||
|
||||
dd FIRMWARE_RV610_PFP
|
||||
dd RV610PFP_START
|
||||
dd (RV610PFP_END - RV610PFP_START)
|
||||
@ -180,6 +198,9 @@ ___start_builtin_fw:
|
||||
dd SUMO2PFP_START
|
||||
dd (SUMO2PFP_END - SUMO2PFP_START)
|
||||
|
||||
dd FIRMWARE_BARTS_PFP
|
||||
dd BARTSPFP_START
|
||||
dd (BARTSPFP_END - BARTSPFP_START)
|
||||
|
||||
|
||||
dd FIRMWARE_R600_RLC
|
||||
@ -206,10 +227,15 @@ ___start_builtin_fw:
|
||||
dd JUNIPERRLC_START
|
||||
dd (JUNIPERRLC_END - JUNIPERRLC_START)
|
||||
|
||||
dd FIRMWARE_BTC_RLC
|
||||
dd BTCRLC_START
|
||||
dd (BTCRLC_END - BTCRLC_START)
|
||||
|
||||
dd FIRMWARE_SUMO_RLC
|
||||
dd SUMORLC_START
|
||||
dd (SUMORLC_END - SUMORLC_START)
|
||||
|
||||
|
||||
___end_builtin_fw:
|
||||
|
||||
|
||||
@ -223,7 +249,6 @@ FIRMWARE_RS600_CP db 'radeon/RS600_cp.bin',0
|
||||
FIRMWARE_RS690_CP db 'radeon/RS690_cp.bin',0
|
||||
|
||||
FIRMWARE_RS780_ME db 'radeon/RS780_me.bin',0
|
||||
FIRMWARE_RS780_PFP db 'radeon/RS780_pfp.bin',0
|
||||
|
||||
FIRMWARE_R600_ME db 'radeon/RV600_me.bin',0
|
||||
FIRMWARE_RV610_ME db 'radeon/RV610_me.bin',0
|
||||
@ -234,6 +259,7 @@ FIRMWARE_RV670_ME db 'radeon/RV670_me.bin',0
|
||||
FIRMWARE_RV710_ME db 'radeon/RV710_me.bin',0
|
||||
FIRMWARE_RV730_ME db 'radeon/RV730_me.bin',0
|
||||
FIRMWARE_RV770_ME db 'radeon/RV770_me.bin',0
|
||||
|
||||
FIRMWARE_CYPRESS_ME db 'radeon/CYPRESS_me.bin',0
|
||||
FIRMWARE_REDWOOD_ME db 'radeon/REDWOOD_me.bin',0
|
||||
FIRMWARE_CEDAR_ME db 'radeon/CEDAR_me.bin',0
|
||||
@ -242,7 +268,13 @@ FIRMWARE_PALM_ME db 'radeon/PALM_me.bin',0
|
||||
FIRMWARE_SUMO_ME db 'radeon/SUMO_me.bin',0
|
||||
FIRMWARE_SUMO2_ME db 'radeon/SUMO2_me.bin',0
|
||||
|
||||
FIRMWARE_BARTS_ME db 'radeon/BARTS_me.bin',0
|
||||
FIRMWARE_TURKS_ME db 'radeon/TURKS_me.bin',0
|
||||
FIRMWARE_CAICOS_ME db 'radeon/CAICOS_me.bin',0
|
||||
FIRMWARE_CAYMAN_ME db 'radeon/CAYMAN_me.bin',0
|
||||
|
||||
|
||||
FIRMWARE_RS780_PFP db 'radeon/RS780_pfp.bin',0
|
||||
FIRMWARE_R600_PFP db 'radeon/R600_pfp.bin',0
|
||||
FIRMWARE_RV610_PFP db 'radeon/RV610_pfp.bin',0
|
||||
FIRMWARE_RV620_PFP db 'radeon/RV620_pfp.bin',0
|
||||
@ -252,6 +284,7 @@ FIRMWARE_RV670_PFP db 'radeon/RV670_pfp.bin',0
|
||||
FIRMWARE_RV710_PFP db 'radeon/RV710_pfp.bin',0
|
||||
FIRMWARE_RV730_PFP db 'radeon/RV730_pfp.bin',0
|
||||
FIRMWARE_RV770_PFP db 'radeon/RV770_pfp.bin',0
|
||||
|
||||
FIRMWARE_CYPRESS_PFP db 'radeon/CYPRESS_pfp.bin',0
|
||||
FIRMWARE_REDWOOD_PFP db 'radeon/REDWOOD_pfp.bin',0
|
||||
FIRMWARE_CEDAR_PFP db 'radeon/CEDAR_pfp.bin',0
|
||||
@ -260,6 +293,11 @@ FIRMWARE_PALM_PFP db 'radeon/PALM_pfp.bin',0
|
||||
FIRMWARE_SUMO_PFP db 'radeon/SUMO_pfp.bin',0
|
||||
FIRMWARE_SUMO2_PFP db 'radeon/SUMO2_pfp.bin',0
|
||||
|
||||
FIRMWARE_BARTS_PFP db 'radeon/BARTS_pfp.bin',0
|
||||
FIRMWARE_TURKS_PFP db 'radeon/TURKS_pfp.bin',0
|
||||
FIRMWARE_CAICOS_PFP db 'radeon/CAICOS_pfp.bin',0
|
||||
FIRMWARE_CAYMAN_PFP db 'radeon/CAYMAN_pfp.bin',0
|
||||
|
||||
|
||||
FIRMWARE_R600_RLC db 'radeon/R600_rlc.bin',0
|
||||
FIRMWARE_R700_RLC db 'radeon/R700_rlc.bin',0
|
||||
@ -268,6 +306,14 @@ FIRMWARE_REDWOOD_RLC db 'radeon/REDWOOD_rlc.bin',0
|
||||
FIRMWARE_CEDAR_RLC db 'radeon/CEDAR_rlc.bin',0
|
||||
FIRMWARE_JUNIPER_RLC db 'radeon/JUNIPER_rlc.bin',0
|
||||
FIRMWARE_SUMO_RLC db 'radeon/SUMO_rlc.bin',0
|
||||
FIRMWARE_BTC_RLC db 'radeon/BTC_rlc.bin',0
|
||||
FIRMWARE_CAYMAN_RLC db 'radeon/CAYMAN_rlc.bin',0
|
||||
|
||||
|
||||
FIRMWARE_BARTS_MC db 'radeon/BARTS_mc.bin',0
|
||||
FIRMWARE_TURKS_MC db 'radeon/TURKS_mc.bin',0
|
||||
FIRMWARE_CAICOS_MC db 'radeon/CAICOS_mc.bin',0
|
||||
FIRMWARE_CAYMAN_MC db 'radeon/CAYMAN_mc.bin',0
|
||||
|
||||
|
||||
align 16
|
||||
@ -396,6 +442,26 @@ SUMO2ME_START:
|
||||
file 'firmware/SUMO2_me.bin'
|
||||
SUMO2ME_END:
|
||||
|
||||
align 16
|
||||
BARTSME_START:
|
||||
file 'firmware/BARTS_me.bin'
|
||||
BARTSME_END:
|
||||
|
||||
align 16
|
||||
TURKSME_START:
|
||||
file 'firmware/TURKS_me.bin'
|
||||
TURKSME_END:
|
||||
|
||||
align 16
|
||||
CAICOSME_START:
|
||||
file 'firmware/CAICOS_me.bin'
|
||||
CAICOSME_END:
|
||||
|
||||
align 16
|
||||
CAYMANME_START:
|
||||
file 'firmware/CAYMAN_me.bin'
|
||||
CAYMANME_END:
|
||||
|
||||
|
||||
align 16
|
||||
RV610PFP_START:
|
||||
@ -476,7 +542,25 @@ SUMO2PFP_START:
|
||||
file 'firmware/SUMO2_pfp.bin'
|
||||
SUMO2PFP_END:
|
||||
|
||||
align 16
|
||||
BARTSPFP_START:
|
||||
file 'firmware/BARTS_pfp.bin'
|
||||
BARTSPFP_END:
|
||||
|
||||
align 16
|
||||
TURKSPFP_START:
|
||||
file 'firmware/TURKS_pfp.bin'
|
||||
TURKSPFP_END:
|
||||
|
||||
align 16
|
||||
CAICOSPFP_START:
|
||||
file 'firmware/CAICOS_pfp.bin'
|
||||
CAICOSPFP_END:
|
||||
|
||||
align 16
|
||||
CAYMANPFP_START:
|
||||
file 'firmware/CAYMAN_pfp.bin'
|
||||
CAYMANPFP_END:
|
||||
|
||||
align 16
|
||||
R600RLC_START:
|
||||
@ -512,3 +596,34 @@ align 16
|
||||
SUMORLC_START:
|
||||
file 'firmware/SUMO_rlc.bin'
|
||||
SUMORLC_END:
|
||||
|
||||
align 16
|
||||
BTCRLC_START:
|
||||
file 'firmware/BTC_rlc.bin'
|
||||
BTCRLC_END:
|
||||
|
||||
align 16
|
||||
CAYMANRLC_START:
|
||||
file 'firmware/CAYMAN_rlc.bin'
|
||||
CAYMANRLC_END:
|
||||
|
||||
|
||||
align 16
|
||||
BARTSMC_START:
|
||||
file 'firmware/BARTS_mc.bin'
|
||||
BARTSMC_END:
|
||||
|
||||
align 16
|
||||
TURKSMC_START:
|
||||
file 'firmware/TURKS_mc.bin'
|
||||
TURKSMC_END:
|
||||
|
||||
align 16
|
||||
CAICOSMC_START:
|
||||
file 'firmware/CAICOS_mc.bin'
|
||||
CAICOSMC_END:
|
||||
|
||||
align 16
|
||||
CAYMANMC_START:
|
||||
file 'firmware/CAYMAN_mc.bin'
|
||||
CAYMANMC_END:
|
||||
|
@@ -31,7 +31,7 @@
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
//#include "cayman_blit_shaders.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
@ -389,7 +389,6 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
#if 0
|
||||
/*
|
||||
* Core functions
|
||||
*/
|
||||
@ -1029,12 +1028,6 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
|
||||
}
|
||||
}
|
||||
|
||||
void cayman_pcie_gart_fini(struct radeon_device *rdev)
|
||||
{
|
||||
cayman_pcie_gart_disable(rdev);
|
||||
radeon_gart_table_vram_free(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
}
|
||||
|
||||
/*
|
||||
* CP.
|
||||
@ -1044,7 +1037,6 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
|
||||
if (enable)
|
||||
WREG32(CP_ME_CNTL, 0);
|
||||
else {
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
|
||||
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
|
||||
WREG32(SCRATCH_UMSK, 0);
|
||||
}
|
||||
@ -1142,11 +1134,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cayman_cp_fini(struct radeon_device *rdev)
|
||||
{
|
||||
cayman_cp_enable(rdev, false);
|
||||
radeon_ring_fini(rdev);
|
||||
}
|
||||
|
||||
|
||||
int cayman_cp_resume(struct radeon_device *rdev)
|
||||
{
|
||||
@ -1388,26 +1376,10 @@ static int cayman_startup(struct radeon_device *rdev)
|
||||
return r;
|
||||
cayman_gpu_init(rdev);
|
||||
|
||||
r = evergreen_blit_init(rdev);
|
||||
if (r) {
|
||||
evergreen_blit_fini(rdev);
|
||||
rdev->asic->copy = NULL;
|
||||
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
|
||||
}
|
||||
|
||||
/* allocate wb buffer */
|
||||
r = radeon_wb_init(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* Enable IRQ */
|
||||
r = r600_irq_init(rdev);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: IH init failed (%d).\n", r);
|
||||
radeon_irq_kms_fini(rdev);
|
||||
return r;
|
||||
}
|
||||
evergreen_irq_set(rdev);
|
||||
|
||||
r = radeon_ring_init(rdev, rdev->cp.ring_size);
|
||||
if (r)
|
||||
@ -1422,53 +1394,9 @@ static int cayman_startup(struct radeon_device *rdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cayman_resume(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
|
||||
* posting will perform necessary task to bring back GPU into good
|
||||
* shape.
|
||||
*/
|
||||
/* post card */
|
||||
atom_asic_init(rdev->mode_info.atom_context);
|
||||
|
||||
r = cayman_startup(rdev);
|
||||
if (r) {
|
||||
DRM_ERROR("cayman startup failed on resume\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = r600_ib_test(rdev);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
return r;
|
||||
|
||||
}
|
||||
|
||||
int cayman_suspend(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
/* FIXME: we should wait for ring to be empty */
|
||||
cayman_cp_enable(rdev, false);
|
||||
rdev->cp.ready = false;
|
||||
evergreen_irq_suspend(rdev);
|
||||
radeon_wb_disable(rdev);
|
||||
cayman_pcie_gart_disable(rdev);
|
||||
|
||||
/* unpin shaders bo */
|
||||
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
|
||||
if (likely(r == 0)) {
|
||||
radeon_bo_unpin(rdev->r600_blit.shader_obj);
|
||||
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Plan is to move initialization in that function and use
|
||||
* helper function so that radeon_device_init pretty much
|
||||
@ -1514,9 +1442,6 @@ int cayman_init(struct radeon_device *rdev)
|
||||
/* Initialize clocks */
|
||||
radeon_get_clock_info(rdev->ddev);
|
||||
/* Fence driver */
|
||||
r = radeon_fence_driver_init(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
/* initialize memory controller */
|
||||
r = evergreen_mc_init(rdev);
|
||||
if (r)
|
||||
@ -1526,15 +1451,10 @@ int cayman_init(struct radeon_device *rdev)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = radeon_irq_kms_init(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
rdev->cp.ring_obj = NULL;
|
||||
r600_ring_init(rdev, 1024 * 1024);
|
||||
|
||||
rdev->ih.ring_obj = NULL;
|
||||
r600_ih_ring_init(rdev, 64 * 1024);
|
||||
|
||||
r = r600_pcie_gart_init(rdev);
|
||||
if (r)
|
||||
@ -1544,24 +1464,9 @@ int cayman_init(struct radeon_device *rdev)
|
||||
r = cayman_startup(rdev);
|
||||
if (r) {
|
||||
dev_err(rdev->dev, "disabling GPU acceleration\n");
|
||||
cayman_cp_fini(rdev);
|
||||
r600_irq_fini(rdev);
|
||||
radeon_wb_fini(rdev);
|
||||
radeon_irq_kms_fini(rdev);
|
||||
cayman_pcie_gart_fini(rdev);
|
||||
rdev->accel_working = false;
|
||||
}
|
||||
if (rdev->accel_working) {
|
||||
r = radeon_ib_pool_init(rdev);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
|
||||
rdev->accel_working = false;
|
||||
}
|
||||
r = r600_ib_test(rdev);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
|
||||
rdev->accel_working = false;
|
||||
}
|
||||
}
|
||||
|
||||
/* Don't start up if the MC ucode is missing.
|
||||
@ -1576,19 +1481,3 @@ int cayman_init(struct radeon_device *rdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void cayman_fini(struct radeon_device *rdev)
|
||||
{
|
||||
evergreen_blit_fini(rdev);
|
||||
cayman_cp_fini(rdev);
|
||||
r600_irq_fini(rdev);
|
||||
radeon_wb_fini(rdev);
|
||||
radeon_irq_kms_fini(rdev);
|
||||
cayman_pcie_gart_fini(rdev);
|
||||
radeon_gem_fini(rdev);
|
||||
radeon_fence_driver_fini(rdev);
|
||||
radeon_bo_fini(rdev);
|
||||
radeon_atombios_fini(rdev);
|
||||
kfree(rdev->bios);
|
||||
rdev->bios = NULL;
|
||||
}
|
||||
#endif
|
||||
|
@@ -320,7 +320,7 @@
#define CGTS_USER_TCC_DISABLE             0x914C
#define     TCC_DISABLE_MASK              0xFFFF0000
#define     TCC_DISABLE_SHIFT             16
#define CGTS_SM_CTRL_REG                  0x915C
#define CGTS_SM_CTRL_REG                  0x9150
#define     OVERRIDE                      (1 << 21)

#define TA_CNTL_AUX                       0x9508
@@ -208,7 +208,7 @@ static void pci_read_irq(struct pci_dev *dev)
    irq = PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN);
    dev->pin = irq;
    if (irq)
        PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE);
        irq = PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE);
    dev->irq = irq;
};
@@ -3551,9 +3551,6 @@ int r100_init(struct radeon_device *rdev)
    if (r) {
        /* Somethings want wront with the accel init stop accel */
        dev_err(rdev->dev, "Disabling GPU acceleration\n");
//      r100_cp_fini(rdev);
//      r100_wb_fini(rdev);
//      r100_ib_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
            r100_pci_gart_fini(rdev);
        rdev->accel_working = false;
@@ -289,28 +289,28 @@ void r600_hpd_init(struct radeon_device *rdev)
        switch (radeon_connector->hpd.hpd) {
        case RADEON_HPD_1:
            WREG32(DC_HPD1_CONTROL, tmp);
//          rdev->irq.hpd[0] = true;
            rdev->irq.hpd[0] = true;
            break;
        case RADEON_HPD_2:
            WREG32(DC_HPD2_CONTROL, tmp);
//          rdev->irq.hpd[1] = true;
            rdev->irq.hpd[1] = true;
            break;
        case RADEON_HPD_3:
            WREG32(DC_HPD3_CONTROL, tmp);
//          rdev->irq.hpd[2] = true;
            rdev->irq.hpd[2] = true;
            break;
        case RADEON_HPD_4:
            WREG32(DC_HPD4_CONTROL, tmp);
//          rdev->irq.hpd[3] = true;
            rdev->irq.hpd[3] = true;
            break;
            /* DCE 3.2 */
        case RADEON_HPD_5:
            WREG32(DC_HPD5_CONTROL, tmp);
//          rdev->irq.hpd[4] = true;
            rdev->irq.hpd[4] = true;
            break;
        case RADEON_HPD_6:
            WREG32(DC_HPD6_CONTROL, tmp);
//          rdev->irq.hpd[5] = true;
            rdev->irq.hpd[5] = true;
            break;
        default:
            break;
@@ -322,23 +322,23 @@ void r600_hpd_init(struct radeon_device *rdev)
        switch (radeon_connector->hpd.hpd) {
        case RADEON_HPD_1:
            WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
//          rdev->irq.hpd[0] = true;
            rdev->irq.hpd[0] = true;
            break;
        case RADEON_HPD_2:
            WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
//          rdev->irq.hpd[1] = true;
            rdev->irq.hpd[1] = true;
            break;
        case RADEON_HPD_3:
            WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
//          rdev->irq.hpd[2] = true;
            rdev->irq.hpd[2] = true;
            break;
        default:
            break;
        }
    }
    }
//  if (rdev->irq.installed)
//      r600_irq_set(rdev);
    if (rdev->irq.installed)
        r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
@@ -352,28 +352,28 @@ void r600_hpd_fini(struct radeon_device *rdev)
        switch (radeon_connector->hpd.hpd) {
        case RADEON_HPD_1:
            WREG32(DC_HPD1_CONTROL, 0);
//          rdev->irq.hpd[0] = false;
            rdev->irq.hpd[0] = false;
            break;
        case RADEON_HPD_2:
            WREG32(DC_HPD2_CONTROL, 0);
//          rdev->irq.hpd[1] = false;
            rdev->irq.hpd[1] = false;
            break;
        case RADEON_HPD_3:
            WREG32(DC_HPD3_CONTROL, 0);
//          rdev->irq.hpd[2] = false;
            rdev->irq.hpd[2] = false;
            break;
        case RADEON_HPD_4:
            WREG32(DC_HPD4_CONTROL, 0);
//          rdev->irq.hpd[3] = false;
            rdev->irq.hpd[3] = false;
            break;
            /* DCE 3.2 */
        case RADEON_HPD_5:
            WREG32(DC_HPD5_CONTROL, 0);
//          rdev->irq.hpd[4] = false;
            rdev->irq.hpd[4] = false;
            break;
        case RADEON_HPD_6:
            WREG32(DC_HPD6_CONTROL, 0);
//          rdev->irq.hpd[5] = false;
            rdev->irq.hpd[5] = false;
            break;
        default:
            break;
@@ -385,15 +385,15 @@ void r600_hpd_fini(struct radeon_device *rdev)
        switch (radeon_connector->hpd.hpd) {
        case RADEON_HPD_1:
            WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
//          rdev->irq.hpd[0] = false;
            rdev->irq.hpd[0] = false;
            break;
        case RADEON_HPD_2:
            WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
//          rdev->irq.hpd[1] = false;
            rdev->irq.hpd[1] = false;
            break;
        case RADEON_HPD_3:
            WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
//          rdev->irq.hpd[2] = false;
            rdev->irq.hpd[2] = false;
            break;
        default:
            break;
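Taken together, these r600_hpd_init()/r600_hpd_fini() hunks re-enable the hotplug plumbing: each connector's HPD pin is flagged in rdev->irq.hpd[] and, once the IRQ handler is installed, r600_irq_set() turns those flags into DC_HPDx_INT_EN register writes. A condensed sketch of that enable path for one pin, using only calls that appear in this diff (connector iteration and the DCE3 register differences omitted); the 'tmp' parameter stands for the connection-detect value the real code builds:

/* Illustration of the flow restored by this commit for one HPD pin. */
static void example_enable_one_hpd(struct radeon_device *rdev, u32 tmp)
{
    WREG32(DC_HPD1_CONTROL, tmp);    /* hardware: enable the pin */
    rdev->irq.hpd[0] = true;         /* software: request its interrupt */

    if (rdev->irq.installed)         /* reprogram the interrupt controller */
        r600_irq_set(rdev);
}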
@@ -1955,9 +1955,9 @@ int r600_init(struct radeon_device *rdev)
            DRM_ERROR("Failed to register debugfs file for mc !\n");
    }
    /* This don't do much */
//  r = radeon_gem_init(rdev);
//  if (r)
//      return r;
    r = radeon_gem_init(rdev);
    if (r)
        return r;
    /* Read BIOS */
    if (!radeon_get_bios(rdev)) {
        if (ASIC_IS_AVIVO(rdev))
@@ -1987,9 +1987,9 @@ int r600_init(struct radeon_device *rdev)
    /* Initialize clocks */
    radeon_get_clock_info(rdev->ddev);
    /* Fence driver */
//  r = radeon_fence_driver_init(rdev);
//  if (r)
//      return r;
    r = radeon_fence_driver_init(rdev);
    if (r)
        return r;
    if (rdev->flags & RADEON_IS_AGP) {
        r = radeon_agp_init(rdev);
        if (r)
@@ -2003,15 +2003,15 @@ int r600_init(struct radeon_device *rdev)
    if (r)
        return r;

//  r = radeon_irq_kms_init(rdev);
//  if (r)
//      return r;
    r = radeon_irq_kms_init(rdev);
    if (r)
        return r;

    rdev->cp.ring_obj = NULL;
    r600_ring_init(rdev, 1024 * 1024);

//  rdev->ih.ring_obj = NULL;
//  r600_ih_ring_init(rdev, 64 * 1024);
    rdev->ih.ring_obj = NULL;
    r600_ih_ring_init(rdev, 64 * 1024);

    r = r600_pcie_gart_init(rdev);
    if (r)
@ -2039,9 +2039,268 @@ int r600_init(struct radeon_device *rdev)
|
||||
// rdev->accel_working = false;
|
||||
// }
|
||||
}
|
||||
if (r)
|
||||
return r; /* TODO error handling */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* CS stuff
|
||||
*/
|
||||
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
|
||||
{
|
||||
/* FIXME: implement */
|
||||
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
|
||||
radeon_ring_write(rdev,
|
||||
#ifdef __BIG_ENDIAN
|
||||
(2 << 0) |
|
||||
#endif
|
||||
(ib->gpu_addr & 0xFFFFFFFC));
|
||||
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
|
||||
radeon_ring_write(rdev, ib->length_dw);
|
||||
}
|
||||
|
||||
int r600_ib_test(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ib *ib;
|
||||
uint32_t scratch;
|
||||
uint32_t tmp = 0;
|
||||
unsigned i;
|
||||
int r;
|
||||
|
||||
r = radeon_scratch_get(rdev, &scratch);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
WREG32(scratch, 0xCAFEDEAD);
|
||||
r = radeon_ib_get(rdev, &ib);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
|
||||
ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
|
||||
ib->ptr[2] = 0xDEADBEEF;
|
||||
ib->ptr[3] = PACKET2(0);
|
||||
ib->ptr[4] = PACKET2(0);
|
||||
ib->ptr[5] = PACKET2(0);
|
||||
ib->ptr[6] = PACKET2(0);
|
||||
ib->ptr[7] = PACKET2(0);
|
||||
ib->ptr[8] = PACKET2(0);
|
||||
ib->ptr[9] = PACKET2(0);
|
||||
ib->ptr[10] = PACKET2(0);
|
||||
ib->ptr[11] = PACKET2(0);
|
||||
ib->ptr[12] = PACKET2(0);
|
||||
ib->ptr[13] = PACKET2(0);
|
||||
ib->ptr[14] = PACKET2(0);
|
||||
ib->ptr[15] = PACKET2(0);
|
||||
ib->length_dw = 16;
|
||||
r = radeon_ib_schedule(rdev, ib);
|
||||
if (r) {
|
||||
radeon_scratch_free(rdev, scratch);
|
||||
radeon_ib_free(rdev, &ib);
|
||||
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_fence_wait(ib->fence, false);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = RREG32(scratch);
|
||||
if (tmp == 0xDEADBEEF)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
if (i < rdev->usec_timeout) {
|
||||
DRM_INFO("ib test succeeded in %u usecs\n", i);
|
||||
} else {
|
||||
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
|
||||
scratch, tmp);
|
||||
r = -EINVAL;
|
||||
}
|
||||
radeon_scratch_free(rdev, scratch);
|
||||
radeon_ib_free(rdev, &ib);
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupts
|
||||
*
|
||||
* Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
|
||||
* the same as the CP ring buffer, but in reverse. Rather than the CPU
|
||||
* writing to the ring and the GPU consuming, the GPU writes to the ring
|
||||
* and host consumes. As the host irq handler processes interrupts, it
|
||||
* increments the rptr. When the rptr catches up with the wptr, all the
|
||||
* current interrupts have been processed.
|
||||
*/
|
||||
|
||||
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
|
||||
{
|
||||
u32 rb_bufsz;
|
||||
|
||||
/* Align ring size */
|
||||
rb_bufsz = drm_order(ring_size / 4);
|
||||
ring_size = (1 << rb_bufsz) * 4;
|
||||
rdev->ih.ring_size = ring_size;
|
||||
rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
|
||||
rdev->ih.rptr = 0;
|
||||
}
|
||||
|
||||
static int r600_ih_ring_alloc(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
/* Allocate ring buffer */
|
||||
if (rdev->ih.ring_obj == NULL) {
|
||||
r = radeon_bo_create(rdev, rdev->ih.ring_size,
|
||||
PAGE_SIZE, true,
|
||||
RADEON_GEM_DOMAIN_GTT,
|
||||
&rdev->ih.ring_obj);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_bo_reserve(rdev->ih.ring_obj, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = radeon_bo_pin(rdev->ih.ring_obj,
|
||||
RADEON_GEM_DOMAIN_GTT,
|
||||
&rdev->ih.gpu_addr);
|
||||
if (r) {
|
||||
radeon_bo_unreserve(rdev->ih.ring_obj);
|
||||
DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_bo_kmap(rdev->ih.ring_obj,
|
||||
(void **)&rdev->ih.ring);
|
||||
radeon_bo_unreserve(rdev->ih.ring_obj);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void r600_ih_ring_fini(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
if (rdev->ih.ring_obj) {
|
||||
r = radeon_bo_reserve(rdev->ih.ring_obj, false);
|
||||
if (likely(r == 0)) {
|
||||
radeon_bo_kunmap(rdev->ih.ring_obj);
|
||||
radeon_bo_unpin(rdev->ih.ring_obj);
|
||||
radeon_bo_unreserve(rdev->ih.ring_obj);
|
||||
}
|
||||
radeon_bo_unref(&rdev->ih.ring_obj);
|
||||
rdev->ih.ring = NULL;
|
||||
rdev->ih.ring_obj = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void r600_rlc_stop(struct radeon_device *rdev)
|
||||
{
|
||||
|
||||
if ((rdev->family >= CHIP_RV770) &&
|
||||
(rdev->family <= CHIP_RV740)) {
|
||||
/* r7xx asics need to soft reset RLC before halting */
|
||||
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
udelay(15000);
|
||||
WREG32(SRBM_SOFT_RESET, 0);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
}
|
||||
|
||||
WREG32(RLC_CNTL, 0);
|
||||
}
|
||||
|
||||
static void r600_rlc_start(struct radeon_device *rdev)
|
||||
{
|
||||
WREG32(RLC_CNTL, RLC_ENABLE);
|
||||
}
|
||||
|
||||
static int r600_rlc_init(struct radeon_device *rdev)
|
||||
{
|
||||
u32 i;
|
||||
const __be32 *fw_data;
|
||||
|
||||
if (!rdev->rlc_fw)
|
||||
return -EINVAL;
|
||||
|
||||
r600_rlc_stop(rdev);
|
||||
|
||||
WREG32(RLC_HB_BASE, 0);
|
||||
WREG32(RLC_HB_CNTL, 0);
|
||||
WREG32(RLC_HB_RPTR, 0);
|
||||
WREG32(RLC_HB_WPTR, 0);
|
||||
if (rdev->family <= CHIP_CAICOS) {
|
||||
WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
|
||||
WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
|
||||
}
|
||||
WREG32(RLC_MC_CNTL, 0);
|
||||
WREG32(RLC_UCODE_CNTL, 0);
|
||||
|
||||
fw_data = (const __be32 *)rdev->rlc_fw->data;
|
||||
if (rdev->family >= CHIP_CAYMAN) {
|
||||
for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
|
||||
WREG32(RLC_UCODE_ADDR, i);
|
||||
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
|
||||
}
|
||||
} else if (rdev->family >= CHIP_CEDAR) {
|
||||
for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
|
||||
WREG32(RLC_UCODE_ADDR, i);
|
||||
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
|
||||
}
|
||||
} else if (rdev->family >= CHIP_RV770) {
|
||||
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
|
||||
WREG32(RLC_UCODE_ADDR, i);
|
||||
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < RLC_UCODE_SIZE; i++) {
|
||||
WREG32(RLC_UCODE_ADDR, i);
|
||||
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
|
||||
}
|
||||
}
|
||||
WREG32(RLC_UCODE_ADDR, 0);
|
||||
|
||||
r600_rlc_start(rdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void r600_enable_interrupts(struct radeon_device *rdev)
|
||||
{
|
||||
u32 ih_cntl = RREG32(IH_CNTL);
|
||||
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
|
||||
|
||||
ih_cntl |= ENABLE_INTR;
|
||||
ih_rb_cntl |= IH_RB_ENABLE;
|
||||
WREG32(IH_CNTL, ih_cntl);
|
||||
WREG32(IH_RB_CNTL, ih_rb_cntl);
|
||||
rdev->ih.enabled = true;
|
||||
}
|
||||
|
||||
void r600_disable_interrupts(struct radeon_device *rdev)
|
||||
{
|
||||
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
|
||||
u32 ih_cntl = RREG32(IH_CNTL);
|
||||
|
||||
ih_rb_cntl &= ~IH_RB_ENABLE;
|
||||
ih_cntl &= ~ENABLE_INTR;
|
||||
WREG32(IH_RB_CNTL, ih_rb_cntl);
|
||||
WREG32(IH_CNTL, ih_cntl);
|
||||
/* set rptr, wptr to 0 */
|
||||
WREG32(IH_RB_RPTR, 0);
|
||||
WREG32(IH_RB_WPTR, 0);
|
||||
rdev->ih.enabled = false;
|
||||
rdev->ih.wptr = 0;
|
||||
rdev->ih.rptr = 0;
|
||||
}
|
||||
|
||||
static void r600_disable_interrupt_state(struct radeon_device *rdev)
|
||||
{
|
||||
u32 tmp;
|
||||
@ -2080,12 +2339,523 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
|
||||
}
|
||||
}
|
||||
|
||||
int r600_irq_init(struct radeon_device *rdev)
|
||||
{
|
||||
int ret = 0;
|
||||
int rb_bufsz;
|
||||
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
|
||||
|
||||
/* allocate ring */
|
||||
ret = r600_ih_ring_alloc(rdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* disable irqs */
|
||||
r600_disable_interrupts(rdev);
|
||||
|
||||
/* init rlc */
|
||||
ret = r600_rlc_init(rdev);
|
||||
if (ret) {
|
||||
r600_ih_ring_fini(rdev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* setup interrupt control */
|
||||
/* set dummy read address to ring address */
|
||||
WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
|
||||
interrupt_cntl = RREG32(INTERRUPT_CNTL);
|
||||
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
|
||||
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
|
||||
*/
|
||||
interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
|
||||
/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
|
||||
interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
|
||||
WREG32(INTERRUPT_CNTL, interrupt_cntl);
|
||||
|
||||
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
|
||||
rb_bufsz = drm_order(rdev->ih.ring_size / 4);
|
||||
|
||||
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
|
||||
IH_WPTR_OVERFLOW_CLEAR |
|
||||
(rb_bufsz << 1));
|
||||
|
||||
if (rdev->wb.enabled)
|
||||
ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
|
||||
|
||||
/* set the writeback address whether it's enabled or not */
|
||||
WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
|
||||
WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
|
||||
|
||||
WREG32(IH_RB_CNTL, ih_rb_cntl);
|
||||
|
||||
/* set rptr, wptr to 0 */
|
||||
WREG32(IH_RB_RPTR, 0);
|
||||
WREG32(IH_RB_WPTR, 0);
|
||||
|
||||
/* Default settings for IH_CNTL (disabled at first) */
|
||||
ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
|
||||
/* RPTR_REARM only works if msi's are enabled */
|
||||
if (rdev->msi_enabled)
|
||||
ih_cntl |= RPTR_REARM;
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
|
||||
#endif
|
||||
WREG32(IH_CNTL, ih_cntl);
|
||||
|
||||
/* force the active interrupt state to all disabled */
|
||||
if (rdev->family >= CHIP_CEDAR)
|
||||
evergreen_disable_interrupt_state(rdev);
|
||||
else
|
||||
r600_disable_interrupt_state(rdev);
|
||||
|
||||
/* enable irqs */
|
||||
r600_enable_interrupts(rdev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
int r600_irq_set(struct radeon_device *rdev)
|
||||
{
|
||||
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
|
||||
u32 mode_int = 0;
|
||||
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
|
||||
u32 grbm_int_cntl = 0;
|
||||
u32 hdmi1, hdmi2;
|
||||
u32 d1grph = 0, d2grph = 0;
|
||||
|
||||
ENTER();
|
||||
|
||||
if (!rdev->irq.installed) {
|
||||
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
/* don't enable anything if the ih is disabled */
|
||||
if (!rdev->ih.enabled) {
|
||||
r600_disable_interrupts(rdev);
|
||||
/* force the active interrupt state to all disabled */
|
||||
r600_disable_interrupt_state(rdev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
|
||||
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
|
||||
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
|
||||
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
|
||||
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
|
||||
if (ASIC_IS_DCE32(rdev)) {
|
||||
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
|
||||
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
|
||||
}
|
||||
} else {
|
||||
hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
|
||||
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
|
||||
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
|
||||
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
|
||||
}
|
||||
|
||||
if (rdev->irq.sw_int) {
|
||||
DRM_DEBUG("r600_irq_set: sw int\n");
|
||||
cp_int_cntl |= RB_INT_ENABLE;
|
||||
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
|
||||
}
|
||||
if (rdev->irq.crtc_vblank_int[0] ||
|
||||
rdev->irq.pflip[0]) {
|
||||
DRM_DEBUG("r600_irq_set: vblank 0\n");
|
||||
mode_int |= D1MODE_VBLANK_INT_MASK;
|
||||
}
|
||||
if (rdev->irq.crtc_vblank_int[1] ||
|
||||
rdev->irq.pflip[1]) {
|
||||
DRM_DEBUG("r600_irq_set: vblank 1\n");
|
||||
mode_int |= D2MODE_VBLANK_INT_MASK;
|
||||
}
|
||||
if (rdev->irq.hpd[0]) {
|
||||
DRM_DEBUG("r600_irq_set: hpd 1\n");
|
||||
hpd1 |= DC_HPDx_INT_EN;
|
||||
}
|
||||
if (rdev->irq.hpd[1]) {
|
||||
DRM_DEBUG("r600_irq_set: hpd 2\n");
|
||||
hpd2 |= DC_HPDx_INT_EN;
|
||||
}
|
||||
if (rdev->irq.hpd[2]) {
|
||||
DRM_DEBUG("r600_irq_set: hpd 3\n");
|
||||
hpd3 |= DC_HPDx_INT_EN;
|
||||
}
|
||||
if (rdev->irq.hpd[3]) {
|
||||
DRM_DEBUG("r600_irq_set: hpd 4\n");
|
||||
hpd4 |= DC_HPDx_INT_EN;
|
||||
}
|
||||
if (rdev->irq.hpd[4]) {
|
||||
DRM_DEBUG("r600_irq_set: hpd 5\n");
|
||||
hpd5 |= DC_HPDx_INT_EN;
|
||||
}
|
||||
if (rdev->irq.hpd[5]) {
|
||||
DRM_DEBUG("r600_irq_set: hpd 6\n");
|
||||
hpd6 |= DC_HPDx_INT_EN;
|
||||
}
|
||||
if (rdev->irq.hdmi[0]) {
|
||||
DRM_DEBUG("r600_irq_set: hdmi 1\n");
|
||||
hdmi1 |= R600_HDMI_INT_EN;
|
||||
}
|
||||
if (rdev->irq.hdmi[1]) {
|
||||
DRM_DEBUG("r600_irq_set: hdmi 2\n");
|
||||
hdmi2 |= R600_HDMI_INT_EN;
|
||||
}
|
||||
if (rdev->irq.gui_idle) {
|
||||
DRM_DEBUG("gui idle\n");
|
||||
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
|
||||
}
|
||||
|
||||
WREG32(CP_INT_CNTL, cp_int_cntl);
|
||||
WREG32(DxMODE_INT_MASK, mode_int);
|
||||
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
|
||||
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
|
||||
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
|
||||
WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
|
||||
WREG32(DC_HPD1_INT_CONTROL, hpd1);
|
||||
WREG32(DC_HPD2_INT_CONTROL, hpd2);
|
||||
WREG32(DC_HPD3_INT_CONTROL, hpd3);
|
||||
WREG32(DC_HPD4_INT_CONTROL, hpd4);
|
||||
if (ASIC_IS_DCE32(rdev)) {
|
||||
WREG32(DC_HPD5_INT_CONTROL, hpd5);
|
||||
WREG32(DC_HPD6_INT_CONTROL, hpd6);
|
||||
}
|
||||
} else {
|
||||
WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
|
||||
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
|
||||
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
|
||||
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
|
||||
}
|
||||
|
||||
LEAVE();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void r600_irq_ack(struct radeon_device *rdev)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
|
||||
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
|
||||
rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
|
||||
} else {
|
||||
rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
|
||||
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
|
||||
rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
|
||||
}
|
||||
rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
|
||||
rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
|
||||
|
||||
if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
|
||||
WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
|
||||
if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
|
||||
WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
|
||||
if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
|
||||
WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
|
||||
if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
|
||||
WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
|
||||
if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
|
||||
WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
|
||||
if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
|
||||
WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
|
||||
if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
tmp = RREG32(DC_HPD1_INT_CONTROL);
|
||||
tmp |= DC_HPDx_INT_ACK;
|
||||
WREG32(DC_HPD1_INT_CONTROL, tmp);
|
||||
} else {
|
||||
tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
|
||||
tmp |= DC_HPDx_INT_ACK;
|
||||
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
|
||||
}
|
||||
}
|
||||
if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
tmp = RREG32(DC_HPD2_INT_CONTROL);
|
||||
tmp |= DC_HPDx_INT_ACK;
|
||||
WREG32(DC_HPD2_INT_CONTROL, tmp);
|
||||
} else {
|
||||
tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
|
||||
tmp |= DC_HPDx_INT_ACK;
|
||||
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
|
||||
}
|
||||
}
|
||||
if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
tmp = RREG32(DC_HPD3_INT_CONTROL);
|
||||
tmp |= DC_HPDx_INT_ACK;
|
||||
WREG32(DC_HPD3_INT_CONTROL, tmp);
|
||||
} else {
|
||||
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
|
||||
tmp |= DC_HPDx_INT_ACK;
|
||||
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
|
||||
}
|
||||
}
|
||||
if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
|
||||
tmp = RREG32(DC_HPD4_INT_CONTROL);
|
||||
tmp |= DC_HPDx_INT_ACK;
|
||||
WREG32(DC_HPD4_INT_CONTROL, tmp);
|
||||
}
|
||||
if (ASIC_IS_DCE32(rdev)) {
|
||||
if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
|
||||
tmp = RREG32(DC_HPD5_INT_CONTROL);
|
||||
tmp |= DC_HPDx_INT_ACK;
|
||||
WREG32(DC_HPD5_INT_CONTROL, tmp);
|
||||
}
|
||||
if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
|
||||
tmp = RREG32(DC_HPD5_INT_CONTROL);
|
||||
tmp |= DC_HPDx_INT_ACK;
|
||||
WREG32(DC_HPD6_INT_CONTROL, tmp);
|
||||
}
|
||||
}
|
||||
if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
|
||||
WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
|
||||
}
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
|
||||
WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
|
||||
}
|
||||
} else {
|
||||
if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
|
||||
WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
|
||||
{
|
||||
u32 wptr, tmp;
|
||||
|
||||
if (rdev->wb.enabled)
|
||||
wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
|
||||
else
|
||||
wptr = RREG32(IH_RB_WPTR);
|
||||
|
||||
if (wptr & RB_OVERFLOW) {
|
||||
/* When a ring buffer overflow happen start parsing interrupt
|
||||
* from the last not overwritten vector (wptr + 16). Hopefully
|
||||
* this should allow us to catchup.
|
||||
*/
|
||||
dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
|
||||
wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
|
||||
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
|
||||
tmp = RREG32(IH_RB_CNTL);
|
||||
tmp |= IH_WPTR_OVERFLOW_CLEAR;
|
||||
WREG32(IH_RB_CNTL, tmp);
|
||||
}
|
||||
return (wptr & rdev->ih.ptr_mask);
|
||||
}
|
||||
|
||||
/* r600 IV Ring
|
||||
* Each IV ring entry is 128 bits:
|
||||
* [7:0] - interrupt source id
|
||||
* [31:8] - reserved
|
||||
* [59:32] - interrupt source data
|
||||
* [127:60] - reserved
|
||||
*
|
||||
* The basic interrupt vector entries
|
||||
* are decoded as follows:
|
||||
* src_id src_data description
|
||||
* 1 0 D1 Vblank
|
||||
* 1 1 D1 Vline
|
||||
* 5 0 D2 Vblank
|
||||
* 5 1 D2 Vline
|
||||
* 19 0 FP Hot plug detection A
|
||||
* 19 1 FP Hot plug detection B
|
||||
* 19 2 DAC A auto-detection
|
||||
* 19 3 DAC B auto-detection
|
||||
* 21 4 HDMI block A
|
||||
* 21 5 HDMI block B
|
||||
* 176 - CP_INT RB
|
||||
* 177 - CP_INT IB1
|
||||
* 178 - CP_INT IB2
|
||||
* 181 - EOP Interrupt
|
||||
* 233 - GUI Idle
|
||||
*
|
||||
* Note, these are based on r600 and may need to be
|
||||
* adjusted or added to on newer asics
|
||||
*/
|
||||
|
||||
int r600_irq_process(struct radeon_device *rdev)
{
    u32 wptr;
    u32 rptr;
    u32 src_id, src_data;
    u32 ring_index;
    unsigned long flags;
    bool queue_hotplug = false;

    if (!rdev->ih.enabled || rdev->shutdown)
        return IRQ_NONE;

    wptr = r600_get_ih_wptr(rdev);
    rptr = rdev->ih.rptr;
    DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

    spin_lock_irqsave(&rdev->ih.lock, flags);

    if (rptr == wptr) {
        spin_unlock_irqrestore(&rdev->ih.lock, flags);
        return IRQ_NONE;
    }

restart_ih:
    /* display interrupts */
    r600_irq_ack(rdev);

    rdev->ih.wptr = wptr;
    while (rptr != wptr) {
        /* wptr/rptr are in bytes! */
        ring_index = rptr / 4;
        src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
        src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

        switch (src_id) {
        case 1: /* D1 vblank/vline */
            switch (src_data) {
            case 0: /* D1 vblank */
                if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[0]) {
//                      drm_handle_vblank(rdev->ddev, 0);
                        rdev->pm.vblank_sync = true;
//                      wake_up(&rdev->irq.vblank_queue);
                    }
//                  if (rdev->irq.pflip[0])
//                      radeon_crtc_handle_flip(rdev, 0);
                    rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D1 vblank\n");
                }
                break;
            case 1: /* D1 vline */
                if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D1 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 5: /* D2 vblank/vline */
            switch (src_data) {
            case 0: /* D2 vblank */
                if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[1]) {
//                      drm_handle_vblank(rdev->ddev, 1);
                        rdev->pm.vblank_sync = true;
//                      wake_up(&rdev->irq.vblank_queue);
                    }
//                  if (rdev->irq.pflip[1])
//                      radeon_crtc_handle_flip(rdev, 1);
                    rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D2 vblank\n");
                }
                break;
            case 1: /* D2 vline */
                if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D2 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 19: /* HPD/DAC hotplug */
            switch (src_data) {
            case 0:
                if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
                    rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD1\n");
                }
                break;
            case 1:
                if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
                    rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD2\n");
                }
                break;
            case 4:
                if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
                    rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD3\n");
                }
                break;
            case 5:
                if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
                    rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD4\n");
                }
                break;
            case 10:
                if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
                    rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD5\n");
                }
                break;
            case 12:
                if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
                    rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD6\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 21: /* HDMI */
            DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
//          r600_audio_schedule_polling(rdev);
            break;
        case 176: /* CP_INT in ring buffer */
        case 177: /* CP_INT in IB1 */
        case 178: /* CP_INT in IB2 */
            DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
//          radeon_fence_process(rdev);
            break;
        case 181: /* CP EOP event */
            DRM_DEBUG("IH: CP EOP\n");
//          radeon_fence_process(rdev);
            break;
        case 233: /* GUI IDLE */
            DRM_DEBUG("IH: GUI idle\n");
            rdev->pm.gui_idle = true;
//          wake_up(&rdev->irq.idle_queue);
            break;
        default:
            DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
            break;
        }

        /* wptr/rptr are in bytes! */
        rptr += 16;
        rptr &= rdev->ih.ptr_mask;
    }
    /* make sure wptr hasn't changed while processing */
    wptr = r600_get_ih_wptr(rdev);
    if (wptr != rdev->ih.wptr)
        goto restart_ih;
//  if (queue_hotplug)
//      schedule_work(&rdev->hotplug_work);
    rdev->ih.rptr = rptr;
    WREG32(IH_RB_RPTR, rdev->ih.rptr);
    spin_unlock_irqrestore(&rdev->ih.lock, flags);
    return IRQ_HANDLED;
}

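The masking in the loop above follows the entry layout documented before r600_irq_process(). A minimal sketch of just the decode step, assuming the same little-endian u32 ring and byte-based read pointer (the helper name is hypothetical):

static void ih_decode_entry(const u32 *ring, u32 rptr, u32 *src_id, u32 *src_data)
{
    u32 ring_index = rptr / 4; /* rptr counts bytes, ring[] holds dwords */

    *src_id   = le32_to_cpu(ring[ring_index]) & 0xff;          /* bits [7:0]   */
    *src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff; /* bits [59:32] */
}
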
/*
 * Debugfs info
|
@ -536,7 +536,7 @@
|
||||
#define IH_RB_WPTR_ADDR_LO 0x3e14
|
||||
#define IH_CNTL 0x3e18
|
||||
# define ENABLE_INTR (1 << 0)
|
||||
# define IH_MC_SWAP(x) ((x) << 2)
|
||||
# define IH_MC_SWAP(x) ((x) << 1)
|
||||
# define IH_MC_SWAP_NONE 0
|
||||
# define IH_MC_SWAP_16BIT 1
|
||||
# define IH_MC_SWAP_32BIT 2
|
||||
|
@ -70,7 +70,7 @@
|
||||
#include <ttm/ttm_placement.h>
|
||||
#include <ttm/ttm_module.h>
|
||||
|
||||
|
||||
#include <linux/irqreturn.h>
|
||||
#include <pci.h>
|
||||
|
||||
#include <errno-base.h>
|
||||
@ -263,6 +263,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
|
||||
void radeon_combios_get_power_modes(struct radeon_device *rdev);
|
||||
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
|
||||
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
|
||||
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
|
||||
void rs690_pm_info(struct radeon_device *rdev);
|
||||
extern int rv6xx_get_temp(struct radeon_device *rdev);
|
||||
extern int rv770_get_temp(struct radeon_device *rdev);
|
||||
@ -375,6 +376,15 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
|
||||
uint64_t *gpu_addr);
|
||||
void radeon_gem_object_unpin(struct drm_gem_object *obj);
|
||||
|
||||
int radeon_mode_dumb_create(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
int radeon_mode_dumb_mmap(struct drm_file *filp,
|
||||
struct drm_device *dev,
|
||||
uint32_t handle, uint64_t *offset_p);
|
||||
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
uint32_t handle);
|
||||
|
||||
/*
|
||||
* GART structures, functions & helpers
|
||||
@ -524,6 +534,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev);
|
||||
void radeon_irq_kms_fini(struct radeon_device *rdev);
|
||||
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
|
||||
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
|
||||
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
|
||||
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
|
||||
|
||||
/*
|
||||
* CP & ring.
|
||||
@ -638,7 +650,7 @@ struct radeon_cs_chunk {
|
||||
struct radeon_cs_parser {
|
||||
struct device *dev;
|
||||
struct radeon_device *rdev;
|
||||
// struct drm_file *filp;
|
||||
struct drm_file *filp;
|
||||
/* chunks */
|
||||
unsigned nchunks;
|
||||
struct radeon_cs_chunk *chunks;
|
||||
@ -1229,8 +1241,7 @@ struct radeon_device {
|
||||
struct r600_blit r600_blit;
|
||||
struct r700_vram_scratch vram_scratch;
|
||||
int msi_enabled; /* msi enabled */
|
||||
// struct r600_ih ih; /* r6/700 interrupt ring */
|
||||
// struct workqueue_struct *wq;
|
||||
struct r600_ih ih; /* r6/700 interrupt ring */
|
||||
// struct work_struct hotplug_work;
|
||||
int num_crtc; /* number of crtcs */
|
||||
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
|
||||
@ -1310,6 +1321,7 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
|
||||
#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
|
||||
#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
|
||||
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
|
||||
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
|
||||
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
|
||||
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
|
||||
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
|
||||
|
@ -518,8 +518,8 @@ static struct radeon_asic r600_asic = {
|
||||
.gart_set_page = &rs600_gart_set_page,
|
||||
.ring_test = &r600_ring_test,
|
||||
// .ring_ib_execute = &r600_ring_ib_execute,
|
||||
// .irq_set = &r600_irq_set,
|
||||
// .irq_process = &r600_irq_process,
|
||||
.irq_set = &r600_irq_set,
|
||||
.irq_process = &r600_irq_process,
|
||||
.fence_ring_emit = &r600_fence_ring_emit,
|
||||
// .cs_parse = &r600_cs_parse,
|
||||
// .copy_blit = &r600_copy_blit,
|
||||
@ -555,8 +555,8 @@ static struct radeon_asic rs780_asic = {
|
||||
.gart_set_page = &rs600_gart_set_page,
|
||||
.ring_test = &r600_ring_test,
|
||||
// .ring_ib_execute = &r600_ring_ib_execute,
|
||||
// .irq_set = &r600_irq_set,
|
||||
// .irq_process = &r600_irq_process,
|
||||
.irq_set = &r600_irq_set,
|
||||
.irq_process = &r600_irq_process,
|
||||
.fence_ring_emit = &r600_fence_ring_emit,
|
||||
// .cs_parse = &r600_cs_parse,
|
||||
// .copy_blit = &r600_copy_blit,
|
||||
@ -589,9 +589,9 @@ static struct radeon_asic rv770_asic = {
|
||||
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
|
||||
.gart_set_page = &rs600_gart_set_page,
|
||||
.ring_test = &r600_ring_test,
|
||||
// .ring_ib_execute = &r600_ring_ib_execute,
|
||||
// .irq_set = &r600_irq_set,
|
||||
// .irq_process = &r600_irq_process,
|
||||
.ring_ib_execute = &r600_ring_ib_execute,
|
||||
.irq_set = &r600_irq_set,
|
||||
.irq_process = &r600_irq_process,
|
||||
.fence_ring_emit = &r600_fence_ring_emit,
|
||||
// .cs_parse = &r600_cs_parse,
|
||||
// .copy_blit = &r600_copy_blit,
|
||||
@ -706,26 +706,24 @@ static struct radeon_asic btc_asic = {
|
||||
.set_surface_reg = r600_set_surface_reg,
|
||||
.clear_surface_reg = r600_clear_surface_reg,
|
||||
.bandwidth_update = &evergreen_bandwidth_update,
|
||||
.hpd_init = &evergreen_hpd_init,
|
||||
.hpd_sense = &evergreen_hpd_sense,
|
||||
};
|
||||
|
||||
|
||||
#if 0
|
||||
static struct radeon_asic cayman_asic = {
|
||||
.init = &cayman_init,
|
||||
.fini = &cayman_fini,
|
||||
.suspend = &cayman_suspend,
|
||||
.resume = &cayman_resume,
|
||||
// .fini = &evergreen_fini,
|
||||
// .suspend = &evergreen_suspend,
|
||||
// .resume = &evergreen_resume,
|
||||
.cp_commit = &r600_cp_commit,
|
||||
.gpu_is_lockup = &cayman_gpu_is_lockup,
|
||||
.asic_reset = &cayman_asic_reset,
|
||||
.vga_set_state = &r600_vga_set_state,
|
||||
.gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
|
||||
.gart_set_page = &rs600_gart_set_page,
|
||||
.ring_test = &r600_ring_test,
|
||||
.ring_ib_execute = &evergreen_ring_ib_execute,
|
||||
.irq_set = &evergreen_irq_set,
|
||||
.irq_process = &evergreen_irq_process,
|
||||
.get_vblank_counter = &evergreen_get_vblank_counter,
|
||||
// .ring_ib_execute = &r600_ring_ib_execute,
|
||||
// .irq_set = &r600_irq_set,
|
||||
// .irq_process = &r600_irq_process,
|
||||
.fence_ring_emit = &r600_fence_ring_emit,
|
||||
// .cs_parse = &r600_cs_parse,
|
||||
// .copy_blit = &r600_copy_blit,
|
||||
@ -735,22 +733,13 @@ static struct radeon_asic cayman_asic = {
|
||||
.set_engine_clock = &radeon_atom_set_engine_clock,
|
||||
.get_memory_clock = &radeon_atom_get_memory_clock,
|
||||
.set_memory_clock = &radeon_atom_set_memory_clock,
|
||||
.get_pcie_lanes = NULL,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = NULL,
|
||||
.set_surface_reg = r600_set_surface_reg,
|
||||
.clear_surface_reg = r600_clear_surface_reg,
|
||||
.bandwidth_update = &evergreen_bandwidth_update,
|
||||
.gui_idle = &r600_gui_idle,
|
||||
.pm_misc = &evergreen_pm_misc,
|
||||
.pm_prepare = &evergreen_pm_prepare,
|
||||
.pm_finish = &evergreen_pm_finish,
|
||||
.pm_init_profile = &r600_pm_init_profile,
|
||||
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
|
||||
.pre_page_flip = &evergreen_pre_page_flip,
|
||||
.page_flip = &evergreen_page_flip,
|
||||
.post_page_flip = &evergreen_post_page_flip,
|
||||
};
|
||||
#endif
|
||||
|
||||
int radeon_asic_init(struct radeon_device *rdev)
|
||||
{
|
||||
@ -863,7 +852,11 @@ int radeon_asic_init(struct radeon_device *rdev)
|
||||
rdev->num_crtc = 6;
|
||||
rdev->asic = &btc_asic;
|
||||
break;
|
||||
|
||||
case CHIP_CAYMAN:
|
||||
rdev->asic = &cayman_asic;
|
||||
/* set num crtcs */
|
||||
rdev->num_crtc = 6;
|
||||
break;
|
||||
default:
|
||||
/* FIXME: not supported yet */
|
||||
return -EINVAL;
|
||||
|
@ -105,7 +105,7 @@ static bool radeon_read_bios(struct radeon_device *rdev)
|
||||
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
|
||||
{
|
||||
int ret;
|
||||
int size = 64 * 1024;
|
||||
int size = 256 * 1024;
|
||||
int i;
|
||||
|
||||
if (!radeon_atrm_supported(rdev->pdev))
|
||||
|
@ -197,6 +197,93 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
|
||||
}
|
||||
}
|
||||
|
||||
void radeon_wb_disable(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (rdev->wb.wb_obj) {
|
||||
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
|
||||
if (unlikely(r != 0))
|
||||
return;
|
||||
radeon_bo_kunmap(rdev->wb.wb_obj);
|
||||
radeon_bo_unpin(rdev->wb.wb_obj);
|
||||
radeon_bo_unreserve(rdev->wb.wb_obj);
|
||||
}
|
||||
rdev->wb.enabled = false;
|
||||
}
|
||||
|
||||
void radeon_wb_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_wb_disable(rdev);
|
||||
if (rdev->wb.wb_obj) {
|
||||
radeon_bo_unref(&rdev->wb.wb_obj);
|
||||
rdev->wb.wb = NULL;
|
||||
rdev->wb.wb_obj = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int radeon_wb_init(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (rdev->wb.wb_obj == NULL) {
|
||||
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
|
||||
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
|
||||
if (r) {
|
||||
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
radeon_wb_fini(rdev);
|
||||
return r;
|
||||
}
|
||||
r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
|
||||
&rdev->wb.gpu_addr);
|
||||
if (r) {
|
||||
radeon_bo_unreserve(rdev->wb.wb_obj);
|
||||
dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
|
||||
radeon_wb_fini(rdev);
|
||||
return r;
|
||||
}
|
||||
r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
|
||||
radeon_bo_unreserve(rdev->wb.wb_obj);
|
||||
if (r) {
|
||||
dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
|
||||
radeon_wb_fini(rdev);
|
||||
return r;
|
||||
}
|
||||
|
||||
/* clear wb memory */
|
||||
memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
|
||||
/* disable event_write fences */
|
||||
rdev->wb.use_event = false;
|
||||
/* disabled via module param */
|
||||
if (radeon_no_wb == 1)
|
||||
rdev->wb.enabled = false;
|
||||
else {
|
||||
/* often unreliable on AGP */
|
||||
// if (rdev->flags & RADEON_IS_AGP) {
|
||||
// rdev->wb.enabled = false;
|
||||
// } else {
|
||||
rdev->wb.enabled = true;
|
||||
/* event_write fences are only available on r600+ */
|
||||
if (rdev->family >= CHIP_R600)
|
||||
rdev->wb.use_event = true;
|
||||
// }
|
||||
}
|
||||
/* always use writeback/events on NI */
|
||||
if (ASIC_IS_DCE5(rdev)) {
|
||||
rdev->wb.enabled = true;
|
||||
rdev->wb.use_event = true;
|
||||
}
|
||||
|
||||
dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* radeon_vram_location - try to find VRAM location
|
||||
 * @rdev: radeon device structure holding all necessary information
|
||||
|
@ -453,9 +453,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
|
||||
if (ret) {
|
||||
radeon_setup_encoder_clones(dev);
|
||||
radeon_print_display_setup(dev);
|
||||
|
||||
// list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
|
||||
// radeon_ddc_dump(drm_connector);
|
||||
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
|
||||
radeon_ddc_dump(drm_connector);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -30,7 +30,7 @@
|
||||
*/
|
||||
#include <linux/seq_file.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <linux/wait.h>
|
||||
//#include <linux/wait.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/slab.h>
|
||||
@ -39,6 +39,35 @@
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
|
||||
static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
|
||||
{
|
||||
if (rdev->wb.enabled) {
|
||||
u32 scratch_index;
|
||||
if (rdev->wb.use_event)
|
||||
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
|
||||
else
|
||||
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
|
||||
        rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
|
||||
} else
|
||||
WREG32(rdev->fence_drv.scratch_reg, seq);
|
||||
}
|
||||
|
||||
static u32 radeon_fence_read(struct radeon_device *rdev)
|
||||
{
|
||||
u32 seq;
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 scratch_index;
|
||||
if (rdev->wb.use_event)
|
||||
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
|
||||
else
|
||||
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
|
||||
seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
|
||||
} else
|
||||
seq = RREG32(rdev->fence_drv.scratch_reg);
|
||||
return seq;
|
||||
}
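
radeon_fence_write() and radeon_fence_read() hide whether the sequence number lives in the write-back page or in a scratch register. A hedged sketch of how a caller could compare the current value against a target sequence in a wrap-safe way (illustrative only, not part of the commit):

static bool radeon_seq_passed(struct radeon_device *rdev, u32 seq)
{
    /* signed difference handles 32-bit wrap-around of the counter */
    return (int)(radeon_fence_read(rdev) - seq) >= 0;
}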
|
||||
|
||||
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
@ -49,15 +78,15 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
|
||||
return 0;
|
||||
}
|
||||
fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
|
||||
if (!rdev->cp.ready) {
|
||||
if (!rdev->cp.ready)
|
||||
        /* FIXME: cp is not running, assume everything is done right
         * away
         */
|
||||
WREG32(rdev->fence_drv.scratch_reg, fence->seq);
|
||||
} else
|
||||
radeon_fence_write(rdev, fence->seq);
|
||||
else
|
||||
radeon_fence_ring_emit(rdev, fence);
|
||||
|
||||
trace_radeon_fence_emit(rdev->ddev, fence->seq);
|
||||
// trace_radeon_fence_emit(rdev->ddev, fence->seq);
|
||||
fence->emited = true;
|
||||
list_move_tail(&fence->list, &rdev->fence_drv.emited);
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
@ -72,19 +101,12 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
|
||||
bool wake = false;
|
||||
unsigned long cjiffies;
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 scratch_index;
|
||||
if (rdev->wb.use_event)
|
||||
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
|
||||
else
|
||||
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
|
||||
seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
|
||||
} else
|
||||
seq = RREG32(rdev->fence_drv.scratch_reg);
|
||||
#if 0
|
||||
seq = radeon_fence_read(rdev);
|
||||
if (seq != rdev->fence_drv.last_seq) {
|
||||
rdev->fence_drv.last_seq = seq;
|
||||
rdev->fence_drv.last_jiffies = jiffies;
|
||||
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
|
||||
// rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
|
||||
} else {
|
||||
cjiffies = jiffies;
|
||||
if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
|
||||
@ -126,6 +148,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
|
||||
} while (i != &rdev->fence_drv.emited);
|
||||
wake = true;
|
||||
}
|
||||
#endif
|
||||
return wake;
|
||||
}
|
||||
|
||||
@ -150,7 +173,7 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
|
||||
if ((*fence) == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
kref_init(&((*fence)->kref));
|
||||
// kref_init(&((*fence)->kref));
|
||||
(*fence)->rdev = rdev;
|
||||
(*fence)->emited = false;
|
||||
(*fence)->signaled = false;
|
||||
@ -208,11 +231,13 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
|
||||
if (radeon_fence_signaled(fence)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if 0
|
||||
timeout = rdev->fence_drv.last_timeout;
|
||||
retry:
|
||||
/* save current sequence used to check for GPU lockup */
|
||||
seq = rdev->fence_drv.last_seq;
|
||||
trace_radeon_fence_wait_begin(rdev->ddev, seq);
|
||||
// trace_radeon_fence_wait_begin(rdev->ddev, seq);
|
||||
if (intr) {
|
||||
radeon_irq_kms_sw_irq_get(rdev);
|
||||
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
|
||||
@ -227,7 +252,7 @@ retry:
|
||||
radeon_fence_signaled(fence), timeout);
|
||||
radeon_irq_kms_sw_irq_put(rdev);
|
||||
}
|
||||
trace_radeon_fence_wait_end(rdev->ddev, seq);
|
||||
// trace_radeon_fence_wait_end(rdev->ddev, seq);
|
||||
if (unlikely(!radeon_fence_signaled(fence))) {
|
||||
        /* we were interrupted for some reason and fence
         * isn't signaled yet, resume wait
|
||||
@ -250,19 +275,21 @@ retry:
|
||||
r = radeon_gpu_reset(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
WREG32(rdev->fence_drv.scratch_reg, fence->seq);
|
||||
radeon_fence_write(rdev, fence->seq);
|
||||
rdev->gpu_lockup = false;
|
||||
}
|
||||
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
|
||||
// timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
|
||||
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
|
||||
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
|
||||
rdev->fence_drv.last_jiffies = jiffies;
|
||||
// rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
|
||||
// rdev->fence_drv.last_jiffies = jiffies;
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
goto retry;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if 0
|
||||
int radeon_fence_wait_next(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
@ -315,16 +342,16 @@ struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
|
||||
return fence;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void radeon_fence_unref(struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_fence *tmp = *fence;
|
||||
|
||||
*fence = NULL;
|
||||
if (tmp) {
|
||||
kref_put(&tmp->kref, radeon_fence_destroy);
|
||||
}
|
||||
}
|
||||
|
||||
#if 0
|
||||
void radeon_fence_process(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
@ -338,6 +365,8 @@ void radeon_fence_process(struct radeon_device *rdev)
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
int radeon_fence_driver_init(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
@ -350,33 +379,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
return r;
|
||||
}
|
||||
WREG32(rdev->fence_drv.scratch_reg, 0);
|
||||
radeon_fence_write(rdev, 0);
|
||||
atomic_set(&rdev->fence_drv.seq, 0);
|
||||
INIT_LIST_HEAD(&rdev->fence_drv.created);
|
||||
INIT_LIST_HEAD(&rdev->fence_drv.emited);
|
||||
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
|
||||
init_waitqueue_head(&rdev->fence_drv.queue);
|
||||
// init_waitqueue_head(&rdev->fence_drv.queue);
|
||||
rdev->fence_drv.initialized = true;
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
if (radeon_debugfs_fence_init(rdev)) {
|
||||
dev_err(rdev->dev, "fence debugfs file creation failed\n");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_fence_driver_fini(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
|
||||
if (!rdev->fence_drv.initialized)
|
||||
return;
|
||||
wake_up_all(&rdev->fence_drv.queue);
|
||||
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
|
||||
radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
rdev->fence_drv.initialized = false;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Fence debugfs
|
||||
@ -390,7 +403,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
|
||||
struct radeon_fence *fence;
|
||||
|
||||
seq_printf(m, "Last signaled fence 0x%08X\n",
|
||||
RREG32(rdev->fence_drv.scratch_reg));
|
||||
radeon_fence_read(rdev));
|
||||
if (!list_empty(&rdev->fence_drv.emited)) {
|
||||
fence = list_entry(rdev->fence_drv.emited.prev,
|
||||
struct radeon_fence, list);
|
||||
|
drivers/video/drm/radeon/radeon_irq_kms.c (new file, 225 lines)
@ -0,0 +1,225 @@
|
||||
/*
|
||||
* Copyright 2008 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
* Copyright 2009 Jerome Glisse.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
* Jerome Glisse
|
||||
*/
|
||||
#include "drmP.h"
|
||||
#include "drm_crtc_helper.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
#include "atom.h"
|
||||
|
||||
static struct radeon_device *main_device;
|
||||
|
||||
#if 0
|
||||
|
||||
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *) arg;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
return radeon_irq_process(rdev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle hotplug events outside the interrupt handler proper.
|
||||
*/
|
||||
static void radeon_hotplug_work_func(struct work_struct *work)
|
||||
{
|
||||
struct radeon_device *rdev = container_of(work, struct radeon_device,
|
||||
hotplug_work);
|
||||
struct drm_device *dev = rdev->ddev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct drm_connector *connector;
|
||||
|
||||
if (mode_config->num_connector) {
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head)
|
||||
radeon_connector_hotplug(connector);
|
||||
}
|
||||
/* Just fire off a uevent and let userspace tell us what to do */
|
||||
drm_helper_hpd_irq_event(dev);
|
||||
}
|
||||
|
||||
|
||||
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
|
||||
{
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
unsigned i;
|
||||
|
||||
if (rdev == NULL) {
|
||||
return;
|
||||
}
|
||||
/* Disable *all* interrupts */
|
||||
rdev->irq.sw_int = false;
|
||||
rdev->irq.gui_idle = false;
|
||||
for (i = 0; i < rdev->num_crtc; i++)
|
||||
rdev->irq.crtc_vblank_int[i] = false;
|
||||
for (i = 0; i < 6; i++) {
|
||||
rdev->irq.hpd[i] = false;
|
||||
rdev->irq.pflip[i] = false;
|
||||
}
|
||||
radeon_irq_set(rdev);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void irq_handler_kms()
|
||||
{
|
||||
dbgprintf("%s\n",__FUNCTION__);
|
||||
radeon_irq_process(main_device);
|
||||
}
|
||||
|
||||
|
||||
static void radeon_irq_preinstall(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
/* Disable *all* interrupts */
|
||||
rdev->irq.sw_int = false;
|
||||
rdev->irq.gui_idle = false;
|
||||
for (i = 0; i < rdev->num_crtc; i++)
|
||||
rdev->irq.crtc_vblank_int[i] = false;
|
||||
for (i = 0; i < 6; i++) {
|
||||
rdev->irq.hpd[i] = false;
|
||||
rdev->irq.pflip[i] = false;
|
||||
}
|
||||
radeon_irq_set(rdev);
|
||||
/* Clear bits */
|
||||
radeon_irq_process(rdev);
|
||||
}
|
||||
|
||||
int radeon_driver_irq_postinstall(struct radeon_device *rdev)
|
||||
{
|
||||
// struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
// dev->max_vblank_count = 0x001fffff;
|
||||
rdev->irq.sw_int = true;
|
||||
radeon_irq_set(rdev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int radeon_irq_kms_init(struct radeon_device *rdev)
|
||||
{
|
||||
int i;
|
||||
int irq_line;
|
||||
int r = 0;
|
||||
|
||||
ENTER();
|
||||
|
||||
// INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
|
||||
|
||||
spin_lock_init(&rdev->irq.sw_lock);
|
||||
for (i = 0; i < rdev->num_crtc; i++)
|
||||
spin_lock_init(&rdev->irq.pflip_lock[i]);
|
||||
// r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
|
||||
// if (r) {
|
||||
// return r;
|
||||
// }
|
||||
|
||||
rdev->msi_enabled = 0;
|
||||
rdev->irq.installed = true;
|
||||
main_device = rdev;
|
||||
|
||||
radeon_irq_preinstall(rdev);
|
||||
|
||||
irq_line = rdev->pdev->irq;
|
||||
|
||||
dbgprintf("%s install irq %d\n", __FUNCTION__, irq_line);
|
||||
|
||||
AttachIntHandler(irq_line, irq_handler_kms, 2);
|
||||
|
||||
// r = drm_irq_install(rdev->ddev);
|
||||
|
||||
r = radeon_driver_irq_postinstall(rdev);
|
||||
if (r) {
|
||||
rdev->irq.installed = false;
|
||||
LEAVE();
|
||||
return r;
|
||||
}
|
||||
|
||||
DRM_INFO("radeon: irq initialized.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
|
||||
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
|
||||
if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
|
||||
rdev->irq.sw_int = true;
|
||||
radeon_irq_set(rdev);
|
||||
}
|
||||
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
|
||||
}
|
||||
|
||||
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
|
||||
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
|
||||
BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
|
||||
if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
|
||||
rdev->irq.sw_int = false;
|
||||
radeon_irq_set(rdev);
|
||||
}
|
||||
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
|
||||
}
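
The get/put pair keeps the CP software interrupt enabled only while at least one waiter needs it; radeon_fence_wait() earlier in this commit uses exactly this pattern. A minimal usage sketch (the sleep in the middle is a placeholder):

    radeon_irq_kms_sw_irq_get(rdev);
    /* ... block until the fence sequence number is reached ... */
    radeon_irq_kms_sw_irq_put(rdev);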
|
||||
|
||||
#if 0
|
||||
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
|
||||
if (crtc < 0 || crtc >= rdev->num_crtc)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
|
||||
if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
|
||||
rdev->irq.pflip[crtc] = true;
|
||||
radeon_irq_set(rdev);
|
||||
}
|
||||
spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
|
||||
}
|
||||
|
||||
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
|
||||
if (crtc < 0 || crtc >= rdev->num_crtc)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
|
||||
BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
|
||||
if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
|
||||
rdev->irq.pflip[crtc] = false;
|
||||
radeon_irq_set(rdev);
|
||||
}
|
||||
spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
|
||||
}
|
||||
|
||||
#endif
|
@ -38,9 +38,6 @@ int radeon_debugfs_ib_init(struct radeon_device *rdev);
|
||||
/*
|
||||
* IB.
|
||||
*/
|
||||
|
||||
#if 0
|
||||
|
||||
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
|
||||
{
|
||||
struct radeon_fence *fence;
|
||||
@ -74,20 +71,20 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
|
||||
}
|
||||
rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
|
||||
nib->free = false;
|
||||
if (nib->fence) {
|
||||
mutex_unlock(&rdev->ib_pool.mutex);
|
||||
r = radeon_fence_wait(nib->fence, false);
|
||||
if (r) {
|
||||
dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
|
||||
nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
|
||||
mutex_lock(&rdev->ib_pool.mutex);
|
||||
nib->free = true;
|
||||
mutex_unlock(&rdev->ib_pool.mutex);
|
||||
radeon_fence_unref(&fence);
|
||||
return r;
|
||||
}
|
||||
mutex_lock(&rdev->ib_pool.mutex);
|
||||
}
|
||||
// if (nib->fence) {
|
||||
// mutex_unlock(&rdev->ib_pool.mutex);
|
||||
// r = radeon_fence_wait(nib->fence, false);
|
||||
// if (r) {
|
||||
// dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
|
||||
// nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
|
||||
// mutex_lock(&rdev->ib_pool.mutex);
|
||||
// nib->free = true;
|
||||
// mutex_unlock(&rdev->ib_pool.mutex);
|
||||
// radeon_fence_unref(&fence);
|
||||
// return r;
|
||||
// }
|
||||
// mutex_lock(&rdev->ib_pool.mutex);
|
||||
// }
|
||||
radeon_fence_unref(&nib->fence);
|
||||
nib->fence = fence;
|
||||
nib->length_dw = 0;
|
||||
@ -136,7 +133,6 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
|
||||
radeon_ring_unlock_commit(rdev);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int radeon_ib_pool_init(struct radeon_device *rdev)
|
||||
{
|
||||
|
drivers/video/drm/radeon/radeon_trace.h (new file, 82 lines)
@ -0,0 +1,82 @@
|
||||
#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _RADEON_TRACE_H_
|
||||
|
||||
#include <linux/stringify.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/tracepoint.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM radeon
|
||||
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
|
||||
#define TRACE_INCLUDE_FILE radeon_trace
|
||||
|
||||
TRACE_EVENT(radeon_bo_create,
|
||||
TP_PROTO(struct radeon_bo *bo),
|
||||
TP_ARGS(bo),
|
||||
TP_STRUCT__entry(
|
||||
__field(struct radeon_bo *, bo)
|
||||
__field(u32, pages)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->bo = bo;
|
||||
__entry->pages = bo->tbo.num_pages;
|
||||
),
|
||||
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(radeon_fence_request,
|
||||
|
||||
TP_PROTO(struct drm_device *dev, u32 seqno),
|
||||
|
||||
TP_ARGS(dev, seqno),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, dev)
|
||||
__field(u32, seqno)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = dev->primary->index;
|
||||
__entry->seqno = seqno;
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
|
||||
|
||||
TP_PROTO(struct drm_device *dev, u32 seqno),
|
||||
|
||||
TP_ARGS(dev, seqno)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
|
||||
|
||||
TP_PROTO(struct drm_device *dev, u32 seqno),
|
||||
|
||||
TP_ARGS(dev, seqno)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
|
||||
|
||||
TP_PROTO(struct drm_device *dev, u32 seqno),
|
||||
|
||||
TP_ARGS(dev, seqno)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
|
||||
|
||||
TP_PROTO(struct drm_device *dev, u32 seqno),
|
||||
|
||||
TP_ARGS(dev, seqno)
|
||||
);
|
||||
|
||||
#endif
|
||||
|
||||
/* This part must be outside protection */
|
||||
#undef TRACE_INCLUDE_PATH
|
||||
#define TRACE_INCLUDE_PATH .
|
||||
#include <trace/define_trace.h>
|
@ -79,12 +79,16 @@ void fini_cursor(cursor_t *cursor)
|
||||
__DestroyObject(cursor);
|
||||
};
|
||||
|
||||
|
||||
static void radeon_show_cursor()
|
||||
{
|
||||
struct radeon_device *rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL);
|
||||
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
|
||||
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
|
||||
} else if (ASIC_IS_AVIVO(rdev)) {
|
||||
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL);
|
||||
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
|
||||
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
|
||||
@ -109,8 +113,17 @@ cursor_t* __stdcall select_cursor(cursor_t *cursor)
|
||||
rdisplay->cursor = cursor;
|
||||
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
{
|
||||
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH, 0);
|
||||
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS, gpu_addr);
|
||||
}
|
||||
else if (ASIC_IS_AVIVO(rdev))
|
||||
{
|
||||
if (rdev->family >= CHIP_RV770)
|
||||
WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0);
|
||||
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS, gpu_addr);
|
||||
}
|
||||
else {
|
||||
WREG32(RADEON_CUR_OFFSET, gpu_addr - rdev->mc.vram_start);
|
||||
}
|
||||
@ -126,7 +139,14 @@ static void radeon_lock_cursor(bool lock)
|
||||
|
||||
uint32_t cur_lock;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
cur_lock = RREG32(EVERGREEN_CUR_UPDATE);
|
||||
if (lock)
|
||||
cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
|
||||
else
|
||||
cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
|
||||
WREG32(EVERGREEN_CUR_UPDATE, cur_lock);
|
||||
} else if (ASIC_IS_AVIVO(rdev)) {
|
||||
cur_lock = RREG32(AVIVO_D1CUR_UPDATE);
|
||||
if (lock)
|
||||
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
|
||||
@ -151,12 +171,15 @@ void __stdcall move_cursor(cursor_t *cursor, int x, int y)
|
||||
|
||||
int hot_x = cursor->hot_x;
|
||||
int hot_y = cursor->hot_y;
|
||||
|
||||
radeon_lock_cursor(true);
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
{
|
||||
int w = 32;
|
||||
|
||||
radeon_lock_cursor(true);
|
||||
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
WREG32(EVERGREEN_CUR_POSITION,(x << 16) | y);
|
||||
WREG32(EVERGREEN_CUR_HOT_SPOT, (hot_x << 16) | hot_y);
|
||||
WREG32(EVERGREEN_CUR_SIZE, ((w - 1) << 16) | 31);
|
||||
} else if (ASIC_IS_AVIVO(rdev)) {
|
||||
WREG32(AVIVO_D1CUR_POSITION, (x << 16) | y);
|
||||
WREG32(AVIVO_D1CUR_HOT_SPOT, (hot_x << 16) | hot_y);
|
||||
WREG32(AVIVO_D1CUR_SIZE, ((w - 1) << 16) | 31);
|
||||
|
@ -30,7 +30,11 @@ static void radeon_show_cursor_kms(struct drm_crtc *crtc)
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct radeon_device *rdev = crtc->dev->dev_private;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
|
||||
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
|
||||
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
|
||||
} else if (ASIC_IS_AVIVO(rdev)) {
|
||||
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
|
||||
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
|
||||
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
|
||||
@ -58,7 +62,14 @@ static void radeon_lock_cursor_kms(struct drm_crtc *crtc, bool lock)
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
uint32_t cur_lock;
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
|
||||
if (lock)
|
||||
cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
|
||||
else
|
||||
cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
|
||||
WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
|
||||
} else if (ASIC_IS_AVIVO(rdev)) {
|
||||
cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
|
||||
if (lock)
|
||||
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
|
||||
@ -90,8 +101,16 @@ cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
|
||||
rdisplay->cursor = cursor;
|
||||
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
|
||||
0);
|
||||
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
|
||||
gpu_addr);
|
||||
} else if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (rdev->family >= CHIP_RV770)
|
||||
WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0);
|
||||
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
|
||||
}
|
||||
else {
|
||||
radeon_crtc->legacy_cursor_offset = gpu_addr - rdev->mc.vram_start;
|
||||
/* offset is from DISP(2)_BASE_ADDRESS */
|
||||
@ -110,44 +129,18 @@ void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y)
|
||||
|
||||
int hot_x = cursor->hot_x;
|
||||
int hot_y = cursor->hot_y;
|
||||
int w = 32;
|
||||
|
||||
radeon_lock_cursor_kms(crtc, true);
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
{
|
||||
int w = 32;
|
||||
int i = 0;
|
||||
struct drm_crtc *crtc_p;
|
||||
|
||||
/* avivo cursor are offset into the total surface */
|
||||
// x += crtc->x;
|
||||
// y += crtc->y;
|
||||
|
||||
// DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
|
||||
#if 0
|
||||
        /* avivo cursor image can't end on 128 pixel boundary or
|
||||
* go past the end of the frame if both crtcs are enabled
|
||||
*/
|
||||
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
|
||||
if (crtc_p->enabled)
|
||||
i++;
|
||||
}
|
||||
if (i > 1) {
|
||||
int cursor_end, frame_end;
|
||||
|
||||
cursor_end = x + w;
|
||||
frame_end = crtc->x + crtc->mode.crtc_hdisplay;
|
||||
if (cursor_end >= frame_end) {
|
||||
w = w - (cursor_end - frame_end);
|
||||
if (!(frame_end & 0x7f))
|
||||
w--;
|
||||
} else {
|
||||
if (!(cursor_end & 0x7f))
|
||||
w--;
|
||||
}
|
||||
if (w <= 0)
|
||||
w = 1;
|
||||
}
|
||||
#endif
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
|
||||
(x << 16) | y);
|
||||
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset,
|
||||
(hot_x << 16) | hot_y);
|
||||
WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
|
||||
((w - 1) << 16) | 31);
|
||||
} else if (ASIC_IS_AVIVO(rdev)) {
|
||||
WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
|
||||
(x << 16) | y);
|
||||
WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset,
|
||||
|
@ -498,6 +498,12 @@ static void rv770_program_channel_remap(struct radeon_device *rdev)
|
||||
else
|
||||
tcp_chan_steer = 0x00fac688;
|
||||
|
||||
/* RV770 CE has special chremap setup */
|
||||
if (rdev->pdev->device == 0x944e) {
|
||||
tcp_chan_steer = 0x00b08b08;
|
||||
mc_shared_chremap = 0x00b08b08;
|
||||
}
|
||||
|
||||
WREG32(TCP_CHAN_STEER, tcp_chan_steer);
|
||||
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
|
||||
}
|
||||
@ -1082,6 +1088,20 @@ static int rv770_startup(struct radeon_device *rdev)
|
||||
if (r)
|
||||
return r;
|
||||
rv770_gpu_init(rdev);
|
||||
/* allocate wb buffer */
|
||||
r = radeon_wb_init(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* Enable IRQ */
|
||||
r = r600_irq_init(rdev);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: IH init failed (%d).\n", r);
|
||||
// radeon_irq_kms_fini(rdev);
|
||||
return r;
|
||||
}
|
||||
r600_irq_set(rdev);
|
||||
|
||||
r = radeon_ring_init(rdev, rdev->cp.ring_size);
|
||||
if (r)
|
||||
return r;
|
||||
@ -1111,6 +1131,10 @@ int rv770_init(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
    /* This doesn't do much */
|
||||
r = radeon_gem_init(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
/* Read BIOS */
|
||||
if (!radeon_get_bios(rdev)) {
|
||||
if (ASIC_IS_AVIVO(rdev))
|
||||
@ -1140,9 +1164,9 @@ int rv770_init(struct radeon_device *rdev)
|
||||
/* Initialize clocks */
|
||||
radeon_get_clock_info(rdev->ddev);
|
||||
/* Fence driver */
|
||||
// r = radeon_fence_driver_init(rdev);
|
||||
// if (r)
|
||||
// return r;
|
||||
r = radeon_fence_driver_init(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
/* initialize AGP */
|
||||
if (rdev->flags & RADEON_IS_AGP) {
|
||||
r = radeon_agp_init(rdev);
|
||||
@ -1157,10 +1181,15 @@ int rv770_init(struct radeon_device *rdev)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = radeon_irq_kms_init(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
rdev->cp.ring_obj = NULL;
|
||||
r600_ring_init(rdev, 1024 * 1024);
|
||||
|
||||
rdev->ih.ring_obj = NULL;
|
||||
r600_ih_ring_init(rdev, 64 * 1024);
|
||||
|
||||
r = r600_pcie_gart_init(rdev);
|
||||
if (r)
|
||||
@ -1170,22 +1199,21 @@ int rv770_init(struct radeon_device *rdev)
|
||||
r = rv770_startup(rdev);
|
||||
if (r) {
|
||||
dev_err(rdev->dev, "disabling GPU acceleration\n");
|
||||
|
||||
rv770_pcie_gart_fini(rdev);
|
||||
rdev->accel_working = false;
|
||||
}
|
||||
if (rdev->accel_working) {
|
||||
// r = radeon_ib_pool_init(rdev);
|
||||
// if (r) {
|
||||
// dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
|
||||
// rdev->accel_working = false;
|
||||
// } else {
|
||||
// r = r600_ib_test(rdev);
|
||||
// if (r) {
|
||||
// dev_err(rdev->dev, "IB test failed (%d).\n", r);
|
||||
// rdev->accel_working = false;
|
||||
// }
|
||||
// }
|
||||
r = radeon_ib_pool_init(rdev);
|
||||
if (r) {
|
||||
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
|
||||
rdev->accel_working = false;
|
||||
} else {
|
||||
r = r600_ib_test(rdev);
|
||||
if (r) {
|
||||
dev_err(rdev->dev, "IB test failed (%d).\n", r);
|
||||
rdev->accel_working = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|