kms rc9.1: gpu test

git-svn-id: svn://kolibrios.org@1428 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2010-03-01 06:55:30 +00:00
parent 4ecac42a2a
commit ca11e25dff
21 changed files with 6009 additions and 82 deletions

@@ -1472,7 +1472,7 @@ extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
struct drm_ati_pcigart_info * gart_info);
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr);
size_t align);
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);

@@ -35,7 +35,7 @@
//#include <linux/spinlock.h>
#include <linux/types.h>
//#include <linux/idr.h>
#include <linux/idr.h>
#include <linux/fb.h>

@@ -0,0 +1,14 @@
#ifndef _ASM_X86_UNALIGNED_H
#define _ASM_X86_UNALIGNED_H
/*
* The x86 can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_le
#define put_unaligned __put_unaligned_le
#endif /* _ASM_X86_UNALIGNED_H */

@@ -0,0 +1,67 @@
#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
#define _LINUX_UNALIGNED_ACCESS_OK_H
#include <linux/kernel.h>
#include <asm/byteorder.h>
static inline u16 get_unaligned_le16(const void *p)
{
return le16_to_cpup((__le16 *)p);
}
static inline u32 get_unaligned_le32(const void *p)
{
return le32_to_cpup((__le32 *)p);
}
static inline u64 get_unaligned_le64(const void *p)
{
return le64_to_cpup((__le64 *)p);
}
static inline u16 get_unaligned_be16(const void *p)
{
return be16_to_cpup((__be16 *)p);
}
static inline u32 get_unaligned_be32(const void *p)
{
return be32_to_cpup((__be32 *)p);
}
static inline u64 get_unaligned_be64(const void *p)
{
return be64_to_cpup((__be64 *)p);
}
static inline void put_unaligned_le16(u16 val, void *p)
{
*((__le16 *)p) = cpu_to_le16(val);
}
static inline void put_unaligned_le32(u32 val, void *p)
{
*((__le32 *)p) = cpu_to_le32(val);
}
static inline void put_unaligned_le64(u64 val, void *p)
{
*((__le64 *)p) = cpu_to_le64(val);
}
static inline void put_unaligned_be16(u16 val, void *p)
{
*((__be16 *)p) = cpu_to_be16(val);
}
static inline void put_unaligned_be32(u32 val, void *p)
{
*((__be32 *)p) = cpu_to_be32(val);
}
static inline void put_unaligned_be64(u64 val, void *p)
{
*((__be64 *)p) = cpu_to_be64(val);
}
#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */

@@ -0,0 +1,68 @@
#ifndef _LINUX_UNALIGNED_GENERIC_H
#define _LINUX_UNALIGNED_GENERIC_H
/*
* Cause a link-time error if we try an unaligned access other than
* 1,2,4 or 8 bytes long
*/
extern void __bad_unaligned_access_size(void);
#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \
__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
__bad_unaligned_access_size())))); \
}))
#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \
__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
__bad_unaligned_access_size())))); \
}))
#define __put_unaligned_le(val, ptr) ({ \
void *__gu_p = (ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
*(u8 *)__gu_p = (__force u8)(val); \
break; \
case 2: \
put_unaligned_le16((__force u16)(val), __gu_p); \
break; \
case 4: \
put_unaligned_le32((__force u32)(val), __gu_p); \
break; \
case 8: \
put_unaligned_le64((__force u64)(val), __gu_p); \
break; \
default: \
__bad_unaligned_access_size(); \
break; \
} \
(void)0; })
#define __put_unaligned_be(val, ptr) ({ \
void *__gu_p = (ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
*(u8 *)__gu_p = (__force u8)(val); \
break; \
case 2: \
put_unaligned_be16((__force u16)(val), __gu_p); \
break; \
case 4: \
put_unaligned_be32((__force u32)(val), __gu_p); \
break; \
case 8: \
put_unaligned_be64((__force u64)(val), __gu_p); \
break; \
default: \
__bad_unaligned_access_size(); \
break; \
} \
(void)0; })
#endif /* _LINUX_UNALIGNED_GENERIC_H */
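
Taken together, these two new headers give the driver a portable get/put API for reading packed, byte-aligned data such as the atombios tables touched a few hunks below. The snippet that follows is a minimal userspace sketch of the idea, not the kernel implementation; all names are illustrative, and the memcpy-based helper stands in for get_unaligned_le32():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for get_unaligned_le32(): memcpy lets the compiler pick a
 * safe access sequence instead of a direct 32-bit load, which is
 * undefined behaviour on an unaligned pointer and traps on
 * strict-alignment machines. */
static uint32_t demo_get_unaligned_le32(const void *p)
{
    uint8_t b[4];
    memcpy(b, p, sizeof(b));
    return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
           (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
    /* A packed table: a one-byte tag, then a little-endian u32 at an
     * odd (unaligned) offset -- the atombios layout in miniature. */
    uint8_t table[] = { 0x01, 0x78, 0x56, 0x34, 0x12 };

    printf("value = 0x%08x\n", demo_get_unaligned_le32(table + 1));
    /* prints: value = 0x12345678 */
    return 0;
}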

@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
return mode;
}
/*
* EDID is delightfully ambiguous about how interlaced modes are to be
* encoded. Our internal representation is of frame height, but some
* HDTV detailed timings are encoded as field height.
*
* The format list here is from CEA, in frame size. Technically we
* should be checking refresh rate too. Whatever.
*/
static void
drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
struct detailed_pixel_timing *pt)
{
int i;
static const struct {
int w, h;
} cea_interlaced[] = {
{ 1920, 1080 },
{ 720, 480 },
{ 1440, 480 },
{ 2880, 480 },
{ 720, 576 },
{ 1440, 576 },
{ 2880, 576 },
};
static const int n_sizes =
sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
if (!(pt->misc & DRM_EDID_PT_INTERLACED))
return;
for (i = 0; i < n_sizes; i++) {
if ((mode->hdisplay == cea_interlaced[i].w) &&
(mode->vdisplay == cea_interlaced[i].h / 2)) {
mode->vdisplay *= 2;
mode->vsync_start *= 2;
mode->vsync_end *= 2;
mode->vtotal *= 2;
mode->vtotal |= 1;
}
}
mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
/**
* drm_mode_detailed - create a new mode from an EDID detailed timing section
* @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_set_name(mode);
if (pt->misc & DRM_EDID_PT_INTERLACED)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
drm_mode_do_interlace_quirk(mode, pt);
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
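
To make the quirk above concrete: some sinks encode a 1080i detailed timing in field height, so the mode arrives as 1920x540 and must be doubled back to frame height. A self-contained sketch of just the doubling arithmetic (the struct is a stand-in for the few fields touched, not the kernel's drm_display_mode):

#include <stdio.h>

/* Only the vertical timing fields the quirk touches. */
struct mode { int vdisplay, vsync_start, vsync_end, vtotal; };

static void frame_from_field(struct mode *m)
{
    m->vdisplay    *= 2;
    m->vsync_start *= 2;
    m->vsync_end   *= 2;
    m->vtotal      *= 2;
    m->vtotal      |= 1;    /* interlaced frames have an odd line count */
}

int main(void)
{
    /* 1080i encoded as field height: 1920x540, 562 lines per field. */
    struct mode m = { 540, 542, 547, 562 };

    frame_from_field(&m);
    /* prints 1080 1084 1094 1125 -- the frame-height 1080i timing */
    printf("%d %d %d %d\n", m.vdisplay, m.vsync_start, m.vsync_end, m.vtotal);
    return 0;
}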

@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/unaligned.h>
#define ATOM_DEBUG
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_PS:
idx = U8(*ptr);
(*ptr)++;
val = le32_to_cpu(ctx->ps[idx]);
/* get_unaligned_le32 avoids unaligned accesses from atombios
* tables, noticed on a DEC Alpha. */
val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;

@@ -350,7 +350,7 @@ retry:
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (args.ucReplyStatus && !args.ucDataOutLen) {
if (args.ucReplyStatus == 0x20 && retry_count < 10)
if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
goto retry;
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
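
The one-character change above matters more than it looks: without the post-increment, retry_count stays at 0 and a sink that keeps replying with the AUX defer status (0x20) retries forever. A toy reproduction of the fixed pattern, with a hypothetical transact() standing in for the atombios aux call:

#include <stdio.h>

#define AUX_DEFER 0x20          /* "busy, try again later" reply */

/* Illustrative stand-in for the aux transaction: defers three times,
 * then succeeds. */
static int transact(void)
{
    static int calls;
    return (++calls <= 3) ? AUX_DEFER : 0;
}

int main(void)
{
    int retry_count = 0;
    int status;

    /* The post-increment bounds the loop even if the device defers
     * forever; the old code never advanced retry_count at all. */
    do {
        status = transact();
    } while (status == AUX_DEFER && retry_count++ < 10);

    printf("status=0x%02x after %d retries\n", status, retry_count);
    return 0;
}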

@@ -279,7 +279,6 @@ int r520_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
// rv515_suspend(rdev);
// r100_cp_fini(rdev);
// r100_wb_fini(rdev);
// r100_ib_fini(rdev);

@@ -1894,6 +1894,7 @@ int r600_init(struct radeon_device *rdev)
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
// r600_suspend(rdev);
// r600_wb_fini(rdev);
// radeon_ring_fini(rdev);

File diff suppressed because it is too large

@@ -0,0 +1,504 @@
/*
* RadeonHD R6xx, R7xx Register documentation
*
* Copyright (C) 2008-2009 Advanced Micro Devices, Inc.
* Copyright (C) 2008-2009 Matthias Hopf
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _R600_REG_R6xx_H_
#define _R600_REG_R6xx_H_
/*
* Registers for R6xx chips that are not documented yet
*/
enum {
MM_INDEX = 0x0000,
MM_DATA = 0x0004,
SRBM_STATUS = 0x0e50,
RLC_RQ_PENDING_bit = 1 << 3,
RCU_RQ_PENDING_bit = 1 << 4,
GRBM_RQ_PENDING_bit = 1 << 5,
HI_RQ_PENDING_bit = 1 << 6,
IO_EXTERN_SIGNAL_bit = 1 << 7,
VMC_BUSY_bit = 1 << 8,
MCB_BUSY_bit = 1 << 9,
MCDZ_BUSY_bit = 1 << 10,
MCDY_BUSY_bit = 1 << 11,
MCDX_BUSY_bit = 1 << 12,
MCDW_BUSY_bit = 1 << 13,
SEM_BUSY_bit = 1 << 14,
SRBM_STATUS__RLC_BUSY_bit = 1 << 15,
PDMA_BUSY_bit = 1 << 16,
IH_BUSY_bit = 1 << 17,
CSC_BUSY_bit = 1 << 20,
CMC7_BUSY_bit = 1 << 21,
CMC6_BUSY_bit = 1 << 22,
CMC5_BUSY_bit = 1 << 23,
CMC4_BUSY_bit = 1 << 24,
CMC3_BUSY_bit = 1 << 25,
CMC2_BUSY_bit = 1 << 26,
CMC1_BUSY_bit = 1 << 27,
CMC0_BUSY_bit = 1 << 28,
BIF_BUSY_bit = 1 << 29,
IDCT_BUSY_bit = 1 << 30,
SRBM_READ_ERROR = 0x0e98,
READ_ADDRESS_mask = 0xffff << 2,
READ_ADDRESS_shift = 2,
READ_REQUESTER_HI_bit = 1 << 24,
READ_REQUESTER_GRBM_bit = 1 << 25,
READ_REQUESTER_RCU_bit = 1 << 26,
READ_REQUESTER_RLC_bit = 1 << 27,
READ_ERROR_bit = 1 << 31,
SRBM_INT_STATUS = 0x0ea4,
RDERR_INT_STAT_bit = 1 << 0,
GFX_CNTX_SWITCH_INT_STAT_bit = 1 << 1,
SRBM_INT_ACK = 0x0ea8,
RDERR_INT_ACK_bit = 1 << 0,
GFX_CNTX_SWITCH_INT_ACK_bit = 1 << 1,
/* R6XX_MC_VM_FB_LOCATION = 0x2180, */
VENDOR_DEVICE_ID = 0x4000,
HDP_MEM_COHERENCY_FLUSH_CNTL = 0x5480,
/* D1GRPH_PRIMARY_SURFACE_ADDRESS = 0x6110, */
/* D1GRPH_PITCH = 0x6120, */
/* D1GRPH_Y_END = 0x6138, */
GRBM_STATUS = 0x8010,
R600_CMDFIFO_AVAIL_mask = 0x1f << 0,
R700_CMDFIFO_AVAIL_mask = 0xf << 0,
CMDFIFO_AVAIL_shift = 0,
SRBM_RQ_PENDING_bit = 1 << 5,
CP_RQ_PENDING_bit = 1 << 6,
CF_RQ_PENDING_bit = 1 << 7,
PF_RQ_PENDING_bit = 1 << 8,
GRBM_EE_BUSY_bit = 1 << 10,
GRBM_STATUS__VC_BUSY_bit = 1 << 11,
DB03_CLEAN_bit = 1 << 12,
CB03_CLEAN_bit = 1 << 13,
VGT_BUSY_NO_DMA_bit = 1 << 16,
GRBM_STATUS__VGT_BUSY_bit = 1 << 17,
TA03_BUSY_bit = 1 << 18,
GRBM_STATUS__TC_BUSY_bit = 1 << 19,
SX_BUSY_bit = 1 << 20,
SH_BUSY_bit = 1 << 21,
SPI03_BUSY_bit = 1 << 22,
SMX_BUSY_bit = 1 << 23,
SC_BUSY_bit = 1 << 24,
PA_BUSY_bit = 1 << 25,
DB03_BUSY_bit = 1 << 26,
CR_BUSY_bit = 1 << 27,
CP_COHERENCY_BUSY_bit = 1 << 28,
GRBM_STATUS__CP_BUSY_bit = 1 << 29,
CB03_BUSY_bit = 1 << 30,
GUI_ACTIVE_bit = 1 << 31,
GRBM_STATUS2 = 0x8014,
CR_CLEAN_bit = 1 << 0,
SMX_CLEAN_bit = 1 << 1,
SPI0_BUSY_bit = 1 << 8,
SPI1_BUSY_bit = 1 << 9,
SPI2_BUSY_bit = 1 << 10,
SPI3_BUSY_bit = 1 << 11,
TA0_BUSY_bit = 1 << 12,
TA1_BUSY_bit = 1 << 13,
TA2_BUSY_bit = 1 << 14,
TA3_BUSY_bit = 1 << 15,
DB0_BUSY_bit = 1 << 16,
DB1_BUSY_bit = 1 << 17,
DB2_BUSY_bit = 1 << 18,
DB3_BUSY_bit = 1 << 19,
CB0_BUSY_bit = 1 << 20,
CB1_BUSY_bit = 1 << 21,
CB2_BUSY_bit = 1 << 22,
CB3_BUSY_bit = 1 << 23,
GRBM_SOFT_RESET = 0x8020,
SOFT_RESET_CP_bit = 1 << 0,
SOFT_RESET_CB_bit = 1 << 1,
SOFT_RESET_CR_bit = 1 << 2,
SOFT_RESET_DB_bit = 1 << 3,
SOFT_RESET_PA_bit = 1 << 5,
SOFT_RESET_SC_bit = 1 << 6,
SOFT_RESET_SMX_bit = 1 << 7,
SOFT_RESET_SPI_bit = 1 << 8,
SOFT_RESET_SH_bit = 1 << 9,
SOFT_RESET_SX_bit = 1 << 10,
SOFT_RESET_TC_bit = 1 << 11,
SOFT_RESET_TA_bit = 1 << 12,
SOFT_RESET_VC_bit = 1 << 13,
SOFT_RESET_VGT_bit = 1 << 14,
SOFT_RESET_GRBM_GCA_bit = 1 << 15,
WAIT_UNTIL = 0x8040,
WAIT_CP_DMA_IDLE_bit = 1 << 8,
WAIT_CMDFIFO_bit = 1 << 10,
WAIT_2D_IDLE_bit = 1 << 14,
WAIT_3D_IDLE_bit = 1 << 15,
WAIT_2D_IDLECLEAN_bit = 1 << 16,
WAIT_3D_IDLECLEAN_bit = 1 << 17,
WAIT_EXTERN_SIG_bit = 1 << 19,
CMDFIFO_ENTRIES_mask = 0x1f << 20,
CMDFIFO_ENTRIES_shift = 20,
GRBM_READ_ERROR = 0x8058,
/* READ_ADDRESS_mask = 0xffff << 2, */
/* READ_ADDRESS_shift = 2, */
READ_REQUESTER_SRBM_bit = 1 << 28,
READ_REQUESTER_CP_bit = 1 << 29,
READ_REQUESTER_WU_POLL_bit = 1 << 30,
/* READ_ERROR_bit = 1 << 31, */
SCRATCH_REG0 = 0x8500,
SCRATCH_REG1 = 0x8504,
SCRATCH_REG2 = 0x8508,
SCRATCH_REG3 = 0x850c,
SCRATCH_REG4 = 0x8510,
SCRATCH_REG5 = 0x8514,
SCRATCH_REG6 = 0x8518,
SCRATCH_REG7 = 0x851c,
SCRATCH_UMSK = 0x8540,
SCRATCH_ADDR = 0x8544,
CP_COHER_CNTL = 0x85f0,
DEST_BASE_0_ENA_bit = 1 << 0,
DEST_BASE_1_ENA_bit = 1 << 1,
SO0_DEST_BASE_ENA_bit = 1 << 2,
SO1_DEST_BASE_ENA_bit = 1 << 3,
SO2_DEST_BASE_ENA_bit = 1 << 4,
SO3_DEST_BASE_ENA_bit = 1 << 5,
CB0_DEST_BASE_ENA_bit = 1 << 6,
CB1_DEST_BASE_ENA_bit = 1 << 7,
CB2_DEST_BASE_ENA_bit = 1 << 8,
CB3_DEST_BASE_ENA_bit = 1 << 9,
CB4_DEST_BASE_ENA_bit = 1 << 10,
CB5_DEST_BASE_ENA_bit = 1 << 11,
CB6_DEST_BASE_ENA_bit = 1 << 12,
CB7_DEST_BASE_ENA_bit = 1 << 13,
DB_DEST_BASE_ENA_bit = 1 << 14,
CR_DEST_BASE_ENA_bit = 1 << 15,
TC_ACTION_ENA_bit = 1 << 23,
VC_ACTION_ENA_bit = 1 << 24,
CB_ACTION_ENA_bit = 1 << 25,
DB_ACTION_ENA_bit = 1 << 26,
SH_ACTION_ENA_bit = 1 << 27,
SMX_ACTION_ENA_bit = 1 << 28,
CR0_ACTION_ENA_bit = 1 << 29,
CR1_ACTION_ENA_bit = 1 << 30,
CR2_ACTION_ENA_bit = 1 << 31,
CP_COHER_SIZE = 0x85f4,
CP_COHER_BASE = 0x85f8,
CP_COHER_STATUS = 0x85fc,
MATCHING_GFX_CNTX_mask = 0xff << 0,
MATCHING_GFX_CNTX_shift = 0,
MATCHING_CR_CNTX_mask = 0xffff << 8,
MATCHING_CR_CNTX_shift = 8,
STATUS_bit = 1 << 31,
CP_STALLED_STAT1 = 0x8674,
RBIU_TO_DMA_NOT_RDY_TO_RCV_bit = 1 << 0,
RBIU_TO_IBS_NOT_RDY_TO_RCV_bit = 1 << 1,
RBIU_TO_SEM_NOT_RDY_TO_RCV_bit = 1 << 2,
RBIU_TO_2DREGS_NOT_RDY_TO_RCV_bit = 1 << 3,
RBIU_TO_MEMWR_NOT_RDY_TO_RCV_bit = 1 << 4,
RBIU_TO_MEMRD_NOT_RDY_TO_RCV_bit = 1 << 5,
RBIU_TO_EOPD_NOT_RDY_TO_RCV_bit = 1 << 6,
RBIU_TO_RECT_NOT_RDY_TO_RCV_bit = 1 << 7,
RBIU_TO_STRMO_NOT_RDY_TO_RCV_bit = 1 << 8,
RBIU_TO_PSTAT_NOT_RDY_TO_RCV_bit = 1 << 9,
MIU_WAITING_ON_RDREQ_FREE_bit = 1 << 16,
MIU_WAITING_ON_WRREQ_FREE_bit = 1 << 17,
MIU_NEEDS_AVAIL_WRREQ_PHASE_bit = 1 << 18,
RCIU_WAITING_ON_GRBM_FREE_bit = 1 << 24,
RCIU_WAITING_ON_VGT_FREE_bit = 1 << 25,
RCIU_STALLED_ON_ME_READ_bit = 1 << 26,
RCIU_STALLED_ON_DMA_READ_bit = 1 << 27,
RCIU_HALTED_BY_REG_VIOLATION_bit = 1 << 28,
CP_STALLED_STAT2 = 0x8678,
PFP_TO_CSF_NOT_RDY_TO_RCV_bit = 1 << 0,
PFP_TO_MEQ_NOT_RDY_TO_RCV_bit = 1 << 1,
PFP_TO_VGT_NOT_RDY_TO_RCV_bit = 1 << 2,
PFP_HALTED_BY_INSTR_VIOLATION_bit = 1 << 3,
MULTIPASS_IB_PENDING_IN_PFP_bit = 1 << 4,
ME_BRUSH_WC_NOT_RDY_TO_RCV_bit = 1 << 8,
ME_STALLED_ON_BRUSH_LOGIC_bit = 1 << 9,
CR_CNTX_NOT_AVAIL_TO_ME_bit = 1 << 10,
GFX_CNTX_NOT_AVAIL_TO_ME_bit = 1 << 11,
ME_RCIU_NOT_RDY_TO_RCV_bit = 1 << 12,
ME_TO_CONST_NOT_RDY_TO_RCV_bit = 1 << 13,
ME_WAITING_DATA_FROM_PFP_bit = 1 << 14,
ME_WAITING_ON_PARTIAL_FLUSH_bit = 1 << 15,
RECT_FIFO_NEEDS_CR_RECT_DONE_bit = 1 << 16,
RECT_FIFO_NEEDS_WR_CONFIRM_bit = 1 << 17,
EOPD_FIFO_NEEDS_SC_EOP_DONE_bit = 1 << 18,
EOPD_FIFO_NEEDS_SMX_EOP_DONE_bit = 1 << 19,
EOPD_FIFO_NEEDS_WR_CONFIRM_bit = 1 << 20,
EOPD_FIFO_NEEDS_SIGNAL_SEM_bit = 1 << 21,
SO_NUMPRIM_FIFO_NEEDS_SOADDR_bit = 1 << 22,
SO_NUMPRIM_FIFO_NEEDS_NUMPRIM_bit = 1 << 23,
PIPE_STATS_FIFO_NEEDS_SAMPLE_bit = 1 << 24,
SURF_SYNC_NEEDS_IDLE_CNTXS_bit = 1 << 30,
SURF_SYNC_NEEDS_ALL_CLEAN_bit = 1 << 31,
CP_BUSY_STAT = 0x867c,
REG_BUS_FIFO_BUSY_bit = 1 << 0,
RING_FETCHING_DATA_bit = 1 << 1,
INDR1_FETCHING_DATA_bit = 1 << 2,
INDR2_FETCHING_DATA_bit = 1 << 3,
STATE_FETCHING_DATA_bit = 1 << 4,
PRED_FETCHING_DATA_bit = 1 << 5,
COHER_CNTR_NEQ_ZERO_bit = 1 << 6,
PFP_PARSING_PACKETS_bit = 1 << 7,
ME_PARSING_PACKETS_bit = 1 << 8,
RCIU_PFP_BUSY_bit = 1 << 9,
RCIU_ME_BUSY_bit = 1 << 10,
OUTSTANDING_READ_TAGS_bit = 1 << 11,
SEM_CMDFIFO_NOT_EMPTY_bit = 1 << 12,
SEM_FAILED_AND_HOLDING_bit = 1 << 13,
SEM_POLLING_FOR_PASS_bit = 1 << 14,
_3D_BUSY_bit = 1 << 15,
_2D_BUSY_bit = 1 << 16,
CP_STAT = 0x8680,
CSF_RING_BUSY_bit = 1 << 0,
CSF_WPTR_POLL_BUSY_bit = 1 << 1,
CSF_INDIRECT1_BUSY_bit = 1 << 2,
CSF_INDIRECT2_BUSY_bit = 1 << 3,
CSF_STATE_BUSY_bit = 1 << 4,
CSF_PREDICATE_BUSY_bit = 1 << 5,
CSF_BUSY_bit = 1 << 6,
MIU_RDREQ_BUSY_bit = 1 << 7,
MIU_WRREQ_BUSY_bit = 1 << 8,
ROQ_RING_BUSY_bit = 1 << 9,
ROQ_INDIRECT1_BUSY_bit = 1 << 10,
ROQ_INDIRECT2_BUSY_bit = 1 << 11,
ROQ_STATE_BUSY_bit = 1 << 12,
ROQ_PREDICATE_BUSY_bit = 1 << 13,
ROQ_ALIGN_BUSY_bit = 1 << 14,
PFP_BUSY_bit = 1 << 15,
MEQ_BUSY_bit = 1 << 16,
ME_BUSY_bit = 1 << 17,
QUERY_BUSY_bit = 1 << 18,
SEMAPHORE_BUSY_bit = 1 << 19,
INTERRUPT_BUSY_bit = 1 << 20,
SURFACE_SYNC_BUSY_bit = 1 << 21,
DMA_BUSY_bit = 1 << 22,
RCIU_BUSY_bit = 1 << 23,
CP_STAT__CP_BUSY_bit = 1 << 31,
CP_ME_CNTL = 0x86d8,
ME_STATMUX_mask = 0xff << 0,
ME_STATMUX_shift = 0,
ME_HALT_bit = 1 << 28,
CP_ME_STATUS = 0x86dc,
CP_RB_RPTR = 0x8700,
RB_RPTR_mask = 0xfffff << 0,
RB_RPTR_shift = 0,
CP_RB_WPTR_DELAY = 0x8704,
PRE_WRITE_TIMER_mask = 0xfffffff << 0,
PRE_WRITE_TIMER_shift = 0,
PRE_WRITE_LIMIT_mask = 0x0f << 28,
PRE_WRITE_LIMIT_shift = 28,
CP_ROQ_RB_STAT = 0x8780,
ROQ_RPTR_PRIMARY_mask = 0x3ff << 0,
ROQ_RPTR_PRIMARY_shift = 0,
ROQ_WPTR_PRIMARY_mask = 0x3ff << 16,
ROQ_WPTR_PRIMARY_shift = 16,
CP_ROQ_IB1_STAT = 0x8784,
ROQ_RPTR_INDIRECT1_mask = 0x3ff << 0,
ROQ_RPTR_INDIRECT1_shift = 0,
ROQ_WPTR_INDIRECT1_mask = 0x3ff << 16,
ROQ_WPTR_INDIRECT1_shift = 16,
CP_ROQ_IB2_STAT = 0x8788,
ROQ_RPTR_INDIRECT2_mask = 0x3ff << 0,
ROQ_RPTR_INDIRECT2_shift = 0,
ROQ_WPTR_INDIRECT2_mask = 0x3ff << 16,
ROQ_WPTR_INDIRECT2_shift = 16,
CP_MEQ_STAT = 0x8794,
MEQ_RPTR_mask = 0x3ff << 0,
MEQ_RPTR_shift = 0,
MEQ_WPTR_mask = 0x3ff << 16,
MEQ_WPTR_shift = 16,
CC_GC_SHADER_PIPE_CONFIG = 0x8950,
INACTIVE_QD_PIPES_mask = 0xff << 8,
INACTIVE_QD_PIPES_shift = 8,
R6XX_MAX_QD_PIPES = 8,
INACTIVE_SIMDS_mask = 0xff << 16,
INACTIVE_SIMDS_shift = 16,
R6XX_MAX_SIMDS = 8,
GC_USER_SHADER_PIPE_CONFIG = 0x8954,
VC_ENHANCE = 0x9714,
DB_DEBUG = 0x9830,
PREZ_MUST_WAIT_FOR_POSTZ_DONE = 1 << 31,
DB_WATERMARKS = 0x00009838,
DEPTH_FREE_mask = 0x1f << 0,
DEPTH_FREE_shift = 0,
DEPTH_FLUSH_mask = 0x3f << 5,
DEPTH_FLUSH_shift = 5,
FORCE_SUMMARIZE_mask = 0x0f << 11,
FORCE_SUMMARIZE_shift = 11,
DEPTH_PENDING_FREE_mask = 0x1f << 15,
DEPTH_PENDING_FREE_shift = 15,
DEPTH_CACHELINE_FREE_mask = 0x1f << 20,
DEPTH_CACHELINE_FREE_shift = 20,
EARLY_Z_PANIC_DISABLE_bit = 1 << 25,
LATE_Z_PANIC_DISABLE_bit = 1 << 26,
RE_Z_PANIC_DISABLE_bit = 1 << 27,
DB_EXTRA_DEBUG_mask = 0x0f << 28,
DB_EXTRA_DEBUG_shift = 28,
CP_RB_BASE = 0xc100,
CP_RB_CNTL = 0xc104,
RB_BUFSZ_mask = 0x3f << 0,
CP_RB_WPTR = 0xc114,
RB_WPTR_mask = 0xfffff << 0,
RB_WPTR_shift = 0,
CP_RB_RPTR_WR = 0xc108,
RB_RPTR_WR_mask = 0xfffff << 0,
RB_RPTR_WR_shift = 0,
CP_INT_STATUS = 0xc128,
DISABLE_CNTX_SWITCH_INT_STAT_bit = 1 << 0,
ENABLE_CNTX_SWITCH_INT_STAT_bit = 1 << 1,
SEM_SIGNAL_INT_STAT_bit = 1 << 18,
CNTX_BUSY_INT_STAT_bit = 1 << 19,
CNTX_EMPTY_INT_STAT_bit = 1 << 20,
WAITMEM_SEM_INT_STAT_bit = 1 << 21,
PRIV_INSTR_INT_STAT_bit = 1 << 22,
PRIV_REG_INT_STAT_bit = 1 << 23,
OPCODE_ERROR_INT_STAT_bit = 1 << 24,
SCRATCH_INT_STAT_bit = 1 << 25,
TIME_STAMP_INT_STAT_bit = 1 << 26,
RESERVED_BIT_ERROR_INT_STAT_bit = 1 << 27,
DMA_INT_STAT_bit = 1 << 28,
IB2_INT_STAT_bit = 1 << 29,
IB1_INT_STAT_bit = 1 << 30,
RB_INT_STAT_bit = 1 << 31,
/* SX_ALPHA_TEST_CONTROL = 0x00028410, */
ALPHA_FUNC__REF_NEVER = 0,
ALPHA_FUNC__REF_ALWAYS = 7,
/* DB_SHADER_CONTROL = 0x0002880c, */
Z_ORDER__EARLY_Z_THEN_LATE_Z = 2,
/* PA_SU_SC_MODE_CNTL = 0x00028814, */
/* POLY_MODE_mask = 0x03 << 3, */
POLY_MODE__TRIANGLES = 0, POLY_MODE__DUAL_MODE,
/* POLYMODE_FRONT_PTYPE_mask = 0x07 << 5, */
POLYMODE_PTYPE__POINTS = 0, POLYMODE_PTYPE__LINES, POLYMODE_PTYPE__TRIANGLES,
PA_SC_AA_SAMPLE_LOCS_8S_WD1_M = 0x00028c20,
DB_SRESULTS_COMPARE_STATE0 = 0x00028d28, /* See autoregs: DB_SRESULTS_COMPARE_STATE1 */
/* DB_SRESULTS_COMPARE_STATE1 = 0x00028d2c, */
DB_ALPHA_TO_MASK = 0x00028d44,
ALPHA_TO_MASK_ENABLE = 1 << 0,
ALPHA_TO_MASK_OFFSET0_mask = 0x03 << 8,
ALPHA_TO_MASK_OFFSET0_shift = 8,
ALPHA_TO_MASK_OFFSET1_mask = 0x03 << 8,
ALPHA_TO_MASK_OFFSET1_shift = 10,
ALPHA_TO_MASK_OFFSET2_mask = 0x03 << 8,
ALPHA_TO_MASK_OFFSET2_shift = 12,
ALPHA_TO_MASK_OFFSET3_mask = 0x03 << 8,
ALPHA_TO_MASK_OFFSET3_shift = 14,
/* SQ_VTX_CONSTANT_WORD2_0 = 0x00038008, */
/* SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask = 0x3f << 20, */
FMT_INVALID=0, FMT_8, FMT_4_4, FMT_3_3_2,
FMT_16=5, FMT_16_FLOAT, FMT_8_8,
FMT_5_6_5, FMT_6_5_5, FMT_1_5_5_5, FMT_4_4_4_4,
FMT_5_5_5_1, FMT_32, FMT_32_FLOAT, FMT_16_16,
FMT_16_16_FLOAT=16, FMT_8_24, FMT_8_24_FLOAT, FMT_24_8,
FMT_24_8_FLOAT, FMT_10_11_11, FMT_10_11_11_FLOAT, FMT_11_11_10,
FMT_11_11_10_FLOAT, FMT_2_10_10_10, FMT_8_8_8_8, FMT_10_10_10_2,
FMT_X24_8_32_FLOAT, FMT_32_32, FMT_32_32_FLOAT, FMT_16_16_16_16,
FMT_16_16_16_16_FLOAT=32, FMT_32_32_32_32=34, FMT_32_32_32_32_FLOAT,
FMT_1 = 37, FMT_GB_GR=39,
FMT_BG_RG, FMT_32_AS_8, FMT_32_AS_8_8, FMT_5_9_9_9_SHAREDEXP,
FMT_8_8_8, FMT_16_16_16, FMT_16_16_16_FLOAT, FMT_32_32_32,
FMT_32_32_32_FLOAT=48,
/* High level register file lengths */
SQ_ALU_CONSTANT = SQ_ALU_CONSTANT0_0, /* 256 PS, 256 VS */
SQ_ALU_CONSTANT_ps_num = 256,
SQ_ALU_CONSTANT_vs_num = 256,
SQ_ALU_CONSTANT_all_num = 512,
SQ_ALU_CONSTANT_offset = 16,
SQ_ALU_CONSTANT_ps = 0,
SQ_ALU_CONSTANT_vs = SQ_ALU_CONSTANT_ps + SQ_ALU_CONSTANT_ps_num,
SQ_TEX_RESOURCE = SQ_TEX_RESOURCE_WORD0_0, /* 160 PS, 160 VS, 16 FS, 160 GS */
SQ_TEX_RESOURCE_ps_num = 160,
SQ_TEX_RESOURCE_vs_num = 160,
SQ_TEX_RESOURCE_fs_num = 16,
SQ_TEX_RESOURCE_gs_num = 160,
SQ_TEX_RESOURCE_all_num = 496,
SQ_TEX_RESOURCE_offset = 28,
SQ_TEX_RESOURCE_ps = 0,
SQ_TEX_RESOURCE_vs = SQ_TEX_RESOURCE_ps + SQ_TEX_RESOURCE_ps_num,
SQ_TEX_RESOURCE_fs = SQ_TEX_RESOURCE_vs + SQ_TEX_RESOURCE_vs_num,
SQ_TEX_RESOURCE_gs = SQ_TEX_RESOURCE_fs + SQ_TEX_RESOURCE_fs_num,
SQ_VTX_RESOURCE = SQ_VTX_CONSTANT_WORD0_0, /* 160 PS, 160 VS, 16 FS, 160 GS */
SQ_VTX_RESOURCE_ps_num = 160,
SQ_VTX_RESOURCE_vs_num = 160,
SQ_VTX_RESOURCE_fs_num = 16,
SQ_VTX_RESOURCE_gs_num = 160,
SQ_VTX_RESOURCE_all_num = 496,
SQ_VTX_RESOURCE_offset = 28,
SQ_VTX_RESOURCE_ps = 0,
SQ_VTX_RESOURCE_vs = SQ_VTX_RESOURCE_ps + SQ_VTX_RESOURCE_ps_num,
SQ_VTX_RESOURCE_fs = SQ_VTX_RESOURCE_vs + SQ_VTX_RESOURCE_vs_num,
SQ_VTX_RESOURCE_gs = SQ_VTX_RESOURCE_fs + SQ_VTX_RESOURCE_fs_num,
SQ_TEX_SAMPLER_WORD = SQ_TEX_SAMPLER_WORD0_0, /* 18 per PS, VS, GS */
SQ_TEX_SAMPLER_WORD_ps_num = 18,
SQ_TEX_SAMPLER_WORD_vs_num = 18,
SQ_TEX_SAMPLER_WORD_gs_num = 18,
SQ_TEX_SAMPLER_WORD_all_num = 54,
SQ_TEX_SAMPLER_WORD_offset = 12,
SQ_TEX_SAMPLER_WORD_ps = 0,
SQ_TEX_SAMPLER_WORD_vs = SQ_TEX_SAMPLER_WORD_ps + SQ_TEX_SAMPLER_WORD_ps_num,
SQ_TEX_SAMPLER_WORD_gs = SQ_TEX_SAMPLER_WORD_vs + SQ_TEX_SAMPLER_WORD_vs_num,
SQ_LOOP_CONST = SQ_LOOP_CONST_0, /* 32 per PS, VS, GS */
SQ_LOOP_CONST_ps_num = 32,
SQ_LOOP_CONST_vs_num = 32,
SQ_LOOP_CONST_gs_num = 32,
SQ_LOOP_CONST_all_num = 96,
SQ_LOOP_CONST_offset = 4,
SQ_LOOP_CONST_ps = 0,
SQ_LOOP_CONST_vs = SQ_LOOP_CONST_ps + SQ_LOOP_CONST_ps_num,
SQ_LOOP_CONST_gs = SQ_LOOP_CONST_vs + SQ_LOOP_CONST_vs_num,
SQ_BOOL_CONST = SQ_BOOL_CONST_0, /* 32 bits per PS, VS, GS */
SQ_BOOL_CONST_ps_num = 1,
SQ_BOOL_CONST_vs_num = 1,
SQ_BOOL_CONST_gs_num = 1,
SQ_BOOL_CONST_all_num = 3,
SQ_BOOL_CONST_offset = 4,
SQ_BOOL_CONST_ps = 0,
SQ_BOOL_CONST_vs = SQ_BOOL_CONST_ps + SQ_BOOL_CONST_ps_num,
SQ_BOOL_CONST_gs = SQ_BOOL_CONST_vs + SQ_BOOL_CONST_vs_num
};
#endif

@@ -159,6 +159,7 @@ static inline void __raw_writeq(__u64 b, volatile void __iomem *addr)
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEONFB_CONN_LIMIT 4
@@ -427,11 +428,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
*/
struct radeon_ib {
struct list_head list;
unsigned long idx;
unsigned idx;
uint64_t gpu_addr;
struct radeon_fence *fence;
uint32_t *ptr;
uint32_t length_dw;
bool free;
};
/*
@@ -441,10 +443,9 @@ struct radeon_ib {
struct radeon_ib_pool {
// struct mutex mutex;
struct radeon_bo *robj;
struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
unsigned head_id;
};
struct radeon_cp {

@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* Asrock RS600 board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x1849) &&
(dev->pdev->subsystem_device == 0x7941)) {
if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x147b) &&

@@ -780,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
* connected and the DVI port disconnected. If the edid doesn't
* say HDMI, vice versa.
*/
if (radeon_connector->shared_ddc && connector_status_connected) {
if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_device *dev = connector->dev;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
@@ -1060,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
return;
}
if (radeon_connector->ddc_bus && i2c_bus->valid) {
if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
sizeof(struct radeon_i2c_bus_rec)) == 0) {
if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
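
The first hunk above fixes an always-true condition: connector_status_connected is a nonzero enum constant, so testing it directly (instead of comparing ret against it) reduced the check to just shared_ddc. A toy reproduction of that bug class, with illustrative names rather than the DRM enum:

#include <stdio.h>

enum status { status_disconnected, status_connected };

int main(void)
{
    enum status ret = status_disconnected;
    int shared_ddc = 1;

    /* Old pattern: status_connected is the constant 1, so this branch
     * is taken whenever shared_ddc is set, regardless of ret. */
    if (shared_ddc && status_connected)
        puts("buggy check fires even though ret is disconnected");

    /* Fixed pattern: compare the detection result explicitly. */
    if (shared_ddc && ret == status_connected)
        puts("never printed here");
    return 0;
}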

@@ -34,6 +34,7 @@
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "display.h"
#include <drm/drm_pciids.h>
@@ -50,6 +51,7 @@ int radeon_new_pll = 1;
int radeon_vram_limit = 0;
int radeon_audio = 0;
extern display_t *rdisplay;
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, videomode_t *mode);
@@ -57,6 +59,7 @@ int init_display_kms(struct radeon_device *rdev, videomode_t *mode);
int get_modes(videomode_t *mode, int *count);
int set_user_mode(videomode_t *mode);
int r100_2D_test(struct radeon_device *rdev);
/* Legacy VGA regions */
@@ -957,6 +960,8 @@ static pci_dev_t device;
u32_t drvEntry(int action, char *cmdline)
{
struct radeon_device *rdev = NULL;
struct pci_device_id *ent;
int err;
@@ -998,6 +1003,14 @@ u32_t drvEntry(int action, char *cmdline)
err = drm_get_dev(&device.pci_dev, ent);
rdev = rdisplay->ddev->dev_private;
if( (rdev->asic == &r600_asic) ||
(rdev->asic == &rv770_asic))
r600_2D_test(rdev);
else
r100_2D_test(rdev);
err = RegService("DISPLAY", display_handler);
if( err != 0)

@@ -109,9 +109,6 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
radeon_bo_unreserve(rdev->gart.table.vram.robj);
rdev->gart.table_addr = gpu_addr;
return r;
dbgprintf("alloc gart vram: gpu_base %x lin_addr %x\n",
rdev->gart.table_addr, rdev->gart.table.vram.ptr);
}
void radeon_gart_table_vram_free(struct radeon_device *rdev)

@@ -144,8 +144,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
extern int radeon_bo_list_reserve(struct list_head *head);
extern void radeon_bo_list_unreserve(struct list_head *head);
extern int radeon_bo_list_validate(struct list_head *head, void *fence);
extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
extern int radeon_bo_list_validate(struct list_head *head);
extern void radeon_bo_list_fence(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,

@@ -44,68 +44,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
struct radeon_fence *fence;
struct radeon_ib *nib;
unsigned long i;
int r = 0;
int r = 0, i, c;
*ib = NULL;
r = radeon_fence_create(rdev, &fence);
if (r) {
DRM_ERROR("failed to create fence for new IB\n");
dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
mutex_lock(&rdev->ib_pool.mutex);
i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (i < RADEON_IB_POOL_SIZE) {
set_bit(i, rdev->ib_pool.alloc_bm);
rdev->ib_pool.ibs[i].length_dw = 0;
*ib = &rdev->ib_pool.ibs[i];
mutex_unlock(&rdev->ib_pool.mutex);
goto out;
for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
i &= (RADEON_IB_POOL_SIZE - 1);
if (rdev->ib_pool.ibs[i].free) {
nib = &rdev->ib_pool.ibs[i];
break;
}
if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
/* we go do nothings here */
mutex_unlock(&rdev->ib_pool.mutex);
DRM_ERROR("all IB allocated none scheduled.\n");
r = -EINVAL;
goto out;
}
/* get the first ib on the scheduled list */
nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
struct radeon_ib, list);
if (nib->fence == NULL) {
/* we go do nothings here */
if (nib == NULL) {
/* This should never happen: it means we allocated all
* IBs and haven't scheduled one yet; return EBUSY to
* userspace hoping that a later ioctl retry has better
* luck
*/
dev_err(rdev->dev, "no free indirect buffer !\n");
mutex_unlock(&rdev->ib_pool.mutex);
DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
r = -EINVAL;
goto out;
radeon_fence_unref(&fence);
return -EBUSY;
}
mutex_unlock(&rdev->ib_pool.mutex);
rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
nib->free = false;
if (nib->fence) {
mutex_unlock(&rdev->ib_pool.mutex);
r = radeon_fence_wait(nib->fence, false);
if (r) {
DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
(unsigned long)nib->gpu_addr, nib->length_dw);
DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
goto out;
dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
mutex_lock(&rdev->ib_pool.mutex);
nib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
return r;
}
mutex_lock(&rdev->ib_pool.mutex);
}
radeon_fence_unref(&nib->fence);
nib->fence = fence;
nib->length_dw = 0;
/* scheduled list is accessed here */
mutex_lock(&rdev->ib_pool.mutex);
list_del(&nib->list);
INIT_LIST_HEAD(&nib->list);
mutex_unlock(&rdev->ib_pool.mutex);
*ib = nib;
out:
if (r) {
radeon_fence_unref(&fence);
} else {
(*ib)->fence = fence;
}
return r;
return 0;
}
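
The rewritten allocator treats the pool as a ring: starting at head_id it scans every slot for one marked free, and because RADEON_IB_POOL_SIZE is a power of two (see the comment added in radeon.h above) the wrap-around is a cheap mask rather than a modulo. Below is a simplified, self-contained sketch of just that scan, without the fence and locking logic:

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 16            /* must be a power of two */

struct ib { unsigned idx; bool free; };

static struct ib pool[POOL_SIZE];
static unsigned head_id;

/* Circular first-free scan, same shape as the new radeon_ib_get():
 * start at head_id, wrap with a mask, give up after one full lap. */
static struct ib *ib_get(void)
{
    unsigned i = head_id;

    for (unsigned c = 0; c < POOL_SIZE; c++, i++) {
        i &= POOL_SIZE - 1;     /* wrap; valid because POOL_SIZE is 2^n */
        if (pool[i].free) {
            pool[i].free = false;
            head_id = (i + 1) & (POOL_SIZE - 1);
            return &pool[i];
        }
    }
    return NULL;                /* everything in flight: caller gets -EBUSY */
}

int main(void)
{
    for (unsigned i = 0; i < POOL_SIZE; i++)
        pool[i] = (struct ib){ .idx = i, .free = true };

    struct ib *a = ib_get();
    struct ib *b = ib_get();
    printf("allocated IB %u, then IB %u\n", a->idx, b->idx);
    return 0;
}
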
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -116,19 +103,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
/* IB is scheduled & not signaled don't do anythings */
mutex_unlock(&rdev->ib_pool.mutex);
return;
}
list_del(&tmp->list);
INIT_LIST_HEAD(&tmp->list);
if (tmp->fence)
if (!tmp->fence->emited)
radeon_fence_unref(&tmp->fence);
tmp->length_dw = 0;
clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
mutex_lock(&rdev->ib_pool.mutex);
tmp->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
}
@@ -138,7 +116,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !rdev->cp.ready) {
/* TODO: Nothing in the IB that we should report. */
DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
@@ -151,7 +129,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
/* once scheduled, an IB is considered free and protected by the fence */
ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev);
return 0;
@@ -168,7 +147,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->ib_pool.robj)
return 0;
/* Allocate 1M object buffer */
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
@@ -199,9 +177,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
rdev->ib_pool.ibs[i].ptr = ptr + offset;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
rdev->ib_pool.ibs[i].free = true;
}
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
if (radeon_debugfs_ib_init(rdev)) {
@@ -218,7 +196,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
return;
}
mutex_lock(&rdev->ib_pool.mutex);
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (likely(r == 0)) {
@@ -372,7 +349,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
if (ib == NULL) {
return 0;
}
seq_printf(m, "IB %04lu\n", ib->idx);
seq_printf(m, "IB %04u\n", ib->idx);
seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {

File diff suppressed because it is too large

@@ -487,7 +487,6 @@ static int rv515_startup(struct radeon_device *rdev)
return r;
}
/* Enable IRQ */
// rdev->irq.sw_int = true;
// rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */