git-svn-id: svn://kolibrios.org@1246 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2009-11-02 20:36:12 +00:00
parent 184460aa4b
commit c4874ac302
24 changed files with 1749 additions and 586 deletions

View File

@ -626,6 +626,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return NULL;
}
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
DRM_DEBUG_KMS("Incorrect Detailed timing. "
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
mode = drm_mode_create(dev);
if (!mode)
return NULL;
@ -647,6 +653,15 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
/* perform the basic check for the detailed timing */
if (mode->hsync_end > mode->htotal ||
mode->vsync_end > mode->vtotal) {
drm_mode_destroy(dev, mode);
DRM_DEBUG_KMS("Incorrect detailed timing. "
"Sync is beyond the blank.\n");
return NULL;
}
drm_mode_set_name(mode);
if (pt->misc & DRM_EDID_PT_INTERLACED)
@ -1052,8 +1067,8 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
goto end;
}
if (!edid_is_valid((struct edid *)buf)) {
// dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
// drm_get_connector_name(connector));
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
ret = -1;
}
end:
@ -1078,8 +1093,8 @@ struct edid *drm_get_edid(struct drm_connector *connector,
edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
GFP_KERNEL);
if (edid == NULL) {
// dev_warn(&connector->dev->pdev->dev,
// "Failed to allocate EDID\n");
dev_warn(&connector->dev->pdev->dev,
"Failed to allocate EDID\n");
goto end;
}
@ -1094,11 +1109,11 @@ struct edid *drm_get_edid(struct drm_connector *connector,
int edid_ext_num = edid->extensions;
if (edid_ext_num > MAX_EDID_EXT_NUM) {
// dev_warn(&connector->dev->pdev->dev,
// "The number of extension(%d) is "
// "over max (%d), actually read number (%d)\n",
// edid_ext_num, MAX_EDID_EXT_NUM,
// MAX_EDID_EXT_NUM);
dev_warn(&connector->dev->pdev->dev,
"The number of extension(%d) is "
"over max (%d), actually read number (%d)\n",
edid_ext_num, MAX_EDID_EXT_NUM,
MAX_EDID_EXT_NUM);
/* Reset EDID extension number to be read */
edid_ext_num = MAX_EDID_EXT_NUM;
}
@ -1202,8 +1217,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
return 0;
}
if (!edid_is_valid(edid)) {
// dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
// drm_get_connector_name(connector));
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
}

View File

@ -240,22 +240,39 @@ out_free:
}
EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
int pindex;
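/* For a truecolor visual there is no hardware palette to program: the 16-bit
* red/green/blue values are shifted down to the component widths described in
* info->var and packed into the 16-entry software pseudo palette that the
* framebuffer console reads instead. */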
if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
u32 *palette;
u32 value;
/* place color in psuedopalette */
if (regno > 16)
return -EINVAL;
palette = (u32 *)info->pseudo_palette;
red >>= (16 - info->var.red.length);
green >>= (16 - info->var.green.length);
blue >>= (16 - info->var.blue.length);
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
palette[regno] = value;
return 0;
}
pindex = regno;
if (fb->bits_per_pixel == 16) {
pindex = regno << 3;
if (fb->depth == 16 && regno > 63)
return;
return -EINVAL;
if (fb->depth == 15 && regno > 31)
return;
return -EINVAL;
if (fb->depth == 16) {
u16 r, g, b;
@ -279,13 +296,7 @@ static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
if (fb->depth != 16)
fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
return 0;
if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
((u32 *) fb->pseudo_palette)[regno] =
(regno << info->var.red.offset) |
(regno << info->var.green.offset) |
(regno << info->var.blue.offset);
}
}
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
@ -322,7 +333,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
if (transp)
htransp = *transp++;
setcolreg(crtc, hred, hgreen, hblue, start++, info);
rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
if (rc)
return rc;
}
crtc_funcs->load_lut(crtc);
}
@ -341,6 +354,7 @@ int drm_fb_helper_setcolreg(unsigned regno,
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
int i;
int ret;
if (regno > 255)
return 1;
@ -354,8 +368,10 @@ int drm_fb_helper_setcolreg(unsigned regno,
if (i == fb_helper->crtc_count)
continue;
ret = setcolreg(crtc, red, green, blue, regno, info);
if (ret)
return ret;
setcolreg(crtc, red, green, blue, regno, info);
crtc_funcs->load_lut(crtc);
}
return 0;
@ -702,7 +718,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_DIRECTCOLOR;
FB_VISUAL_TRUECOLOR;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */

View File

@ -82,7 +82,7 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
struct drm_mm_node *child;
child = malloc(sizeof(*child));
child = kzalloc(sizeof(*child), 0);
if (unlikely(child == NULL)) {
spin_lock(&mm->unused_lock);

View File

@ -874,7 +874,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
if (!ida->free_bitmap) {
struct ida_bitmap *bitmap;
bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
bitmap = kzalloc(sizeof(struct ida_bitmap), gfp_mask);
if (!bitmap)
return 0;

View File

@ -146,8 +146,6 @@ char *strcpy(char *s1, const char *s2);
char *strncpy (char *dst, const char *src, size_t len);
void *malloc(size_t size);
#define kmalloc(s,f) malloc((s))
#define kfree free
static inline void *kzalloc(size_t size, u32_t flags)
@ -157,6 +155,8 @@ static inline void *kzalloc(size_t size, u32_t flags)
return ret;
}
#define kmalloc(s,f) kzalloc((s), (f))
struct drm_file;
#define offsetof(TYPE,MEMBER) __builtin_offsetof(TYPE,MEMBER)

View File

@ -168,7 +168,7 @@ static inline void usleep(u32_t delay)
{
if( !delay )
delay++;
delay*= 256;
delay*= 500;
while(delay--)
__asm__ __volatile__(
@ -180,7 +180,7 @@ static inline void usleep(u32_t delay)
static inline void udelay(u32_t delay)
{
if(!delay) delay++;
delay*= 256;
delay*= 500;
while(delay--)
{
@ -387,4 +387,12 @@ static inline void iounmap(void *addr)
FreeKernelSpace(addr);
}
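/* Minimal stand-in for the Linux DMA API: AllocPages() takes a page count, so
* size is assumed to be a multiple of the 4 KiB page size; the physical
* address is returned through *dma_handle and the pages are mapped into
* kernel space writable and uncached. */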
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
addr_t *dma_handle)
{
*dma_handle = AllocPages(size >> 12);
return (void*)MapIoMem(*dma_handle, size, PG_SW+PG_NOCACHE);
}
#endif

View File

@ -466,6 +466,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
ENTER();
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@ -487,8 +489,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
// return -EINVAL;
// }
fb_location = 0; //rdev->mc.vram_location;
fb_location = rdev->mc.vram_location;
switch (crtc->fb->bits_per_pixel) {
case 8:
@ -562,10 +563,13 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
if (old_fb && old_fb != crtc->fb) {
// if (old_fb && old_fb != crtc->fb) {
// radeon_fb = to_radeon_framebuffer(old_fb);
// radeon_gem_object_unpin(radeon_fb->obj);
}
// }
LEAVE();
return 0;
}

View File

@ -0,0 +1,92 @@
#include <stdint.h>
#include <drm/drmP.h>
#include <drm.h>
#include <drm_mm.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_object.h"
static int my_atoi(char **cmd)
{
char* p = *cmd;
int val = 0;
for (;; *p++) {
switch (*p) {
case '0' ... '9':
val = 10*val+(*p-'0');
break;
default:
*cmd = p;
return val;
}
}
}
char* parse_mode(char *p, mode_t *mode)
{
char c;
while( (c = *p++) == ' ');
if( c )
{
p--;
mode->width = my_atoi(&p);
if(*p == 'x') p++;
mode->height = my_atoi(&p);
if(*p == 'x') p++;
mode->bpp = 32;
mode->freq = my_atoi(&p);
if( mode->freq == 0 )
mode->freq = 60;
}
return p;
};
char* parse_path(char *p, char *log)
{
char c;
while( (c = *p++) == ' ');
p--;
while( (c = *log++ = *p++) && (c != ' '));
*log = 0;
return p;
};
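/* parse_cmdline() understands three space-separated options, for example
* "-m1024x768x60 -l/rd/1/drivers/ati.log -n" (the values here are only an
* illustration): -m<width>x<height>x<refresh> selects the video mode (bpp is
* forced to 32, refresh defaults to 60), -l<path> sets the log file path and
* -n disables KMS. */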
void parse_cmdline(char *cmdline, mode_t *mode, char *log, int *kms)
{
char *p = cmdline;
char c = *p++;
while( c )
{
if( c == '-')
{
switch(*p++)
{
case 'm':
p = parse_mode(p, mode);
break;
case 'l':
p = parse_path(p, log);
break;
case 'n':
*kms = 0;
};
};
c = *p++;
};
};

View File

@ -1,25 +1,59 @@
typedef struct tag_object kobj_t;
typedef struct tag_display display_t;
struct tag_object
{
uint32_t magic;
void *destroy;
kobj_t *fd;
kobj_t *bk;
uint32_t pid;
};
typedef struct
{
u32_t width;
u32_t height;
u32_t bpp;
u32_t lfb;
u32_t pci_fb;
u32_t gpu_fb;
u32_t fb_object;
kobj_t header;
uint32_t *data;
uint32_t hot_x;
uint32_t hot_y;
struct list_head list;
struct radeon_object *robj;
}cursor_t;
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
struct tag_display
{
int x;
int y;
int width;
int height;
int bpp;
int vrefresh;
int pitch;
int lfb;
int supported_modes;
struct drm_device *ddev;
struct drm_connector *connector;
struct drm_crtc *crtc;
struct list_head cursors;
struct drm_display_mode *mode;
cursor_t *cursor;
int (*init_cursor)(cursor_t*);
cursor_t* (__stdcall *select_cursor)(cursor_t*);
void (*show_cursor)(int show);
void (__stdcall *move_cursor)(cursor_t *cursor, int x, int y);
void (__stdcall *restore_cursor)(int x, int y);
int (*set_cursor)();
int (*show_cursor)();
int (*hide_cursor)();
int (*move_cursor)();
int (*copy)();
int (*blit)();
}display_t;
};
extern display_t *rdisplay;
int init_cursor(cursor_t *cursor);
void __stdcall restore_cursor(int x, int y);

View File

@ -70,7 +70,9 @@ NAME_SRC= \
rs690.c \
rv770.c \
radeon_fb.c \
rdisplay.c
rdisplay.c \
rdisplay_kms.c \
cmdline.c
SRC_DEP:=

View File

@ -344,7 +344,7 @@ static dev_t* pci_scan_device(u32_t bus, int devfn)
hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE);
dev = (dev_t*)malloc(sizeof(dev_t));
dev = (dev_t*)kzalloc(sizeof(dev_t), 0);
INIT_LIST_HEAD(&dev->link);

View File

@ -2784,6 +2784,7 @@ int r100_init(struct radeon_device *rdev)
r100_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r100_mc_init(rdev);
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
if (r)
return r;
/* Fence driver */

View File

@ -1277,6 +1277,7 @@ int r300_init(struct radeon_device *rdev)
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev);
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
if (r)
return r;
/* Fence driver */

View File

@ -250,6 +250,7 @@ int r520_init(struct radeon_device *rdev)
r520_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev);
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
if (r)
return r;
rv515_debugfs(rdev);

View File

@ -1297,6 +1297,7 @@ int r600_init(struct radeon_device *rdev)
// if (r)
// return r;
r = r600_mc_init(rdev);
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
if (r)
return r;
/* Memory manager */

View File

@ -860,54 +860,6 @@ static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uin
void r100_pll_errata_after_index(struct radeon_device *rdev);
enum chipset_type {
NOT_SUPPORTED,
SUPPORTED,
};
struct agp_version {
u16_t major;
u16_t minor;
};
struct agp_bridge_data;
struct agp_kern_info {
struct agp_version version;
struct pci_dev *device;
enum chipset_type chipset;
unsigned long mode;
unsigned long aper_base;
size_t aper_size;
int max_memory; /* In pages */
int current_memory;
bool cant_use_aperture;
unsigned long page_mask;
// struct vm_operations_struct *vm_ops;
};
/**
* AGP data.
*
* \sa drm_agp_init() and drm_device::agp.
*/
struct drm_agp_head {
struct agp_kern_info agp_info; /**< AGP device information */
// struct list_head memory;
unsigned long mode; /**< AGP mode */
struct agp_bridge_data *bridge;
int enabled; /**< whether the AGP bus as been enabled */
int acquired; /**< whether the AGP device has been acquired */
unsigned long base;
int agp_mtrr;
int cant_use_aperture;
unsigned long page_mask;
};
/*
* ASICs helpers.
*/

View File

@ -964,6 +964,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t subpixel_order = SubPixelNone;
int ret;
ENTER();
dbgprintf("id %d device %x type %x i2c %x\n",
connector_id, supported_device, connector_type, i2c_bus);
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
@ -973,6 +977,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector = to_radeon_connector(connector);
if (radeon_connector->connector_id == connector_id) {
radeon_connector->devices |= supported_device;
LEAVE();
return;
}
}
@ -1066,6 +1071,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
LEAVE();
return;
failed:
@ -1073,4 +1079,5 @@ failed:
radeon_i2c_destroy(radeon_connector->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
LEAVE();
}

View File

@ -45,9 +45,12 @@ int radeon_gart_size = 512; /* default gart size */
int radeon_benchmarking = 0;
int radeon_connector_table = 0;
int radeon_tv = 0;
int radeon_modeset = 1;
void parse_cmdline(char *cmdline, mode_t *mode, char *log);
int init_display(struct radeon_device *rdev, mode_t *mode);
int init_display_kms(struct radeon_device *rdev, mode_t *mode);
int get_modes(mode_t *mode, int *count);
int set_user_mode(mode_t *mode);
@ -690,10 +693,13 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
* otherwise it should provide enough functionalities
* for shadowfb to run
*/
if( radeon_modeset )
{
r = radeon_modeset_init(rdev);
if (r) {
return r;
}
};
return 0;
}
@ -702,12 +708,12 @@ mode_t usermode;
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_device *dev;
static struct drm_device *dev;
int ret;
ENTER();
dev = malloc(sizeof(*dev));
dev = kzalloc(sizeof(*dev), 0);
if (!dev)
return -ENOMEM;
@ -736,6 +742,9 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
// driver->name, driver->major, driver->minor, driver->patchlevel,
// driver->date, pci_name(pdev), dev->primary->index);
if( radeon_modeset )
init_display_kms(dev->dev_private, &usermode);
else
init_display(dev->dev_private, &usermode);
LEAVE();
@ -836,7 +845,8 @@ int _stdcall display_handler(ioctl_t *io)
dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n", dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
inp, io->inp_size, io->out_size ); inp, io->inp_size, io->out_size );
if( (outp != NULL) && (io->out_size == 4) && if( radeon_modeset &&
(outp != NULL) && (io->out_size == 4) &&
(io->inp_size == *outp * sizeof(mode_t)) ) (io->inp_size == *outp * sizeof(mode_t)) )
{ {
retval = get_modes((mode_t*)inp, outp); retval = get_modes((mode_t*)inp, outp);
@ -844,25 +854,28 @@ int _stdcall display_handler(ioctl_t *io)
break;
case SRV_SET_MODE:
if( (inp != NULL) &&
dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
inp, io->inp_size);
if( radeon_modeset &&
(inp != NULL) &&
(io->inp_size == sizeof(mode_t)) )
{
retval = set_user_mode((mode_t*)inp);
};
break;
};
return retval;
}
static char log[256];
static dev_t device;
u32_t drvEntry(int action, char *cmdline)
{
static char log[256];
struct pci_device_id *ent;
dev_t device;
int err;
u32_t retval = 0;
@ -885,6 +898,7 @@ u32_t drvEntry(int action, char *cmdline)
return 0;
};
}
dbgprintf("Radeon RC05 cmdline %s\n", cmdline);
enum_pci_devices();
@ -901,6 +915,10 @@ u32_t drvEntry(int action, char *cmdline)
err = drm_get_dev(&device.pci_dev, ent);
return RegService("DISPLAY", display_handler);
err = RegService("DISPLAY", display_handler);
if( err != 0)
dbgprintf("Set DISPLAY handler\n");
return err;
};

View File

@ -37,8 +37,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
void *ptr;
// ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
// &rdev->gart.table_addr);
ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
&rdev->gart.table_addr);
if (ptr == NULL) {
return -ENOMEM;
}

View File

@ -435,6 +435,8 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
// if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
// return -EINVAL;
// }
base = rdev->mc.vram_location;
/* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */
radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;

View File

@ -6,77 +6,12 @@
#include "radeon_drm.h" #include "radeon_drm.h"
#include "radeon.h" #include "radeon.h"
#include "radeon_object.h" #include "radeon_object.h"
#include "display.h"
#define CURSOR_WIDTH 64 display_t *rdisplay;
#define CURSOR_HEIGHT 64
typedef struct tag_object kobj_t; static cursor_t* __stdcall select_cursor(cursor_t *cursor);
typedef struct tag_display display_t; static void __stdcall move_cursor(cursor_t *cursor, int x, int y);
struct tag_object
{
uint32_t magic;
void *destroy;
kobj_t *fd;
kobj_t *bk;
uint32_t pid;
};
typedef struct
{
kobj_t header;
uint32_t *data;
uint32_t hot_x;
uint32_t hot_y;
struct list_head list;
struct radeon_object *robj;
}cursor_t;
int init_cursor(cursor_t *cursor);
cursor_t* __stdcall select_cursor(cursor_t *cursor);
void __stdcall move_cursor(cursor_t *cursor, int x, int y);
void __stdcall restore_cursor(int x, int y);
struct tag_display
{
int x;
int y;
int width;
int height;
int bpp;
int vrefresh;
int pitch;
int lfb;
int supported_modes;
struct drm_device *ddev;
struct drm_connector *connector;
struct drm_crtc *crtc;
struct list_head cursors;
cursor_t *cursor;
int (*init_cursor)(cursor_t*);
cursor_t* (__stdcall *select_cursor)(cursor_t*);
void (*show_cursor)(int show);
void (__stdcall *move_cursor)(cursor_t *cursor, int x, int y);
void (__stdcall *restore_cursor)(int x, int y);
};
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
static display_t *rdisplay;
void set_crtc(struct drm_crtc *crtc)
{
ENTER();
rdisplay->crtc = crtc;
LEAVE();
}
int init_cursor(cursor_t *cursor)
{
@ -98,7 +33,7 @@ int init_cursor(cursor_t *cursor)
if (unlikely(r != 0))
return r;
radeon_object_pin(cursor->robj, RADEON_GEM_DOMAIN_VRAM, NULL);
radeon_object_pin(cursor->robj, TTM_PL_FLAG_VRAM, NULL);
r = radeon_object_kmap(cursor->robj, &bits);
if (r) {
@ -123,66 +58,29 @@ int init_cursor(cursor_t *cursor)
return 0;
};
static void radeon_show_cursor(struct drm_crtc *crtc)
static void radeon_show_cursor()
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
break;
case 1:
WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
break;
default:
return;
}
WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
(RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
}
}
static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
{
struct radeon_device *rdev = crtc->dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
uint32_t cur_lock;
if (ASIC_IS_AVIVO(rdev)) {
cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
else
cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
} else {
cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= RADEON_CUR_LOCK;
else
cur_lock &= ~RADEON_CUR_LOCK;
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
}
}
cursor_t* __stdcall select_cursor(cursor_t *cursor)
{
struct radeon_device *rdev;
struct radeon_crtc *radeon_crtc;
cursor_t *old;
uint32_t gpu_addr;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
radeon_crtc = to_radeon_crtc(rdisplay->crtc);
old = rdisplay->cursor;
@ -190,250 +88,86 @@ cursor_t* __stdcall select_cursor(cursor_t *cursor)
gpu_addr = cursor->robj->gpu_addr;
if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS, gpu_addr);
else {
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
WREG32(RADEON_CUR_OFFSET, gpu_addr - rdev->mc.vram_location);
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
}
return old;
};
static void radeon_lock_cursor(bool lock)
{
struct radeon_device *rdev;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
uint32_t cur_lock;
if (ASIC_IS_AVIVO(rdev)) {
cur_lock = RREG32(AVIVO_D1CUR_UPDATE);
if (lock)
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
else
cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
WREG32(AVIVO_D1CUR_UPDATE, cur_lock);
} else {
cur_lock = RREG32(RADEON_CUR_OFFSET);
if (lock)
cur_lock |= RADEON_CUR_LOCK;
else
cur_lock &= ~RADEON_CUR_LOCK;
WREG32(RADEON_CUR_OFFSET, cur_lock);
}
}
void __stdcall move_cursor(cursor_t *cursor, int x, int y)
{
struct radeon_device *rdev;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
struct drm_crtc *crtc = rdisplay->crtc;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
int hot_x = cursor->hot_x;
int hot_y = cursor->hot_y;
radeon_lock_cursor(crtc, true);
radeon_lock_cursor(true);
if (ASIC_IS_AVIVO(rdev))
{
int w = 32;
int i = 0;
struct drm_crtc *crtc_p;
/* avivo cursor are offset into the total surface */
// x += crtc->x;
// y += crtc->y;
WREG32(AVIVO_D1CUR_POSITION, (x << 16) | y);
WREG32(AVIVO_D1CUR_HOT_SPOT, (hot_x << 16) | hot_y);
WREG32(AVIVO_D1CUR_SIZE, ((w - 1) << 16) | 31);
// DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
#if 0
/* avivo cursor image can't end on 128 pixel boundry or
* go past the end of the frame if both crtcs are enabled
*/
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
if (crtc_p->enabled)
i++;
}
if (i > 1) {
int cursor_end, frame_end;
cursor_end = x + w;
frame_end = crtc->x + crtc->mode.crtc_hdisplay;
if (cursor_end >= frame_end) {
w = w - (cursor_end - frame_end);
if (!(frame_end & 0x7f))
w--;
} else {
if (!(cursor_end & 0x7f))
w--;
}
if (w <= 0)
w = 1;
}
#endif
WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
(x << 16) | y);
WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset,
(hot_x << 16) | hot_y);
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | 31);
} else {
if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
y *= 2;
} else {
uint32_t gpu_addr;
WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
WREG32(RADEON_CUR_HORZ_VERT_OFF,
(RADEON_CUR_LOCK | (hot_x << 16) | (hot_y << 16)));
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
WREG32(RADEON_CUR_HORZ_VERT_POSN,
(RADEON_CUR_LOCK | (x << 16) | y));
gpu_addr = cursor->robj->gpu_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
WREG32(RADEON_CUR_OFFSET,
(radeon_crtc->legacy_cursor_offset + (hot_y * 256)));
(gpu_addr - rdev->mc.vram_location + (hot_y * 256)));
}
radeon_lock_cursor(crtc, false);
radeon_lock_cursor(false);
}
void __stdcall restore_cursor(int x, int y)
{
};
static char *manufacturer_name(unsigned char *x)
{
static char name[4];
name[0] = ((x[0] & 0x7C) >> 2) + '@';
name[1] = ((x[0] & 0x03) << 3) + ((x[1] & 0xE0) >> 5) + '@';
name[2] = (x[1] & 0x1F) + '@';
name[3] = 0;
return name;
}
bool set_mode(struct drm_device *dev, struct drm_connector *connector,
mode_t *reqmode, bool strict)
{
struct drm_display_mode *mode = NULL, *tmpmode;
bool ret = false;
ENTER();
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) &&
(drm_mode_vrefresh(tmpmode) == reqmode->freq) )
{
mode = tmpmode;
break;
}
};
if( (mode == NULL) && (strict == false) )
{
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) )
{
mode = tmpmode;
break;
}
};
};
if( mode != NULL )
{
struct drm_framebuffer *fb;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
char con_edid[128];
char *con_name;
char *enc_name;
encoder = connector->encoder;
crtc = encoder->crtc;
fb = list_first_entry(&dev->mode_config.fb_kernel_list,
struct drm_framebuffer, filp_head);
memcpy(con_edid, connector->edid_blob_ptr->data, 128);
dbgprintf("Manufacturer: %s Model %x Serial Number %u\n",
manufacturer_name(con_edid + 0x08),
(unsigned short)(con_edid[0x0A] + (con_edid[0x0B] << 8)),
(unsigned int)(con_edid[0x0C] + (con_edid[0x0D] << 8)
+ (con_edid[0x0E] << 16) + (con_edid[0x0F] << 24)));
con_name = drm_get_connector_name(connector);
enc_name = drm_get_encoder_name(encoder);
dbgprintf("set mode %d %d connector %s encoder %s\n",
reqmode->width, reqmode->height, con_name, enc_name);
fb->width = reqmode->width;
fb->height = reqmode->height;
fb->pitch = radeon_align_pitch(dev->dev_private, reqmode->width, 32, false) * ((32 + 1) / 8);
crtc->fb = fb;
crtc->enabled = true;
rdisplay->crtc = crtc;
ret = drm_crtc_helper_set_mode(crtc, mode, 0, 0, fb);
if (ret == true)
{
rdisplay->width = fb->width;
rdisplay->height = fb->height;
rdisplay->pitch = fb->pitch;
rdisplay->vrefresh = drm_mode_vrefresh(mode);
sysSetScreen(fb->width, fb->height, fb->pitch);
dbgprintf("new mode %d x %d pitch %d\n",
fb->width, fb->height, fb->pitch);
}
else
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
fb->width, fb->height, crtc);
}
LEAVE();
return ret;
};
static int count_connector_modes(struct drm_connector* connector)
{
struct drm_display_mode *mode;
int count = 0;
list_for_each_entry(mode, &connector->modes, head)
{
count++;
};
return count;
};
static struct drm_connector* get_def_connector(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector *def_connector = NULL;
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
struct drm_encoder *encoder;
struct drm_crtc *crtc;
if( connector->status != connector_status_connected)
continue;
encoder = connector->encoder;
if( encoder == NULL)
continue;
if( encoder->encoder_type == DRM_MODE_ENCODER_TVDAC )
{
dbgprintf("skip tvdac encoder %s connector %s\n",
drm_get_encoder_name(encoder),
drm_get_connector_name(connector));
continue;
};
crtc = encoder->crtc;
if(crtc == NULL)
continue;
def_connector = connector;
break;
};
return def_connector;
};
bool init_display(struct radeon_device *rdev, mode_t *usermode)
{
struct drm_device *dev;
cursor_t *cursor;
bool retval = false;
u32_t ifl;
ENTER();
@ -441,183 +175,33 @@ bool init_display(struct radeon_device *rdev, mode_t *usermode)
dev = rdisplay->ddev = rdev->ddev;
ifl = safe_cli();
{
list_for_each_entry(cursor, &rdisplay->cursors, list)
{
init_cursor(cursor);
};
rdisplay->connector = get_def_connector(dev);
if( rdisplay->connector == 0 )
return false;
rdisplay->supported_modes = count_connector_modes(rdisplay->connector);
if( (usermode->width != 0) &&
(usermode->height != 0) )
{
retval = set_mode(dev, rdisplay->connector, usermode, false);
}
else
{
mode_t mode;
mode.width = rdisplay->width;
mode.height = rdisplay->height;
mode.bpp = 32;
mode.freq = 60;
retval = set_mode(dev, rdisplay->connector, &mode, false);
};
safe_sti(ifl);
select_cursor(rdisplay->cursor);
radeon_show_cursor(rdisplay->crtc);
ifl = safe_cli();
{
rdisplay->restore_cursor(0,0);
rdisplay->init_cursor = init_cursor;
rdisplay->select_cursor = select_cursor;
rdisplay->show_cursor = NULL;
rdisplay->move_cursor = move_cursor;
rdisplay->restore_cursor = restore_cursor;
select_cursor(rdisplay->cursor);
radeon_show_cursor();
};
safe_sti(ifl);
LEAVE();
return retval;
};
static int my_atoi(char **cmd)
{
char* p = *cmd;
int val = 0;
for (;; *p++) {
switch (*p) {
case '0' ... '9':
val = 10*val+(*p-'0');
break;
default:
*cmd = p;
return val;
}
}
}
char* parse_mode(char *p, mode_t *mode)
{
char c;
while( (c = *p++) == ' ');
if( c )
{
p--;
mode->width = my_atoi(&p);
if(*p == 'x') p++;
mode->height = my_atoi(&p);
if(*p == 'x') p++;
mode->bpp = 32;
mode->freq = my_atoi(&p);
if( mode->freq == 0 )
mode->freq = 60;
}
return p;
};
char* parse_path(char *p, char *log)
{
char c;
while( (c = *p++) == ' ');
p--;
while( (c = *log++ = *p++) && (c != ' '));
*log = 0;
return p;
};
void parse_cmdline(char *cmdline, mode_t *mode, char *log)
{
char *p = cmdline;
char c = *p++;
while( c )
{
if( c == '-')
{
switch(*p++)
{
case 'm':
p = parse_mode(p, mode);
break;
case 'l':
p = parse_path(p, log);
break;
};
};
c = *p++;
};
};
int get_modes(mode_t *mode, int *count)
{
int err = -1;
ENTER();
dbgprintf("mode %x count %d\n", mode, *count);
if( *count == 0 )
{
*count = rdisplay->supported_modes;
err = 0;
}
else if( mode != NULL )
{
struct drm_display_mode *drmmode;
int i = 0;
if( *count > rdisplay->supported_modes)
*count = rdisplay->supported_modes;
list_for_each_entry(drmmode, &rdisplay->connector->modes, head)
{
if( i < *count)
{
mode->width = drm_mode_width(drmmode);
mode->height = drm_mode_height(drmmode);
mode->bpp = 32;
mode->freq = drm_mode_vrefresh(drmmode);
i++;
mode++;
}
else break;
};
*count = i;
err = 0;
};
LEAVE();
return err;
}
int set_user_mode(mode_t *mode)
{
int err = -1;
if( (mode->width != 0) &&
(mode->height != 0) &&
(mode->freq != 0 ) )
{
if( set_mode(rdisplay->ddev, rdisplay->connector, mode, true) )
err = 0;
};
return err;
};

View File

@ -0,0 +1,441 @@
#include <stdint.h>
#include <drm/drmP.h>
#include <drm.h>
#include <drm_mm.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_object.h"
#include "display.h"
static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor);
static void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
static void radeon_show_cursor_kms(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
break;
case 1:
WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
break;
default:
return;
}
WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
(RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
}
}
static void radeon_lock_cursor_kms(struct drm_crtc *crtc, bool lock)
{
struct radeon_device *rdev = crtc->dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
uint32_t cur_lock;
if (ASIC_IS_AVIVO(rdev)) {
cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
else
cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
} else {
cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= RADEON_CUR_LOCK;
else
cur_lock &= ~RADEON_CUR_LOCK;
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
}
}
cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
{
struct radeon_device *rdev;
struct radeon_crtc *radeon_crtc;
cursor_t *old;
uint32_t gpu_addr;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
radeon_crtc = to_radeon_crtc(rdisplay->crtc);
old = rdisplay->cursor;
rdisplay->cursor = cursor;
gpu_addr = cursor->robj->gpu_addr;
if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
else {
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
}
return old;
};
void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y)
{
struct radeon_device *rdev;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
struct drm_crtc *crtc = rdisplay->crtc;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
int hot_x = cursor->hot_x;
int hot_y = cursor->hot_y;
radeon_lock_cursor_kms(crtc, true);
if (ASIC_IS_AVIVO(rdev))
{
int w = 32;
int i = 0;
struct drm_crtc *crtc_p;
/* avivo cursor are offset into the total surface */
// x += crtc->x;
// y += crtc->y;
// DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
#if 0
/* avivo cursor image can't end on 128 pixel boundry or
* go past the end of the frame if both crtcs are enabled
*/
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
if (crtc_p->enabled)
i++;
}
if (i > 1) {
int cursor_end, frame_end;
cursor_end = x + w;
frame_end = crtc->x + crtc->mode.crtc_hdisplay;
if (cursor_end >= frame_end) {
w = w - (cursor_end - frame_end);
if (!(frame_end & 0x7f))
w--;
} else {
if (!(cursor_end & 0x7f))
w--;
}
if (w <= 0)
w = 1;
}
#endif
WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
(x << 16) | y);
WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset,
(hot_x << 16) | hot_y);
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | 31);
} else {
if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
y *= 2;
WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK | (hot_x << 16) | (hot_y << 16)));
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK | (x << 16) | y));
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
(radeon_crtc->legacy_cursor_offset + (hot_y * 256)));
}
radeon_lock_cursor_kms(crtc, false);
}
static char *manufacturer_name(unsigned char *x)
{
static char name[4];
name[0] = ((x[0] & 0x7C) >> 2) + '@';
name[1] = ((x[0] & 0x03) << 3) + ((x[1] & 0xE0) >> 5) + '@';
name[2] = (x[1] & 0x1F) + '@';
name[3] = 0;
return name;
}
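/* set_mode() walks the connector's mode list for an exact width/height/refresh
* match, falls back to a width/height match unless strict is set, and only
* reprograms the framebuffer and CRTC when a suitable mode is found. */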
bool set_mode(struct drm_device *dev, struct drm_connector *connector,
mode_t *reqmode, bool strict)
{
struct drm_display_mode *mode = NULL, *tmpmode;
bool ret = false;
ENTER();
dbgprintf("width %d height %d vrefresh %d\n",
reqmode->width, reqmode->height, reqmode->freq);
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) &&
(drm_mode_vrefresh(tmpmode) == reqmode->freq) )
{
mode = tmpmode;
goto do_set;
}
};
if( (mode == NULL) && (strict == false) )
{
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) )
{
mode = tmpmode;
goto do_set;
}
};
};
do_set:
if( mode != NULL )
{
struct drm_framebuffer *fb;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
// char con_edid[128];
char *con_name;
char *enc_name;
encoder = connector->encoder;
crtc = encoder->crtc;
fb = list_first_entry(&dev->mode_config.fb_kernel_list,
struct drm_framebuffer, filp_head);
// memcpy(con_edid, connector->edid_blob_ptr->data, 128);
// dbgprintf("Manufacturer: %s Model %x Serial Number %u\n",
// manufacturer_name(con_edid + 0x08),
// (unsigned short)(con_edid[0x0A] + (con_edid[0x0B] << 8)),
// (unsigned int)(con_edid[0x0C] + (con_edid[0x0D] << 8)
// + (con_edid[0x0E] << 16) + (con_edid[0x0F] << 24)));
con_name = drm_get_connector_name(connector);
enc_name = drm_get_encoder_name(encoder);
dbgprintf("set mode %d %d connector %s encoder %s\n",
reqmode->width, reqmode->height, con_name, enc_name);
fb->width = reqmode->width;
fb->height = reqmode->height;
fb->pitch = radeon_align_pitch(dev->dev_private, reqmode->width, 32, false) * ((32 + 1) / 8);
crtc->fb = fb;
crtc->enabled = true;
rdisplay->crtc = crtc;
ret = drm_crtc_helper_set_mode(crtc, mode, 0, 0, fb);
select_cursor_kms(rdisplay->cursor);
radeon_show_cursor_kms(crtc);
if (ret == true)
{
rdisplay->width = fb->width;
rdisplay->height = fb->height;
rdisplay->pitch = fb->pitch;
rdisplay->vrefresh = drm_mode_vrefresh(mode);
sysSetScreen(fb->width, fb->height, fb->pitch);
dbgprintf("new mode %d x %d pitch %d\n",
fb->width, fb->height, fb->pitch);
}
else
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
fb->width, fb->height, crtc);
}
LEAVE();
return ret;
};
static int count_connector_modes(struct drm_connector* connector)
{
struct drm_display_mode *mode;
int count = 0;
list_for_each_entry(mode, &connector->modes, head)
{
count++;
};
return count;
};
static struct drm_connector* get_def_connector(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector *def_connector = NULL;
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
struct drm_encoder *encoder;
struct drm_crtc *crtc;
if( connector->status != connector_status_connected)
continue;
encoder = connector->encoder;
if( encoder == NULL)
continue;
crtc = encoder->crtc;
if(crtc == NULL)
continue;
def_connector = connector;
break;
};
return def_connector;
};
bool init_display_kms(struct radeon_device *rdev, mode_t *usermode)
{
struct drm_device *dev;
cursor_t *cursor;
bool retval = false;
u32_t ifl;
ENTER();
rdisplay = GetDisplay();
dev = rdisplay->ddev = rdev->ddev;
ifl = safe_cli();
{
list_for_each_entry(cursor, &rdisplay->cursors, list)
{
init_cursor(cursor);
};
};
safe_sti(ifl);
rdisplay->connector = get_def_connector(dev);
if( rdisplay->connector == 0 )
{
dbgprintf("no active connectors\n");
return false;
};
rdisplay->crtc = rdisplay->connector->encoder->crtc;
rdisplay->supported_modes = count_connector_modes(rdisplay->connector);
if( (usermode->width != 0) &&
(usermode->height != 0) &&
( (usermode->width != rdisplay->width) ||
(usermode->height != rdisplay->height) ||
(usermode->freq != rdisplay->vrefresh) ) )
{
retval = set_mode(dev, rdisplay->connector, usermode, false);
}
ifl = safe_cli();
{
rdisplay->restore_cursor(0,0);
rdisplay->init_cursor = init_cursor;
rdisplay->select_cursor = select_cursor_kms;
rdisplay->show_cursor = NULL;
rdisplay->move_cursor = move_cursor_kms;
rdisplay->restore_cursor = restore_cursor;
radeon_show_cursor_kms(rdisplay->crtc);
};
safe_sti(ifl);
LEAVE();
return retval;
};
int get_modes(mode_t *mode, int *count)
{
int err = -1;
ENTER();
dbgprintf("mode %x count %d\n", mode, *count);
if( *count == 0 )
{
*count = rdisplay->supported_modes;
err = 0;
}
else if( mode != NULL )
{
struct drm_display_mode *drmmode;
int i = 0;
if( *count > rdisplay->supported_modes)
*count = rdisplay->supported_modes;
list_for_each_entry(drmmode, &rdisplay->connector->modes, head)
{
if( i < *count)
{
mode->width = drm_mode_width(drmmode);
mode->height = drm_mode_height(drmmode);
mode->bpp = 32;
mode->freq = drm_mode_vrefresh(drmmode);
i++;
mode++;
}
else break;
};
*count = i;
err = 0;
};
LEAVE();
return err;
}
int set_user_mode(mode_t *mode)
{
int err = -1;
ENTER();
dbgprintf("width %d height %d vrefresh %d\n",
mode->width, mode->height, mode->freq);
if( (mode->width != 0) &&
(mode->height != 0) &&
(mode->freq != 0 ) &&
( (mode->width != rdisplay->width) ||
(mode->height != rdisplay->height) ||
(mode->freq != rdisplay->vrefresh) ) )
{
if( set_mode(rdisplay->ddev, rdisplay->connector, mode, true) )
err = 0;
};
LEAVE();
return err;
};

View File

@ -552,6 +552,7 @@ int rv515_init(struct radeon_device *rdev)
rv515_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev);
dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
if (r)
return r;
rv515_debugfs(rdev);

View File

@ -0,0 +1,983 @@
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
//#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"
#include <linux/firmware.h>
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
/*
* GART
*/
int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7));
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
/* Setup TLB control */
tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
for (i = 1; i < 7; i++)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
r600_pcie_gart_tlb_flush(rdev);
rdev->gart.ready = true;
return 0;
}
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int i;
/* Disable all tables */
for (i = 0; i < 7; i++)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
EFFECTIVE_L2_QUEUE_SIZE(7));
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
/* Setup TLB control */
tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
// radeon_object_kunmap(rdev->gart.table.vram.robj);
// radeon_object_unpin(rdev->gart.table.vram.robj);
}
}
void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
rv770_pcie_gart_disable(rdev);
// radeon_gart_table_vram_free(rdev);
radeon_gart_fini(rdev);
}
void rv770_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
int i;
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7));
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
/* Setup TLB control */
tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
for (i = 0; i < 7; i++)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
static void rv770_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
u32 tmp;
int i, j;
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x18) {
WREG32((0x2c14 + j), 0x00000000);
WREG32((0x2c18 + j), 0x00000000);
WREG32((0x2c1c + j), 0x00000000);
WREG32((0x2c20 + j), 0x00000000);
WREG32((0x2c24 + j), 0x00000000);
}
WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Lockout access through VGA aperture*/
WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
/* Update configuration */
if (rdev->flags & RADEON_IS_AGP) {
if (rdev->mc.vram_start < rdev->mc.gtt_start) {
/* VRAM before AGP */
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
rdev->mc.vram_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.gtt_end >> 12);
} else {
/* VRAM after AGP */
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
rdev->mc.gtt_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
} else {
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
rdev->mc.vram_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
} else {
WREG32(MC_VM_AGP_BASE, 0);
WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
}
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
rv515_mc_resume(rdev, &save);
/* we need to own VRAM, so turn off the VGA renderer here
* to stop it overwriting our objects */
rv515_vga_render_disable(rdev);
}
/*
* CP.
*/
void r700_cp_stop(struct radeon_device *rdev)
{
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
int i;
if (!rdev->me_fw || !rdev->pfp_fw)
return -EINVAL;
r700_cp_stop(rdev);
WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
/* Reset cp */
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
RREG32(GRBM_SOFT_RESET);
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
fw_data = (const __be32 *)rdev->me_fw->data;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_ME_RAM_WADDR, 0);
WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
/*
* Core functions
*/
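/* r700_get_tile_pipe_to_backend_map() skips backends masked off by
* backend_disable_mask and assigns the remaining ones round-robin to a
* swizzled pipe order, packing two bits per tile pipe into the map. */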
static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask;
u32 enabled_backends_count;
u32 cur_pipe;
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > R7XX_MAX_BACKENDS)
num_backends = R7XX_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
enabled_backends_mask = 0;
enabled_backends_count = 0;
for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
swizzle_pipe[0] = 0;
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 3:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
break;
case 4:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 3;
swizzle_pipe[3] = 1;
break;
case 5:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
break;
case 6:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
break;
case 7:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 5;
break;
case 8:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 7;
swizzle_pipe[7] = 5;
break;
}
cur_backend = 0;
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
}
return backend_map;
}
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
u32 sq_ms_fifo_sizes;
u32 sq_config;
u32 sq_thread_resource_mgmt;
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
u32 db_debug4;
/* setup chip specs */
switch (rdev->family) {
case CHIP_RV770:
rdev->config.rv770.max_pipes = 4;
rdev->config.rv770.max_tile_pipes = 8;
rdev->config.rv770.max_simds = 10;
rdev->config.rv770.max_backends = 4;
rdev->config.rv770.max_gprs = 256;
rdev->config.rv770.max_threads = 248;
rdev->config.rv770.max_stack_entries = 512;
rdev->config.rv770.max_hw_contexts = 8;
rdev->config.rv770.max_gs_threads = 16 * 2;
rdev->config.rv770.sx_max_export_size = 128;
rdev->config.rv770.sx_max_export_pos_size = 16;
rdev->config.rv770.sx_max_export_smx_size = 112;
rdev->config.rv770.sq_num_cf_insts = 2;
rdev->config.rv770.sx_num_of_sets = 7;
rdev->config.rv770.sc_prim_fifo_size = 0xF9;
rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
break;
case CHIP_RV730:
rdev->config.rv770.max_pipes = 2;
rdev->config.rv770.max_tile_pipes = 4;
rdev->config.rv770.max_simds = 8;
rdev->config.rv770.max_backends = 2;
rdev->config.rv770.max_gprs = 128;
rdev->config.rv770.max_threads = 248;
rdev->config.rv770.max_stack_entries = 256;
rdev->config.rv770.max_hw_contexts = 8;
rdev->config.rv770.max_gs_threads = 16 * 2;
rdev->config.rv770.sx_max_export_size = 256;
rdev->config.rv770.sx_max_export_pos_size = 32;
rdev->config.rv770.sx_max_export_smx_size = 224;
rdev->config.rv770.sq_num_cf_insts = 2;
rdev->config.rv770.sx_num_of_sets = 7;
rdev->config.rv770.sc_prim_fifo_size = 0xf9;
rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
if (rdev->config.rv770.sx_max_export_pos_size > 16) {
rdev->config.rv770.sx_max_export_pos_size -= 16;
rdev->config.rv770.sx_max_export_smx_size += 16;
}
break;
case CHIP_RV710:
rdev->config.rv770.max_pipes = 2;
rdev->config.rv770.max_tile_pipes = 2;
rdev->config.rv770.max_simds = 2;
rdev->config.rv770.max_backends = 1;
rdev->config.rv770.max_gprs = 256;
rdev->config.rv770.max_threads = 192;
rdev->config.rv770.max_stack_entries = 256;
rdev->config.rv770.max_hw_contexts = 4;
rdev->config.rv770.max_gs_threads = 8 * 2;
rdev->config.rv770.sx_max_export_size = 128;
rdev->config.rv770.sx_max_export_pos_size = 16;
rdev->config.rv770.sx_max_export_smx_size = 112;
rdev->config.rv770.sq_num_cf_insts = 1;
rdev->config.rv770.sx_num_of_sets = 7;
rdev->config.rv770.sc_prim_fifo_size = 0x40;
rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
break;
case CHIP_RV740:
rdev->config.rv770.max_pipes = 4;
rdev->config.rv770.max_tile_pipes = 4;
rdev->config.rv770.max_simds = 8;
rdev->config.rv770.max_backends = 4;
rdev->config.rv770.max_gprs = 256;
rdev->config.rv770.max_threads = 248;
rdev->config.rv770.max_stack_entries = 512;
rdev->config.rv770.max_hw_contexts = 8;
rdev->config.rv770.max_gs_threads = 16 * 2;
rdev->config.rv770.sx_max_export_size = 256;
rdev->config.rv770.sx_max_export_pos_size = 32;
rdev->config.rv770.sx_max_export_smx_size = 224;
rdev->config.rv770.sq_num_cf_insts = 2;
rdev->config.rv770.sx_num_of_sets = 7;
rdev->config.rv770.sc_prim_fifo_size = 0x100;
rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
if (rdev->config.rv770.sx_max_export_pos_size > 16) {
rdev->config.rv770.sx_max_export_pos_size -= 16;
rdev->config.rv770.sx_max_export_smx_size += 16;
}
break;
default:
break;
}
/* Initialize HDP */
j = 0;
for (i = 0; i < 32; i++) {
WREG32((0x2c14 + j), 0x00000000);
WREG32((0x2c18 + j), 0x00000000);
WREG32((0x2c1c + j), 0x00000000);
WREG32((0x2c20 + j), 0x00000000);
WREG32((0x2c24 + j), 0x00000000);
j += 0x18;
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
/* setup tiling, simd, pipe config */
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
gb_tiling_config |= PIPE_TILING(0);
break;
case 2:
gb_tiling_config |= PIPE_TILING(1);
break;
case 4:
gb_tiling_config |= PIPE_TILING(2);
break;
case 8:
gb_tiling_config |= PIPE_TILING(3);
break;
default:
break;
}
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
gb_tiling_config |= GROUP_SIZE(0);
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
} else {
gb_tiling_config |=
ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
gb_tiling_config |=
SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
}
gb_tiling_config |= BANK_SWAPS(1);
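/* Compute the backend map; backends at or above max_backends are passed in
 * as disabled via the (0xff << max_backends) mask. */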
backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
rdev->config.rv770.max_backends,
(0xff << rdev->config.rv770.max_backends) & 0xff);
gb_tiling_config |= BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config =
INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
cc_rb_backend_disable =
BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
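/* The GC_USER_* and CC_SYS_* registers below are written with the same
 * pipe/backend configuration as their CC_* counterparts. */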
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
ROQ_IB2_START(0x2b)));
WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
SYNC_GRADIENT |
SYNC_WALKER |
SYNC_ALIGNER));
sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
WREG32(SX_DEBUG_1, sx_debug_1);
smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
GS_FLUSH_CTL(4) |
ACK_FLUSH_CTL(3) |
SYNC_FLUSH_CTL));
if (rdev->family == CHIP_RV770)
WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
else {
db_debug4 = RREG32(DB_DEBUG4);
db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
WREG32(DB_DEBUG4, db_debug4);
}
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
WREG32(VGT_NUM_INSTANCES, 1);
WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
WREG32(CP_PERFMON_CNTL, 0);
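/* MS FIFO sizing: the cache FIFO scales with sq_num_cf_insts for this chip;
 * the fetch FIFO high-water mark is raised on everything but RV770. */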
sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
DONE_FIFO_HIWATER(0xe0) |
ALU_UPDATE_FIFO_HIWATER(0x8));
switch (rdev->family) {
case CHIP_RV770:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
break;
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
break;
}
WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
* should be adjusted as needed by the 2D/3D drivers. This just sets default values
*/
sq_config = RREG32(SQ_CONFIG);
sq_config &= ~(PS_PRIO(3) |
VS_PRIO(3) |
GS_PRIO(3) |
ES_PRIO(3));
sq_config |= (DX9_CONSTS |
VC_ENABLE |
EXPORT_SRC_C |
PS_PRIO(0) |
VS_PRIO(1) |
GS_PRIO(2) |
ES_PRIO(3));
if (rdev->family == CHIP_RV710)
/* no vertex cache */
sq_config &= ~VC_ENABLE;
WREG32(SQ_CONFIG, sq_config);
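/* GPR pool split: 24/64 of max_gprs each for PS and VS (half of that share
 * again reserved as clause temporaries), 7/64 each for GS and ES. */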
WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));
WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));
sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
else
sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
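/* Dynamic GPR ring sizes: 38/64 of the GPR pool for each of the two rings
 * on SIMD A and B, replicated across all eight SIMD_AB registers. */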
sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));
WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
FORCE_EOV_MAX_REZ_CNT(255)));
if (rdev->family == CHIP_RV710)
WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
AUTO_INVLD_EN(ES_AND_GS_AUTO)));
else
WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
AUTO_INVLD_EN(ES_AND_GS_AUTO)));
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV740:
gs_prim_buffer_depth = 384;
break;
case CHIP_RV710:
gs_prim_buffer_depth = 128;
break;
default:
break;
}
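/* ES vertices per GS thread: the GS primitive buffer depth plus the number
 * of GS vertices one thread can emit (16 per pipe), capped at 256 below. */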
num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
/* Max value for this is 256 */
if (vgt_gs_per_es > 256)
vgt_gs_per_es = 256;
WREG32(VGT_ES_PER_GS, 128);
WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
WREG32(VGT_GS_PER_VS, 2);
/* more default values. 2D/3D driver should adjust as needed */
WREG32(VGT_GS_VERTEX_REUSE, 16);
WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
WREG32(VGT_STRMOUT_EN, 0);
WREG32(SX_MISC, 0);
WREG32(PA_SC_MODE_CNTL, 0);
WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
WREG32(PA_SC_AA_CONFIG, 0);
WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
WREG32(PA_SC_LINE_STIPPLE, 0);
WREG32(SPI_INPUT_Z, 0);
WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
WREG32(CB_COLOR7_FRAG, 0);
/* clear render buffer base addresses */
WREG32(CB_COLOR0_BASE, 0);
WREG32(CB_COLOR1_BASE, 0);
WREG32(CB_COLOR2_BASE, 0);
WREG32(CB_COLOR3_BASE, 0);
WREG32(CB_COLOR4_BASE, 0);
WREG32(CB_COLOR5_BASE, 0);
WREG32(CB_COLOR6_BASE, 0);
WREG32(CB_COLOR7_BASE, 0);
WREG32(TCP_CNTL, 0);
hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
}
int rv770_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
int r;
/* Get VRAM information */
/* FIXME: Don't know how to determine vram width, need to check
* vram_width usage
*/
rdev->mc.vram_width = 128;
rdev->mc.vram_is_ddr = true;
/* Could the aperture size report 0? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
rdev->mc.mc_vram_size = rdev->mc.aper_size;
if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
return r;
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
/* Try to put VRAM before or after AGP because we want
 * SYSTEM_APERTURE to cover both VRAM and AGP so that the GPU
 * can catch out-of-VRAM/AGP accesses.
 */
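/* Hypothetical example: with agp_base at 0xD0000000 and 512MB of VRAM,
 * gtt_location lands at 0xD0000000 and VRAM fits just below it, so
 * vram_location becomes 0xD0000000 - 0x20000000 = 0xB0000000. */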
if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
/* Enough room before */
rdev->mc.vram_location = rdev->mc.gtt_location -
rdev->mc.mc_vram_size;
} else if (tmp > rdev->mc.mc_vram_size) {
/* Enough room after */
rdev->mc.vram_location = rdev->mc.gtt_location +
rdev->mc.gtt_size;
} else {
/* Try to set up VRAM first, then AGP; this might not
 * work on some cards.
 */
rdev->mc.vram_location = 0x00000000UL;
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
} else {
rdev->mc.vram_location = 0x00000000UL;
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
rdev->mc.vram_start = rdev->mc.vram_location;
rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
rdev->mc.gtt_start = rdev->mc.gtt_location;
rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
/* FIXME: we should enforce the default clock in case the GPU is
 * not in its default setup
 */
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
return 0;
}
int rv770_gpu_reset(struct radeon_device *rdev)
{
/* FIXME: implement any rv770 specific bits */
return r600_gpu_reset(rdev);
}
static int rv770_startup(struct radeon_device *rdev)
{
int r;
rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
r = rv770_pcie_gart_enable(rdev);
if (r)
return r;
}
rv770_gpu_init(rdev);
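/* The steps below (pinning the blit shader object, ring init, CP microcode
 * load, CP resume and writeback enable) are left commented out in this port;
 * only the MC, GART and GPU init paths above are exercised. */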
// r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
// &rdev->r600_blit.shader_gpu_addr);
// if (r) {
// DRM_ERROR("failed to pin blit object %d\n", r);
// return r;
// }
// r = radeon_ring_init(rdev, rdev->cp.ring_size);
// if (r)
// return r;
// r = rv770_cp_load_microcode(rdev);
// if (r)
// return r;
// r = r600_cp_resume(rdev);
// if (r)
// return r;
/* The writeback buffer is not vital, so don't worry about failure */
// r600_wb_enable(rdev);
return 0;
}
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more
 * than call ASIC-specific functions. This should also allow us
 * to remove a bunch of callbacks such as vram_info.
 */
int rv770_init(struct radeon_device *rdev)
{
int r;
r = radeon_dummy_page_init(rdev);
if (r)
return r;
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
return -EINVAL;
}
/* Must be an ATOMBIOS */
if (!rdev->is_atom_bios) {
dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
return -EINVAL;
}
r = radeon_atombios_init(rdev);
if (r)
return r;
/* Post card if necessary */
if (!r600_card_posted(rdev) && rdev->bios) {
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev);
if (r)
return r;
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
// if (r)
// return r;
r = rv770_mc_init(rdev);
if (r)
return r;
/* Memory manager */
r = radeon_object_init(rdev);
if (r)
return r;
// rdev->cp.ring_obj = NULL;
// r600_ring_init(rdev, 1024 * 1024);
// if (!rdev->me_fw || !rdev->pfp_fw) {
// r = r600_cp_init_microcode(rdev);
// if (r) {
// DRM_ERROR("Failed to load firmware!\n");
// return r;
// }
// }
r = r600_pcie_gart_init(rdev);
if (r)
return r;
rdev->accel_working = true;
// r = r600_blit_init(rdev);
// if (r) {
// DRM_ERROR("radeon: failled blitter (%d).\n", r);
// rdev->accel_working = false;
// }
r = rv770_startup(rdev);
if (r) {
// rv770_suspend(rdev);
// r600_wb_fini(rdev);
// radeon_ring_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
// r = radeon_ib_pool_init(rdev);
// if (r) {
// DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
// rdev->accel_working = false;
// }
// r = r600_ib_test(rdev);
// if (r) {
// DRM_ERROR("radeon: failled testing IB (%d).\n", r);
// rdev->accel_working = false;
// }
}
return 0;
}