kms: pre rc9

git-svn-id: svn://kolibrios.org@1321 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
Sergey Semyonov (Serge) 2009-12-14 18:34:32 +00:00
parent 5698c48f5e
commit ea3dc0b0b8
92 changed files with 10079 additions and 1274 deletions

View File

@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
         drm_tv_subconnector_enum_list)
 
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+    { DRM_MODE_DIRTY_OFF,      "Off"      },
+    { DRM_MODE_DIRTY_ON,       "On"       },
+    { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+        drm_dirty_info_enum_list)
+
 struct drm_conn_prop_enum_list {
     int type;
     char *name;
@@ -247,7 +256,8 @@ static void drm_mode_object_put(struct drm_device *dev,
     mutex_unlock(&dev->mode_config.idr_mutex);
 }
 
-void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+        uint32_t id, uint32_t type)
 {
     struct drm_mode_object *obj = NULL;
@@ -272,7 +282,7 @@ EXPORT_SYMBOL(drm_mode_object_find);
  * functions & device file and adds it to the master fd list.
  *
  * RETURNS:
- * Zero on success, error code on falure.
+ * Zero on success, error code on failure.
  */
 int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
              const struct drm_framebuffer_funcs *funcs)
@@ -801,6 +811,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_dithering_property);
 
+/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it is needed; the property must then
+ * be attached to the desired connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+    struct drm_property *dirty_info;
+    int i;
+
+    if (dev->mode_config.dirty_info_property)
+        return 0;
+
+    dirty_info =
+        drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                    DRM_MODE_PROP_IMMUTABLE,
+                    "dirty",
+                    ARRAY_SIZE(drm_dirty_info_enum_list));
+    for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+        drm_property_add_enum(dirty_info, i,
+                      drm_dirty_info_enum_list[i].type,
+                      drm_dirty_info_enum_list[i].name);
+    dev->mode_config.dirty_info_property = dirty_info;
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
 /**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
@@ -2339,7 +2379,7 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
     } else if (connector->funcs->set_property)
         ret = connector->funcs->set_property(connector, property, out_resp->value);
 
-    /* store the property value if succesful */
+    /* store the property value if successful */
     if (!ret)
         drm_connector_property_set_value(connector, property, out_resp->value);
 out:
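
For orientation, a hypothetical driver-side sketch (not part of this commit) of how the new dirty-info property would be consumed: create it once per device, then attach it to each connector whose frontbuffer dirtiness the driver can report. drm_connector_attach_property() is the era-appropriate attach helper; example_attach_dirty_info is an invented name.

#include "drmP.h"
#include "drm_crtc.h"

static void example_attach_dirty_info(struct drm_connector *connector)
{
    struct drm_device *dev = connector->dev;

    /* no-op if the property already exists */
    drm_mode_create_dirty_info_property(dev);
    drm_connector_attach_property(connector,
                  dev->mode_config.dirty_info_property,
                  DRM_MODE_DIRTY_OFF);
}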

View File

@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
     count = (*connector_funcs->get_modes)(connector);
     if (!count) {
-        count = drm_add_modes_noedid(connector, 800, 600);
+        count = drm_add_modes_noedid(connector, 1024, 768);
         if (!count)
             return 0;
     }
@@ -1020,6 +1020,11 @@ bool drm_helper_initial_config(struct drm_device *dev)
 {
     int count = 0;
 
+    /* disable all the possible outputs/crtcs before entering KMS mode */
+    drm_helper_disable_unused_functions(dev);
+
+    drm_fb_helper_parse_command_line(dev);
+
     count = drm_helper_probe_connector_modes(dev,
                     dev->mode_config.max_width,
                     dev->mode_config.max_height);

View File

@@ -123,18 +123,20 @@ static const u8 edid_header[] = {
  */
 static bool edid_is_valid(struct edid *edid)
 {
-    int i;
+    int i, score = 0;
     u8 csum = 0;
     u8 *raw_edid = (u8 *)edid;
 
-    if (memcmp(edid->header, edid_header, sizeof(edid_header)))
+    for (i = 0; i < sizeof(edid_header); i++)
+        if (raw_edid[i] == edid_header[i])
+            score++;
+
+    if (score == 8) ;
+    else if (score >= 6) {
+        DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+        memcpy(raw_edid, edid_header, sizeof(edid_header));
+    } else
         goto bad;
 
-    if (edid->version != 1) {
-        DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
-        goto bad;
-    }
-
-    if (edid->revision > 4)
-        DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
-
     for (i = 0; i < EDID_LENGTH; i++)
         csum += raw_edid[i];
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid)
         goto bad;
     }
 
+    if (edid->version != 1) {
+        DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+        goto bad;
+    }
+
+    if (edid->revision > 4)
+        DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
     return 1;
 
 bad:
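
The header repair above is easy to check in isolation. A standalone sketch with hypothetical input (the 8-byte magic 00 FF FF FF FF FF FF 00 is the real EDID header): a perfect score passes, 6 or 7 matching bytes rewrite the header in place, anything lower rejects the block.

#include <stdio.h>
#include <string.h>

static const unsigned char edid_header[] = {
    0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

int main(void)
{
    /* one corrupted byte -> score 7, header gets rewritten */
    unsigned char raw[8] = { 0x00, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x00 };
    int i, score = 0;

    for (i = 0; i < (int)sizeof(edid_header); i++)
        if (raw[i] == edid_header[i])
            score++;

    if (score == 8)
        printf("header ok\n");
    else if (score >= 6) {
        printf("fixing header (score %d/8)\n", score);
        memcpy(raw, edid_header, sizeof(edid_header));
    } else
        printf("rejecting EDID (score %d/8)\n", score);
    return 0;
}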
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = {
            3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
            DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 };
+static const int drm_num_dmt_modes =
+    sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 
 static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
             int hsize, int vsize, int fresh)
 {
-    int i, count;
+    int i;
     struct drm_display_mode *ptr, *mode;
 
-    count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
     mode = NULL;
-    for (i = 0; i < count; i++) {
+    for (i = 0; i < drm_num_dmt_modes; i++) {
         ptr = &drm_dmt_modes[i];
         if (hsize == ptr->hdisplay &&
             vsize == ptr->vdisplay &&
@@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
     return modes;
 }
 
+/*
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+    struct detailed_data_monitor_range *range;
+    int hsync, vrefresh;
+
+    range = &timing->data.other_data.data.range;
+
+    hsync = drm_mode_hsync(mode);
+    vrefresh = drm_mode_vrefresh(mode);
+
+    if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+        return false;
+
+    if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+        return false;
+
+    if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+        /* be forgiving since it's in units of 10MHz */
+        int max_clock = range->pixel_clock_mhz * 10 + 9;
+        max_clock *= 1000;
+        if (mode->clock > max_clock)
+            return false;
+    }
+
+    return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+                   struct detailed_timing *timing)
+{
+    int i, modes = 0;
+    struct drm_display_mode *newmode;
+    struct drm_device *dev = connector->dev;
+
+    for (i = 0; i < drm_num_dmt_modes; i++) {
+        if (mode_in_range(drm_dmt_modes + i, timing)) {
+            newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+            if (newmode) {
+                drm_mode_probed_add(connector, newmode);
+                modes++;
+            }
+        }
+    }
+
+    return modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+             struct detailed_timing *timing)
+{
+    int i, j, modes = 0;
+    struct drm_display_mode *newmode;
+    struct drm_device *dev = connector->dev;
+    struct cvt_timing *cvt;
+    const int rates[] = { 60, 85, 75, 60, 50 };
+
+    for (i = 0; i < 4; i++) {
+        int width, height;
+        cvt = &(timing->data.other_data.data.cvt[i]);
+
+        height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
+        switch (cvt->code[1] & 0xc0) {
+        case 0x00:
+            width = height * 4 / 3;
+            break;
+        case 0x40:
+            width = height * 16 / 9;
+            break;
+        case 0x80:
+            width = height * 16 / 10;
+            break;
+        case 0xc0:
+            width = height * 15 / 9;
+            break;
+        }
+
+        for (j = 1; j < 5; j++) {
+            if (cvt->code[2] & (1 << j)) {
+                newmode = drm_cvt_mode(dev, width, height,
+                               rates[j], j == 0,
+                               false, false);
+                if (newmode) {
+                    drm_mode_probed_add(connector, newmode);
+                    modes++;
+                }
+            }
+        }
+    }
+
+    return modes;
+}
+
+static int add_detailed_modes(struct drm_connector *connector,
+                  struct detailed_timing *timing,
+                  struct edid *edid, u32 quirks, int preferred)
+{
+    int i, modes = 0;
+    struct detailed_non_pixel *data = &timing->data.other_data;
+    int timing_level = standard_timing_level(edid);
+    int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+    struct drm_display_mode *newmode;
+    struct drm_device *dev = connector->dev;
+
+    if (timing->pixel_clock) {
+        newmode = drm_mode_detailed(dev, edid, timing, quirks);
+        if (!newmode)
+            return 0;
+
+        if (preferred)
+            newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+        drm_mode_probed_add(connector, newmode);
+        return 1;
+    }
+
+    /* other timing types */
+    switch (data->type) {
+    case EDID_DETAIL_MONITOR_RANGE:
+        if (gtf)
+            modes += drm_gtf_modes_for_range(connector, timing);
+        break;
+    case EDID_DETAIL_STD_MODES:
+        /* Six modes per detailed section */
+        for (i = 0; i < 6; i++) {
+            struct std_timing *std;
+            struct drm_display_mode *newmode;
+
+            std = &data->data.timings[i];
+            newmode = drm_mode_std(dev, std, edid->revision,
+                           timing_level);
+            if (newmode) {
+                drm_mode_probed_add(connector, newmode);
+                modes++;
+            }
+        }
+        break;
+    case EDID_DETAIL_CVT_3BYTE:
+        modes += drm_cvt_modes(connector, timing);
+        break;
+    default:
+        break;
+    }
+
+    return modes;
+}
+
 /**
- * add_detailed_modes - get detailed mode info from EDID data
+ * add_detailed_info - get detailed mode info from EDID data
  * @connector: attached connector
  * @edid: EDID block to scan
  * @quirks: quirks to apply
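
mode_in_range() above reduces to three comparisons plus a deliberately forgiving pixel-clock cap. A standalone sketch with hypothetical monitor limits (30-80 kHz hsync, 50-75 Hz vrefresh, max pixel clock stored as 15, i.e. 150 MHz) run against the classic 1024x768@60 timing:

#include <stdbool.h>
#include <stdio.h>

struct range { int min_h, max_h, min_v, max_v, pclk_10mhz; };

static bool in_range(int hsync_khz, int vrefresh_hz, int clock_khz,
             const struct range *r)
{
    if (hsync_khz < r->min_h || hsync_khz > r->max_h)
        return false;
    if (vrefresh_hz < r->min_v || vrefresh_hz > r->max_v)
        return false;
    if (r->pclk_10mhz && r->pclk_10mhz != 0xff) {
        /* forgiving: 15 means anything up to 159 MHz passes */
        int max_clock = (r->pclk_10mhz * 10 + 9) * 1000;
        if (clock_khz > max_clock)
            return false;
    }
    return true;
}

int main(void)
{
    struct range r = { 30, 80, 50, 75, 15 };

    /* 1024x768@60: ~48 kHz hsync, 60 Hz, 65000 kHz pixel clock */
    printf("%s\n", in_range(48, 60, 65000, &r) ? "accept" : "reject");
    return 0;
}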
@@ -846,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
 static int add_detailed_info(struct drm_connector *connector,
                  struct edid *edid, u32 quirks)
 {
-    struct drm_device *dev = connector->dev;
-    int i, j, modes = 0;
-    int timing_level;
-
-    timing_level = standard_timing_level(edid);
+    int i, modes = 0;
 
     for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
         struct detailed_timing *timing = &edid->detailed_timings[i];
-        struct detailed_non_pixel *data = &timing->data.other_data;
-        struct drm_display_mode *newmode;
+        int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
 
-        /* X server check is version 1.1 or higher */
-        if (edid->version == 1 && edid->revision >= 1 &&
-            !timing->pixel_clock) {
-            /* Other timing or info */
-            switch (data->type) {
-            case EDID_DETAIL_MONITOR_SERIAL:
-                break;
-            case EDID_DETAIL_MONITOR_STRING:
-                break;
-            case EDID_DETAIL_MONITOR_RANGE:
-                /* Get monitor range data */
-                break;
-            case EDID_DETAIL_MONITOR_NAME:
-                break;
-            case EDID_DETAIL_MONITOR_CPDATA:
-                break;
-            case EDID_DETAIL_STD_MODES:
-                for (j = 0; j < 6; i++) {
-                    struct std_timing *std;
-                    struct drm_display_mode *newmode;
-
-                    std = &data->data.timings[j];
-                    newmode = drm_mode_std(dev, std,
-                                   edid->revision,
-                                   timing_level);
-                    if (newmode) {
-                        drm_mode_probed_add(connector, newmode);
-                        modes++;
-                    }
-                }
-                break;
-            default:
-                break;
-            }
-        } else {
-            newmode = drm_mode_detailed(dev, edid, timing, quirks);
-            if (!newmode)
-                continue;
-
-            /* First detailed mode is preferred */
-            if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
-                newmode->type |= DRM_MODE_TYPE_PREFERRED;
-            drm_mode_probed_add(connector, newmode);
-            modes++;
-        }
+        /* In 1.0, only timings are allowed */
+        if (!timing->pixel_clock && edid->version == 1 &&
+            edid->revision == 0)
+            continue;
+
+        modes += add_detailed_modes(connector, timing, edid, quirks,
+                        preferred);
     }
 
     return modes;
 }
 
 /**
  * add_detailed_mode_eedid - get detailed mode info from additional timing
  *                           EDID block
@@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector,
 static int add_detailed_info_eedid(struct drm_connector *connector,
                    struct edid *edid, u32 quirks)
 {
-    struct drm_device *dev = connector->dev;
-    int i, j, modes = 0;
+    int i, modes = 0;
     char *edid_ext = NULL;
     struct detailed_timing *timing;
-    struct detailed_non_pixel *data;
-    struct drm_display_mode *newmode;
     int edid_ext_num;
     int start_offset, end_offset;
     int timing_level;
@@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
     for (i = start_offset; i < end_offset;
             i += sizeof(struct detailed_timing)) {
         timing = (struct detailed_timing *)(edid_ext + i);
-        data = &timing->data.other_data;
-        /* Detailed mode timing */
-        if (timing->pixel_clock) {
-            newmode = drm_mode_detailed(dev, edid, timing, quirks);
-            if (!newmode)
-                continue;
-            drm_mode_probed_add(connector, newmode);
-            modes++;
-            continue;
-        }
-
-        /* Other timing or info */
-        switch (data->type) {
-        case EDID_DETAIL_MONITOR_SERIAL:
-            break;
-        case EDID_DETAIL_MONITOR_STRING:
-            break;
-        case EDID_DETAIL_MONITOR_RANGE:
-            /* Get monitor range data */
-            break;
-        case EDID_DETAIL_MONITOR_NAME:
-            break;
-        case EDID_DETAIL_MONITOR_CPDATA:
-            break;
-        case EDID_DETAIL_STD_MODES:
-            /* Five modes per detailed section */
-            for (j = 0; j < 5; i++) {
-                struct std_timing *std;
-                struct drm_display_mode *newmode;
-
-                std = &data->data.timings[j];
-                newmode = drm_mode_std(dev, std,
-                               edid->revision,
-                               timing_level);
-                if (newmode) {
-                    drm_mode_probed_add(connector, newmode);
-                    modes++;
-                }
-            }
-            break;
-        default:
-            break;
-        }
+        modes += add_detailed_modes(connector, timing, edid, quirks, 0);
     }
 
     return modes;
@@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
                  struct i2c_adapter *adapter,
                  char *buf, int len)
 {
-    int ret;
+    int i;
 
-    ret = drm_do_probe_ddc_edid(adapter, buf, len);
-    if (ret != 0) {
-        goto end;
+    for (i = 0; i < 4; i++) {
+        if (drm_do_probe_ddc_edid(adapter, buf, len))
+            return -1;
+        if (edid_is_valid((struct edid *)buf))
+            return 0;
     }
-    if (!edid_is_valid((struct edid *)buf)) {
-        dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
-             drm_get_connector_name(connector));
-        ret = -1;
-    }
-end:
-    return ret;
+
+    /* repeated checksum failures; warn, but carry on */
+    dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+         drm_get_connector_name(connector));
+    return -1;
 }
 
 /**
@@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector,
                 ptr->vdisplay > vdisplay)
                 continue;
         }
+        if (drm_mode_vrefresh(ptr) > 61)
+            continue;
         mode = drm_mode_duplicate(dev, ptr);
         if (mode) {
             drm_mode_probed_add(connector, mode);

View File

@@ -159,30 +159,33 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
                 mutex_unlock(&dev->mode_config.mutex);
             }
         }
+        if (dpms_mode == DRM_MODE_DPMS_OFF) {
             mutex_lock(&dev->mode_config.mutex);
-            crtc_funcs->dpms(crtc, dpms_mode);
+            crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
             mutex_unlock(&dev->mode_config.mutex);
         }
     }
+    }
 }
 
 int drm_fb_helper_blank(int blank, struct fb_info *info)
 {
     switch (blank) {
+    /* Display: On; HSync: On, VSync: On */
     case FB_BLANK_UNBLANK:
         drm_fb_helper_on(info);
         break;
+    /* Display: Off; HSync: On, VSync: On */
     case FB_BLANK_NORMAL:
-        drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+        drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
         break;
+    /* Display: Off; HSync: Off, VSync: On */
    case FB_BLANK_HSYNC_SUSPEND:
         drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
         break;
+    /* Display: Off; HSync: On, VSync: Off */
     case FB_BLANK_VSYNC_SUSPEND:
         drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
         break;
+    /* Display: Off; HSync: Off, VSync: Off */
     case FB_BLANK_POWERDOWN:
         drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
         break;

View File

@@ -100,6 +100,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
     return child;
 }
 
+/* drm_mm_pre_get() - pre-allocate a drm_mm_node structure
+ * drm_mm: memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
 int drm_mm_pre_get(struct drm_mm *mm)
 {
     struct drm_mm_node *node;
@@ -218,6 +223,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+                        unsigned long size,
+                        unsigned alignment,
+                        unsigned long start,
+                        unsigned long end,
+                        int atomic)
+{
+    struct drm_mm_node *align_splitoff = NULL;
+    unsigned tmp = 0;
+    unsigned wasted = 0;
+
+    if (node->start < start)
+        wasted += start - node->start;
+    if (alignment)
+        tmp = ((node->start + wasted) % alignment);
+
+    if (tmp)
+        wasted += alignment - tmp;
+    if (wasted) {
+        align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+        if (unlikely(align_splitoff == NULL))
+            return NULL;
+    }
+
+    if (node->size == size) {
+        list_del_init(&node->fl_entry);
+        node->free = 0;
+    } else {
+        node = drm_mm_split_at_start(node, size, atomic);
+    }
+
+    if (align_splitoff)
+        drm_mm_put_block(align_splitoff);
+
+    return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
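
The bookkeeping in drm_mm_get_block_range_generic() is two corrections applied to the node start: first move up to the range start, then pad up to the next alignment boundary; both corrections are split off as a separate free block. A standalone sketch with hypothetical numbers (free node at 100, range starting at 500, 128-byte alignment):

#include <stdio.h>

int main(void)
{
    unsigned long node_start = 100, start = 500;
    unsigned alignment = 128, wasted = 0, tmp;

    if (node_start < start)
        wasted += start - node_start;      /* 400: reach the range */
    if (alignment) {
        tmp = (node_start + wasted) % alignment;
        if (tmp)
            wasted += alignment - tmp;     /* +12: align 500 -> 512 */
    }
    printf("split off %u bytes, block starts at %lu\n",
           wasted, node_start + wasted);   /* prints: 412, 512 */
    return 0;
}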
@@ -250,12 +293,14 @@ void drm_mm_put_block(struct drm_mm_node *cur)
             prev_node->size += next_node->size;
             list_del(&next_node->ml_entry);
             list_del(&next_node->fl_entry);
+            spin_lock(&mm->unused_lock);
             if (mm->num_unused < MM_UNUSED_TARGET) {
                 list_add(&next_node->fl_entry,
                      &mm->unused_nodes);
                 ++mm->num_unused;
             } else
                 kfree(next_node);
+            spin_unlock(&mm->unused_lock);
         } else {
             next_node->size += cur->size;
             next_node->start = cur->start;
@@ -268,11 +313,13 @@ void drm_mm_put_block(struct drm_mm_node *cur)
         list_add(&cur->fl_entry, &mm->fl_entry);
     } else {
         list_del(&cur->ml_entry);
+        spin_lock(&mm->unused_lock);
         if (mm->num_unused < MM_UNUSED_TARGET) {
             list_add(&cur->fl_entry, &mm->unused_nodes);
             ++mm->num_unused;
         } else
             kfree(cur);
+        spin_unlock(&mm->unused_lock);
     }
 }
@@ -319,6 +366,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 }
 EXPORT_SYMBOL(drm_mm_search_free);
 
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+                        unsigned long size,
+                        unsigned alignment,
+                        unsigned long start,
+                        unsigned long end,
+                        int best_match)
+{
+    struct list_head *list;
+    const struct list_head *free_stack = &mm->fl_entry;
+    struct drm_mm_node *entry;
+    struct drm_mm_node *best;
+    unsigned long best_size;
+    unsigned wasted;
+
+    best = NULL;
+    best_size = ~0UL;
+
+    list_for_each(list, free_stack) {
+        entry = list_entry(list, struct drm_mm_node, fl_entry);
+        wasted = 0;
+
+        if (entry->size < size)
+            continue;
+
+        if (entry->start > end || (entry->start+entry->size) < start)
+            continue;
+
+        if (entry->start < start)
+            wasted += start - entry->start;
+
+        if (alignment) {
+            register unsigned tmp = (entry->start + wasted) % alignment;
+            if (tmp)
+                wasted += alignment - tmp;
+        }
+
+        if (entry->size >= size + wasted) {
+            if (!best_match)
+                return entry;
+            if (entry->size < best_size) {
+                best = entry;
+                best_size = entry->size;
+            }
+        }
+    }
+
+    return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
 int drm_mm_clean(struct drm_mm * mm)
 {
     struct list_head *head = &mm->ml_entry;
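
The two new range helpers are meant to be used as a pair, the way drm_mm_search_free()/drm_mm_get_block() already are. A hypothetical caller (invented name, not part of this commit) might look like:

#include "drm_mm.h"

static struct drm_mm_node *example_alloc_in_range(struct drm_mm *mm,
                          unsigned long size,
                          unsigned alignment,
                          unsigned long start,
                          unsigned long end)
{
    struct drm_mm_node *node;

    node = drm_mm_search_free_in_range(mm, size, alignment,
                       start, end, 0);
    if (!node)
        return NULL;
    /* non-atomic carve-out of the hole we just found */
    return drm_mm_get_block_range_generic(node, size, alignment,
                          start, end, 0);
}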

View File

@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_mode_height);
 
+/**
+ * drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest int.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+    unsigned int calc_val;
+
+    if (mode->hsync)
+        return mode->hsync;
+
+    if (mode->htotal < 0)
+        return 0;
+
+    calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+    calc_val += 500;                /* round to 1000Hz */
+    calc_val /= 1000;               /* truncate to kHz */
+
+    return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
 /**
  * drm_mode_vrefresh - get the vrefresh of a mode
  * @mode: mode
@@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height);
  * LOCKING:
  * None.
  *
- * Return @mode's vrefresh rate or calculate it if necessary.
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
  *
  * FIXME: why is this needed? shouldn't vrefresh be set already?
  *
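
The rounding in drm_mode_hsync() is easy to verify with the classic 1024x768@60 VESA timing (pixel clock 65000 kHz, htotal 1344): 65,000,000 / 1344 = 48363 Hz, which rounds to 48 kHz. A standalone check:

#include <stdio.h>

int main(void)
{
    int clock = 65000, htotal = 1344;     /* 1024x768@60 */
    unsigned int calc_val;

    calc_val = (clock * 1000) / htotal;   /* 48363 Hz */
    calc_val += 500;                      /* round to 1000Hz */
    calc_val /= 1000;                     /* truncate to kHz */
    printf("hsync = %u kHz\n", calc_val); /* prints 48 */
    return 0;
}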

View File

@@ -0,0 +1,258 @@
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
/*
* Copyright (C) 2005 Silicon Graphics, Inc.
* Christoph Lameter
*
* Allows to provide arch independent atomic definitions without the need to
* edit all arch specific atomic.h files.
*/
#include <asm/types.h>
/*
 * Support for atomic_long_t
*
* Casts for parameters are avoided for existing atomic functions in order to
* avoid issues with cast-as-lval under gcc 4.x and other limitations that the
* macros of a platform may have.
*/
#if BITS_PER_LONG == 64
typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_read(v);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_set(v, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_inc(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_dec(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_add(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_sub(i, v);
}
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_sub_and_test(i, v);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_dec_and_test(v);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_inc_and_test(v);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_add_negative(i, v);
}
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_add_return(i, v);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_sub_return(i, v);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_inc_return(v);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_dec_return(v);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_add_unless(v, a, u);
}
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
#define atomic_long_cmpxchg(l, old, new) \
(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
(atomic64_xchg((atomic64_t *)(v), (new)))
#else /* BITS_PER_LONG == 64 */
typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_read(v);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic_t *v = (atomic_t *)l;
atomic_set(v, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_inc(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_dec(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_add(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_sub(i, v);
}
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_sub_and_test(i, v);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_dec_and_test(v);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_inc_and_test(v);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_add_negative(i, v);
}
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_return(i, v);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_sub_return(i, v);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_inc_return(v);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_dec_return(v);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_unless(v, a, u);
}
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
#define atomic_long_cmpxchg(l, old, new) \
(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
(atomic_xchg((atomic_t *)(v), (new)))
#endif /* BITS_PER_LONG == 64 */
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
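
A hypothetical usage sketch (invented names, assumes this header is included): atomic_long_t exists so that a word-sized counter can be manipulated through one API on both 32- and 64-bit builds, with the header above dispatching to atomic_t or atomic64_t.

static atomic_long_t example_pages = ATOMIC_LONG_INIT(0);

static void example_account(long delta)
{
    atomic_long_add(delta, &example_pages);
}

static long example_total(void)
{
    return atomic_long_read(&example_pages);
}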

View File

@@ -0,0 +1,20 @@
#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
#include <asm-generic/bitops/le.h>
#define ext2_set_bit(nr,addr) \
generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
#define ext2_clear_bit(nr,addr) \
generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
#define ext2_test_bit(nr,addr) \
generic_test_le_bit((nr),(unsigned long *)(addr))
#define ext2_find_first_zero_bit(addr, size) \
generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
#define ext2_find_next_zero_bit(addr, size, off) \
generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
#define ext2_find_next_bit(addr, size, off) \
generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */

View File

@@ -0,0 +1,36 @@
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_
#include <asm/types.h>
/**
* fls64 - find last set bit in a 64-bit word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffsll, but returns the position of the most significant set bit.
*
* fls64(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 64.
*/
#if BITS_PER_LONG == 32
static __always_inline int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
return fls(h) + 32;
return fls(x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
{
if (x == 0)
return 0;
return __fls(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
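
A standalone check of the 32-bit fls64() path above, with fls() emulated via the GCC/Clang __builtin_clz builtin (an assumption of this sketch): fls(1) == 1, so fls64(1ULL << 32) must be 33.

#include <stdio.h>

static int fls32(unsigned int x)
{
    return x ? 32 - __builtin_clz(x) : 0;  /* stand-in for fls() */
}

static int my_fls64(unsigned long long x)
{
    unsigned int h = x >> 32;

    if (h)
        return fls32(h) + 32;
    return fls32((unsigned int)x);
}

int main(void)
{
    printf("%d %d %d\n", my_fls64(0), my_fls64(16), my_fls64(1ULL << 32));
    /* prints: 0 5 33 */
    return 0;
}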

View File

@@ -0,0 +1,11 @@
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
#include <asm/types.h>
extern unsigned int hweight32(unsigned int w);
extern unsigned int hweight16(unsigned int w);
extern unsigned int hweight8(unsigned int w);
extern unsigned long hweight64(__u64 w);
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */

View File

@@ -0,0 +1,57 @@
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_
#include <asm/types.h>
#include <asm/byteorder.h>
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
#if defined(__LITTLE_ENDIAN)
#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
#define generic_find_next_le_bit(addr, size, offset) \
find_next_bit(addr, size, offset)
#elif defined(__BIG_ENDIAN)
#define generic_test_le_bit(nr, addr) \
test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___set_le_bit(nr, addr) \
__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___clear_le_bit(nr, addr) \
__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic_test_and_set_le_bit(nr, addr) \
test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic_test_and_clear_le_bit(nr, addr) \
test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___test_and_set_le_bit(nr, addr) \
__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___test_and_clear_le_bit(nr, addr) \
__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
extern unsigned long generic_find_next_le_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
#else
#error "Please fix <asm/byteorder.h>"
#endif
#define generic_find_first_zero_le_bit(addr, size) \
generic_find_next_zero_le_bit((addr), (size), 0)
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */

View File

@@ -0,0 +1,15 @@
#ifndef _ASM_GENERIC_BITOPS_MINIX_H_
#define _ASM_GENERIC_BITOPS_MINIX_H_
#define minix_test_and_set_bit(nr,addr) \
__test_and_set_bit((nr),(unsigned long *)(addr))
#define minix_set_bit(nr,addr) \
__set_bit((nr),(unsigned long *)(addr))
#define minix_test_and_clear_bit(nr,addr) \
__test_and_clear_bit((nr),(unsigned long *)(addr))
#define minix_test_bit(nr,addr) \
test_bit((nr),(unsigned long *)(addr))
#define minix_find_first_zero_bit(addr,size) \
find_first_zero_bit((unsigned long *)(addr),(size))
#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */

View File

@@ -0,0 +1,31 @@
#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
#define _ASM_GENERIC_BITOPS_SCHED_H_
#include <linux/compiler.h> /* unlikely() */
#include <asm/types.h>
/*
* Every architecture must define this function. It's the fastest
* way of searching a 100-bit bitmap. It's guaranteed that at least
 * one of the 100 bits is set.
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#if BITS_PER_LONG == 64
if (b[0])
return __ffs(b[0]);
return __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
if (b[0])
return __ffs(b[0]);
if (b[1])
return __ffs(b[1]) + 32;
if (b[2])
return __ffs(b[2]) + 64;
return __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif
}
#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */
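
A standalone sketch of the 32-bit branch: the 100-bit priority map is four 32-bit words scanned in order, with __ffs emulated here by the GCC/Clang __builtin_ctz builtin (an assumption of this sketch).

#include <stdio.h>

int main(void)
{
    unsigned int b[4] = { 0, 0, 0x40, 0 };  /* only bit 70 is set */
    int i;

    for (i = 0; i < 4; i++) {
        if (b[i]) {
            printf("first set bit: %d\n", i * 32 + __builtin_ctz(b[i]));
            break;                          /* prints 70 */
        }
    }
    return 0;
}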

View File

@@ -0,0 +1,32 @@
#ifndef __ASM_GENERIC_BITS_PER_LONG
#define __ASM_GENERIC_BITS_PER_LONG
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
* bitsperlong.h. In particular, an architecture that supports
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif
#ifdef __KERNEL__
#ifdef CONFIG_64BIT
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif /* CONFIG_64BIT */
/*
* FIXME: The check currently breaks x86-64 build, so it's
* temporarily disabled. Please fix x86-64 and reenable
*/
#if 0 && BITS_PER_LONG != __BITS_PER_LONG
#error Inconsistent word size. Check asm/bitsperlong.h
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_BITS_PER_LONG */

View File

@@ -0,0 +1,78 @@
/*
* asm-generic/int-ll64.h
*
* Integer declarations for architectures which use "long long"
* for 64-bit types.
*/
#ifndef _ASM_GENERIC_INT_LL64_H
#define _ASM_GENERIC_INT_LL64_H
#include <asm/bitsperlong.h>
#ifndef __ASSEMBLY__
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
* header files exported to user space
*/
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
#ifdef __GNUC__
__extension__ typedef __signed__ long long __s64;
__extension__ typedef unsigned long long __u64;
#else
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
#endif
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
#define S8_C(x) x
#define U8_C(x) x ## U
#define S16_C(x) x
#define U16_C(x) x ## U
#define S32_C(x) x
#define U32_C(x) x ## U
#define S64_C(x) x ## LL
#define U64_C(x) x ## ULL
#else /* __ASSEMBLY__ */
#define S8_C(x) x
#define U8_C(x) x
#define S16_C(x) x
#define U16_C(x) x
#define S32_C(x) x
#define U32_C(x) x
#define S64_C(x) x
#define U64_C(x) x
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_INT_LL64_H */

View File

@@ -0,0 +1,42 @@
#ifndef _ASM_GENERIC_TYPES_H
#define _ASM_GENERIC_TYPES_H
/*
* int-ll64 is used practically everywhere now,
* so use it as a reasonable default.
*/
#include <asm-generic/int-ll64.h>
#ifndef __ASSEMBLY__
typedef unsigned short umode_t;
#endif /* __ASSEMBLY__ */
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/*
* DMA addresses may be very different from physical addresses
* and pointers. i386 and powerpc may have 64 bit DMA on 32 bit
* systems, while sparc64 uses 32 bit DMA addresses for 64 bit
* physical addresses.
* This default defines dma_addr_t to have the same size as
* phys_addr_t, which is the most common way.
* Do not define the dma64_addr_t type, which never really
* worked.
*/
#ifndef dma_addr_t
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif /* CONFIG_PHYS_ADDR_T_64BIT */
#endif /* dma_addr_t */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_TYPES_H */

View File

@@ -0,0 +1,164 @@
#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>
/*
* Alternative inline assembly for SMP.
*
* The LOCK_PREFIX macro defined here replaces the LOCK and
* LOCK_PREFIX macros used everywhere in the source tree.
*
* SMP alternatives use the same data structures as the other
* alternatives and the X86_FEATURE_UP flag to indicate the case of a
* UP system running a SMP kernel. The existing apply_alternatives()
* works fine for patching a SMP kernel for UP.
*
* The SMP alternative tables can be kept after boot and contain both
* UP and SMP versions of the instructions to allow switching back to
* SMP at runtime, when hotplugging in a new CPU, which is especially
* useful in virtualized environments.
*
* The very common lock prefix is handled as special case in a
* separate table which is a pure address list without replacement ptr
* and size information. That keeps the table sizes small.
*/
#ifdef CONFIG_SMP
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR "661f\n" /* address */ \
".previous\n" \
"661:\n\tlock; "
#else /* ! CONFIG_SMP */
#define LOCK_PREFIX ""
#endif
/* This must be included *after* the definition of LOCK_PREFIX */
#include <asm/cpufeature.h>
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
u8 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
u8 pad1;
#ifdef CONFIG_X86_64
u32 pad2;
#endif
};
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
struct module;
#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif /* CONFIG_SMP */
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
\
"661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR "661b\n" /* label */ \
_ASM_PTR "663f\n" /* new instruction */ \
" .byte " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
".previous\n" \
".section .altinstr_replacement, \"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous"
/*
* Alternative instructions for different CPU types or capabilities.
*
 * This allows optimized instructions to be used even on generic binary
* kernels.
*
 * The length of oldinstr must be greater than or equal to that of newinstr.
* It can be padded with nops as needed.
*
* For non barrier like inlines please define new variants
* without volatile and memory clobber.
*/
#define alternative(oldinstr, newinstr, feature) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
/*
* Alternative inline assembly with input.
*
 * Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
* Best is to use constraints that are fixed size (like (%1) ... "r")
* If you use variable sized constraints like "m" or "g" in the
* replacement make sure to pad to the worst case length.
* Leaving an unused argument 0 to keep API compatibility.
*/
#define alternative_input(oldinstr, newinstr, feature, input...) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
: : "i" (0), ## input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
: output : "i" (0), ## input)
/*
* use this macro(s) if you need more than one output parameter
* in alternative_io
*/
#define ASM_OUTPUT2(a, b) a, b
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end)
{}
#define __parainstructions NULL
#define __parainstructions_end NULL
#endif
/*
* Clear and restore the kernel write-protection flag on the local CPU.
* Allows the kernel to edit read-only pages.
* Side-effect: any interrupt handler running between save and restore will have
* the ability to write to read-only pages.
*
* Warning:
* Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
* no thread can be preempted in the instructions being modified (no iret to an
* invalid instruction possible) or if the instructions are changed from a
* consistent state to another consistent state atomically.
* More care must be taken when modifying code in the SMP case because of
* Intel's errata.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing an
* inconsistent instruction while you patch.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
#endif /* _ASM_X86_ALTERNATIVE_H */
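
A usage sketch modeled on the kernel's prefetch() helper (the call site is illustrative, not part of this file): emit a prefetchnta only on CPUs that advertise SSE, and nothing on older parts. Note the input is %1 because alternative_input() reserves argument 0 for API compatibility.

static inline void example_prefetch(const void *p)
{
    /* old: no instruction; new: prefetchnta, gated on X86_FEATURE_XMM */
    alternative_input("", "prefetchnta (%1)", X86_FEATURE_XMM, "r" (p));
}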

View File

@@ -0,0 +1,55 @@
#ifndef _ASM_X86_ASM_H
#define _ASM_X86_ASM_H
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
# define __ASM_EX_SEC .section __ex_table, "a"
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
#endif
#ifdef CONFIG_X86_32
# define __ASM_SEL(a,b) __ASM_FORM(a)
#else
# define __ASM_SEL(a,b) __ASM_FORM(b)
#endif
#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q)
#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
#define _ASM_MOV __ASM_SIZE(mov)
#define _ASM_INC __ASM_SIZE(inc)
#define _ASM_DEC __ASM_SIZE(dec)
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
#define _ASM_CX __ASM_REG(cx)
#define _ASM_DX __ASM_REG(dx)
#define _ASM_SP __ASM_REG(sp)
#define _ASM_BP __ASM_REG(bp)
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \
__ASM_EX_SEC ; \
_ASM_ALIGN ; \
_ASM_PTR from , to ; \
.previous
#else
# define _ASM_EXTABLE(from,to) \
__ASM_EX_SEC \
_ASM_ALIGN "\n" \
_ASM_PTR #from "," #to "\n" \
" .previous\n"
#endif
#endif /* _ASM_X86_ASM_H */

View File

@@ -0,0 +1,5 @@
#ifdef CONFIG_X86_32
# include "atomic_32.h"
#else
# include "atomic_64.h"
#endif

View File

@@ -0,0 +1,415 @@
#ifndef _ASM_X86_ATOMIC_32_H
#define _ASM_X86_ATOMIC_32_H
#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/processor.h>
#include <asm/cmpxchg.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return v->counter;
}
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
static inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
static inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "decl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "incl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
/**
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
}
/**
* atomic_add_return - add integer and return
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
int __i;
#ifdef CONFIG_M386
unsigned long flags;
if (unlikely(boot_cpu_data.x86 <= 3))
goto no_xadd;
#endif
/* Modern 486+ processor */
__i = i;
asm volatile(LOCK_PREFIX "xaddl %0, %1"
: "+r" (i), "+m" (v->counter)
: : "memory");
return i + __i;
#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
local_irq_save(flags);
__i = atomic_read(v);
atomic_set(v, i + __i);
local_irq_restore(flags);
return i + __i;
#endif
}
/**
* atomic_sub_return - subtract integer and return
* @v: pointer of type atomic_t
* @i: integer value to subtract
*
* Atomically subtracts @i from @v and returns @v - @i
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
}
static inline int atomic_xchg(atomic_t *v, int new)
{
return xchg(&v->counter, new);
}
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")
#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask), "m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
/* An 64bit atomic type */
typedef struct {
u64 __aligned(8) counter;
} atomic64_t;
#define ATOMIC64_INIT(val) { (val) }
extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
/**
* atomic64_xchg - xchg atomic64 variable
* @ptr: pointer to type atomic64_t
* @new_val: value to assign
*
* Atomically xchgs the value of @ptr to @new_val and returns
* the old value.
*/
extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
/**
* atomic64_set - set atomic64 variable
* @ptr: pointer to type atomic64_t
* @new_val: value to assign
*
* Atomically sets the value of @ptr to @new_val.
*/
extern void atomic64_set(atomic64_t *ptr, u64 new_val);
/**
* atomic64_read - read atomic64 variable
* @ptr: pointer to type atomic64_t
*
* Atomically reads the value of @ptr and returns it.
*/
static inline u64 atomic64_read(atomic64_t *ptr)
{
u64 res;
/*
* Note, we inline this atomic64_t primitive because
* it only clobbers EAX/EDX and leaves the others
* untouched. We also (somewhat subtly) rely on the
* fact that cmpxchg8b returns the current 64-bit value
* of the memory location we are touching:
*/
asm volatile(
"mov %%ebx, %%eax\n\t"
"mov %%ecx, %%edx\n\t"
LOCK_PREFIX "cmpxchg8b %1\n"
: "=&A" (res)
: "m" (*ptr)
);
return res;
}
extern u64 atomic64_read(atomic64_t *ptr);
/**
* atomic64_add_return - add and return
* @delta: integer value to add
* @ptr: pointer to type atomic64_t
*
* Atomically adds @delta to @ptr and returns @delta + *@ptr
*/
extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
/*
* Other variants with different arithmetic operators:
*/
extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
extern u64 atomic64_inc_return(atomic64_t *ptr);
extern u64 atomic64_dec_return(atomic64_t *ptr);
/**
* atomic64_add - add integer to atomic64 variable
* @delta: integer value to add
* @ptr: pointer to type atomic64_t
*
* Atomically adds @delta to @ptr.
*/
extern void atomic64_add(u64 delta, atomic64_t *ptr);
/**
* atomic64_sub - subtract the atomic64 variable
* @delta: integer value to subtract
* @ptr: pointer to type atomic64_t
*
* Atomically subtracts @delta from @ptr.
*/
extern void atomic64_sub(u64 delta, atomic64_t *ptr);
/**
* atomic64_sub_and_test - subtract value from variable and test result
* @delta: integer value to subtract
* @ptr: pointer to type atomic64_t
*
* Atomically subtracts @delta from @ptr and returns
* true if the result is zero, or false for all
* other cases.
*/
extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
/**
* atomic64_inc - increment atomic64 variable
* @ptr: pointer to type atomic64_t
*
* Atomically increments @ptr by 1.
*/
extern void atomic64_inc(atomic64_t *ptr);
/**
* atomic64_dec - decrement atomic64 variable
* @ptr: pointer to type atomic64_t
*
* Atomically decrements @ptr by 1.
*/
extern void atomic64_dec(atomic64_t *ptr);
/**
* atomic64_dec_and_test - decrement and test
* @ptr: pointer to type atomic64_t
*
* Atomically decrements @ptr by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
extern int atomic64_dec_and_test(atomic64_t *ptr);
/**
* atomic64_inc_and_test - increment and test
* @ptr: pointer to type atomic64_t
*
* Atomically increments @ptr by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
extern int atomic64_inc_and_test(atomic64_t *ptr);
/**
* atomic64_add_negative - add and test if negative
* @delta: integer value to add
* @ptr: pointer to type atomic64_t
*
* Atomically adds @delta to @ptr and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
#include <asm-generic/atomic-long.h>
#endif /* _ASM_X86_ATOMIC_32_H */
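
A hypothetical refcount-style sketch of atomic_add_unless() above (invented names, assumes this header is included): take a reference only while the object is still live, i.e. the classic atomic_inc_not_zero() pattern.

static atomic_t example_ref = ATOMIC_INIT(1);

static int example_get(void)
{
    /* returns 0 if the count was already 0 (object dead) */
    return atomic_add_unless(&example_ref, 1, 0);
}

static void example_put(void (*release)(void))
{
    if (atomic_dec_and_test(&example_ref))
        release();
}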

View File

@@ -0,0 +1,465 @@
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*
* Note: inlines with more than a single statement should be marked
* __always_inline to avoid problems with older gcc's inlining heuristics.
*/
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/alternative.h>
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
* was cleared before the operation and != 0 if it was not.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif
#define ADDR BITOP_ADDR(addr)
/*
* We do the locked ops that don't return the old value as
* a mask operation on a byte.
*/
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
*
* Note: there are no guarantees that this function will not be reordered
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __always_inline void
set_bit(unsigned int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)CONST_MASK(nr))
: "memory");
} else {
asm volatile(LOCK_PREFIX "bts %1,%0"
: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
}
}
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
static __always_inline void
clear_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)~CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX "btr %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
}
}
/*
* clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
/*
* __clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* __clear_bit() is non-atomic and implies release semantics before the memory
* operation. It can be used for an unlock if no other CPUs can concurrently
* modify other bits in the word.
*
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX "btc %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
}
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
"sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
/**
* test_and_set_bit_lock - Set a bit and return its old value for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This is the same as test_and_set_bit on x86.
*/
static __always_inline int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm("bts %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile("btr %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile("btc %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr) : "memory");
return oldbit;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
int oldbit;
asm volatile("bt %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static int test_bit(int nr, const volatile unsigned long *addr);
#endif
#define test_bit(nr, addr) \
(__builtin_constant_p((nr)) \
? constant_test_bit((nr), (addr)) \
: variable_test_bit((nr), (addr)))
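/*
* For illustration: test_bit(5, addr) with a literal bit number folds to
* constant_test_bit() and becomes a plain load, mask and compare, while
* test_bit(i, addr) with a runtime index goes through variable_test_bit()
* and the "bt" instruction.
*/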
/**
* __ffs - find first set bit in word
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
asm("bsf %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
/**
* ffz - find first zero bit in word
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
static inline unsigned long ffz(unsigned long word)
{
asm("bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
}
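/*
* For illustration: __ffs(0x08) == 3 (bit 3 is the lowest set bit) and
* ffz(0x07) == 3 (bit 3 is the lowest clear bit). Both are undefined when
* no such bit exists, hence the caller-side checks noted above.
*/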
/*
* __fls: find last set bit in word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __fls(unsigned long word)
{
asm("bsr %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
#ifdef __KERNEL__
/**
* ffs - find first set bit in word
* @x: the word to search
*
* This is defined the same way as the libc and compiler builtin ffs
* routines, therefore differs in spirit from the other bitops.
*
* ffs(value) returns 0 if value is 0 or the position of the first
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
static inline int ffs(int x)
{
int r;
#ifdef CONFIG_X86_CMOV
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
: "=r" (r) : "rm" (x), "r" (-1));
#else
asm("bsfl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
/**
* fls - find last set bit in word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffs, but returns the position of the most significant set bit.
*
* fls(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 32.
*/
static inline int fls(int x)
{
int r;
#ifdef CONFIG_X86_CMOV
asm("bsrl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "rm" (-1));
#else
asm("bsrl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
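/*
* For illustration: ffs(0x10) == 5 and fls(0x10) == 5 (1-based positions),
* while ffs(0) == 0 and fls(0) == 0, matching the libc convention
* described above.
*/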
#endif /* __KERNEL__ */
#undef ADDR
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
#define ARCH_HAS_FAST_MULTIPLIER 1
#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>
#define ext2_set_bit_atomic(lock, nr, addr) \
test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
test_and_clear_bit((nr), (unsigned long *)(addr))
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */

View File

@ -0,0 +1,13 @@
#ifndef __ASM_X86_BITSPERLONG_H
#define __ASM_X86_BITSPERLONG_H
#ifdef __x86_64__
# define __BITS_PER_LONG 64
#else
# define __BITS_PER_LONG 32
#endif
#include <asm-generic/bitsperlong.h>
#endif /* __ASM_X86_BITSPERLONG_H */

View File

@ -0,0 +1,6 @@
#ifndef _ASM_X86_BYTEORDER_H
#define _ASM_X86_BYTEORDER_H
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_X86_BYTEORDER_H */

View File

@ -0,0 +1,5 @@
#ifdef CONFIG_X86_32
# include "cmpxchg_32.h"
#else
# include "cmpxchg_64.h"
#endif

View File

@ -0,0 +1,274 @@
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H
#include <linux/bitops.h> /* for LOCK_PREFIX */
/*
* Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
* need to test for the feature in boot_cpu_data.
*/
extern void __xchg_wrong_size(void);
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has a side effect, so the volatile attribute is necessary,
* but generally the primitive is invalid; *ptr is an output argument. --ANK
*/
struct __xchg_dummy {
unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))
#define __xchg(x, ptr, size) \
({ \
__typeof(*(ptr)) __x = (x); \
switch (size) { \
case 1: \
asm volatile("xchgb %b0,%1" \
: "=q" (__x) \
: "m" (*__xg(ptr)), "0" (__x) \
: "memory"); \
break; \
case 2: \
asm volatile("xchgw %w0,%1" \
: "=r" (__x) \
: "m" (*__xg(ptr)), "0" (__x) \
: "memory"); \
break; \
case 4: \
asm volatile("xchgl %0,%1" \
: "=r" (__x) \
: "m" (*__xg(ptr)), "0" (__x) \
: "memory"); \
break; \
default: \
__xchg_wrong_size(); \
} \
__x; \
})
#define xchg(ptr, v) \
__xchg((v), (ptr), sizeof(*ptr))
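/*
* For illustration (hypothetical caller): given "unsigned long busy;",
*
*	old = xchg(&busy, 1);
*
* atomically stores 1 and returns the previous value, dispatching on
* sizeof(*ptr) to the 4-byte "xchgl" case above.
*/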
/*
* The semantics of CMPXCHG8B are a bit strange; this is why
* there is a loop and the loading of %%eax and %%edx has to
* be inside. This inlines well in most cases, the cached
* cost is around 38 cycles. (In the future we might want
* to do a SIMD/3DNow!/MMX/FPU 64-bit store here, but that
* might have an implicit FPU-save as a cost, so it's not
* clear which path to take.)
*
* cmpxchg8b must be used with the lock prefix here to allow
* the instruction to be executed atomically, see page 3-102
* of the instruction set reference 24319102.pdf. We need
* the reader side to see the coherent 64bit value.
*/
static inline void __set_64bit(unsigned long long *ptr,
unsigned int low, unsigned int high)
{
asm volatile("\n1:\t"
"movl (%0), %%eax\n\t"
"movl 4(%0), %%edx\n\t"
LOCK_PREFIX "cmpxchg8b (%0)\n\t"
"jnz 1b"
: /* no outputs */
: "D"(ptr),
"b"(low),
"c"(high)
: "ax", "dx", "memory");
}
static inline void __set_64bit_constant(unsigned long long *ptr,
unsigned long long value)
{
__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
}
#define ll_low(x) *(((unsigned int *)&(x)) + 0)
#define ll_high(x) *(((unsigned int *)&(x)) + 1)
static inline void __set_64bit_var(unsigned long long *ptr,
unsigned long long value)
{
__set_64bit(ptr, ll_low(value), ll_high(value));
}
#define set_64bit(ptr, value) \
(__builtin_constant_p((value)) \
? __set_64bit_constant((ptr), (value)) \
: __set_64bit_var((ptr), (value)))
#define _set_64bit(ptr, value) \
(__builtin_constant_p(value) \
? __set_64bit(ptr, (unsigned int)(value), \
(unsigned int)((value) >> 32)) \
: __set_64bit(ptr, ll_low((value)), ll_high((value))))
extern void __cmpxchg_wrong_size(void);
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __raw_cmpxchg(ptr, old, new, size, lock) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
switch (size) { \
case 1: \
asm volatile(lock "cmpxchgb %b1,%2" \
: "=a"(__ret) \
: "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
case 2: \
asm volatile(lock "cmpxchgw %w1,%2" \
: "=a"(__ret) \
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
case 4: \
asm volatile(lock "cmpxchgl %1,%2" \
: "=a"(__ret) \
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
default: \
__cmpxchg_wrong_size(); \
} \
__ret; \
})
#define __cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
#define __sync_cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
#define __cmpxchg_local(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "")
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr, old, new) \
__cmpxchg((ptr), (old), (new), sizeof(*ptr))
#define sync_cmpxchg(ptr, old, new) \
__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
#define cmpxchg_local(ptr, old, new) \
__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
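/*
* For illustration (hypothetical caller): the usual lock-free update
* pattern built on cmpxchg() is
*
*	do {
*		old = v;
*		new = old + 1;
*	} while (cmpxchg(&v, old, new) != old);
*
* which retries until no other CPU modified v between the read and the
* compare-and-exchange.
*/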
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
(unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
(unsigned long long)(n)))
#endif
static inline unsigned long long __cmpxchg64(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long prev;
asm volatile(LOCK_PREFIX "cmpxchg8b %3"
: "=A"(prev)
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
"m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
}
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long prev;
asm volatile("cmpxchg8b %3"
: "=A"(prev)
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
"m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
}
#ifndef CONFIG_X86_CMPXCHG
/*
* Building a kernel capable of running on an 80386. It may be necessary to
* simulate the cmpxchg on the 80386 CPU. For that purpose we define
* a function for each of the sizes we support.
*/
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
__ret; \
})
#define cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
__ret; \
})
#endif
#ifndef CONFIG_X86_CMPXCHG64
/*
* Building a kernel capable of running on an 80386 or 80486. It may be
* necessary to simulate the cmpxchg8b on those CPUs.
*/
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
#define cmpxchg64(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
__typeof__(*(ptr)) __new = (n); \
alternative_io("call cmpxchg8b_emu", \
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
"S" ((ptr)), "0" (__old), \
"b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
__ret; })
#define cmpxchg64_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 4)) \
__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
else \
__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
__ret; \
})
#endif
#endif /* _ASM_X86_CMPXCHG_32_H */

View File

@ -0,0 +1,283 @@
/*
* Defines x86 CPU feature bits
*/
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
#include <asm/required-features.h>
#define NCAPINTS 9 /* N 32-bit words worth of info */
/*
* Note: If the comment begins with a quoted string, that string is used
* in /proc/cpuinfo instead of the macro name. If the string is "",
* this feature bit is not displayed in /proc/cpuinfo at all.
*/
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */
/* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */
#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM (0*32+25) /* "sse" */
#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID (4*32+10) /* Context ID */
#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
#define X86_FEATURE_AES (4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc
*/
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */
/* Virtualization flags: Linux defined */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#include <linux/bitops.h>
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
(((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
(((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
(((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
(((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
? 1 : \
test_cpu_cap(c, bit))
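/*
* For illustration: cpu_has(c, X86_FEATURE_FPU) folds to 1 at compile time
* whenever NEED_FPU is part of REQUIRED_MASK0 (see
* <asm/required-features.h>), while a feature outside the required set
* falls back to the runtime test_cpu_cap() lookup of c->x86_capability.
*/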
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
#define setup_clear_cpu_cap(bit) do { \
clear_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_cleared); \
} while (0)
#define setup_force_cpu_cap(bit) do { \
set_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_set); \
} while (0)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME)
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE)
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR)
#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR)
#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
#else
# define cpu_has_invlpg (boot_cpu_data.x86 > 3)
#endif
#ifdef CONFIG_X86_64
#undef cpu_has_vme
#define cpu_has_vme 0
#undef cpu_has_pae
#define cpu_has_pae ___BUG___
#undef cpu_has_mp
#define cpu_has_mp 1
#undef cpu_has_k6_mtrr
#define cpu_has_k6_mtrr 0
#undef cpu_has_cyrix_arr
#define cpu_has_cyrix_arr 0
#undef cpu_has_centaur_mcr
#define cpu_has_centaur_mcr 0
#endif /* CONFIG_X86_64 */
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */

View File

@ -0,0 +1,13 @@
#ifdef __KERNEL__
# ifdef CONFIG_X86_32
# include "posix_types_32.h"
# else
# include "posix_types_64.h"
# endif
#else
# ifdef __i386__
# include "posix_types_32.h"
# else
# include "posix_types_64.h"
# endif
#endif

View File

@ -0,0 +1,85 @@
#ifndef _ASM_X86_POSIX_TYPES_32_H
#define _ASM_X86_POSIX_TYPES_32_H
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
#ifdef __GNUC__
typedef long long __kernel_loff_t;
#endif
typedef struct {
int val[2];
} __kernel_fsid_t;
#if defined(__KERNEL__)
#undef __FD_SET
#define __FD_SET(fd,fdsetp) \
asm volatile("btsl %1,%0": \
"+m" (*(__kernel_fd_set *)(fdsetp)) \
: "r" ((int)(fd)))
#undef __FD_CLR
#define __FD_CLR(fd,fdsetp) \
asm volatile("btrl %1,%0": \
"+m" (*(__kernel_fd_set *)(fdsetp)) \
: "r" ((int) (fd)))
#undef __FD_ISSET
#define __FD_ISSET(fd,fdsetp) \
(__extension__ \
({ \
unsigned char __result; \
asm volatile("btl %1,%2 ; setb %0" \
: "=q" (__result) \
: "r" ((int)(fd)), \
"m" (*(__kernel_fd_set *)(fdsetp))); \
__result; \
}))
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) \
do { \
int __d0, __d1; \
asm volatile("cld ; rep ; stosl" \
: "=m" (*(__kernel_fd_set *)(fdsetp)), \
"=&c" (__d0), "=&D" (__d1) \
: "a" (0), "1" (__FDSET_LONGS), \
"2" ((__kernel_fd_set *)(fdsetp)) \
: "memory"); \
} while (0)
#endif /* defined(__KERNEL__) */
#endif /* _ASM_X86_POSIX_TYPES_32_H */

View File

@ -0,0 +1,88 @@
#ifndef _ASM_X86_REQUIRED_FEATURES_H
#define _ASM_X86_REQUIRED_FEATURES_H
/* Define minimum CPUID feature set for the kernel. These bits are checked
really early to actually display a visible error message before the
kernel dies. Make sure to assign features to the proper mask!
Some requirements that are not in CPUID yet are also in the
CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
The real information is in arch/x86/Kconfig.cpu, this just converts
the CONFIGs into a bitmask */
#ifndef CONFIG_MATH_EMULATION
# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
#else
# define NEED_FPU 0
#endif
#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
#else
# define NEED_PAE 0
#endif
#ifdef CONFIG_X86_CMPXCHG64
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
#else
# define NEED_CX8 0
#endif
#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
#else
# define NEED_CMOV 0
#endif
#ifdef CONFIG_X86_USE_3DNOW
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
#else
# define NEED_3DNOW 0
#endif
#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
#else
# define NEED_NOPL 0
#endif
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE 0
#define NEED_PGE 0
#else
#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
#endif
#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
#define NEED_LM (1<<(X86_FEATURE_LM & 31))
#else
#define NEED_PSE 0
#define NEED_MSR 0
#define NEED_PGE 0
#define NEED_FXSR 0
#define NEED_XMM 0
#define NEED_XMM2 0
#define NEED_LM 0
#endif
#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
NEED_XMM|NEED_XMM2)
#define SSE_MASK (NEED_XMM|NEED_XMM2)
#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
#define REQUIRED_MASK2 0
#define REQUIRED_MASK3 (NEED_NOPL)
#define REQUIRED_MASK4 0
#define REQUIRED_MASK5 0
#define REQUIRED_MASK6 0
#define REQUIRED_MASK7 0
#endif /* _ASM_X86_REQUIRED_FEATURES_H */

View File

@ -0,0 +1,20 @@
#ifndef _ASM_X86_SPINLOCK_TYPES_H
#define _ASM_X86_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
typedef struct raw_spinlock {
unsigned int slock;
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
unsigned int lock;
} raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_X86_SPINLOCK_TYPES_H */

View File

@ -0,0 +1,5 @@
#ifdef CONFIG_X86_32
# include "string_32.h"
#else
# include "string_64.h"
#endif

View File

@ -0,0 +1,342 @@
#ifndef _ASM_X86_STRING_32_H
#define _ASM_X86_STRING_32_H
#ifdef __KERNEL__
/* Let gcc decide whether to inline or use the out of line functions */
#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *dest, const char *src);
#define __HAVE_ARCH_STRNCPY
extern char *strncpy(char *dest, const char *src, size_t count);
#define __HAVE_ARCH_STRCAT
extern char *strcat(char *dest, const char *src);
#define __HAVE_ARCH_STRNCAT
extern char *strncat(char *dest, const char *src, size_t count);
#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *cs, const char *ct);
#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *cs, const char *ct, size_t count);
#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *s, int c);
#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *s);
static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
int d0, d1, d2;
asm volatile("rep ; movsl\n\t"
"movl %4,%%ecx\n\t"
"andl $3,%%ecx\n\t"
"jz 1f\n\t"
"rep ; movsb\n\t"
"1:"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
: "memory");
return to;
}
/*
* This looks ugly, but the compiler can optimize it totally,
* as the count is constant.
*/
static __always_inline void *__constant_memcpy(void *to, const void *from,
size_t n)
{
long esi, edi;
if (!n)
return to;
switch (n) {
case 1:
*(char *)to = *(char *)from;
return to;
case 2:
*(short *)to = *(short *)from;
return to;
case 4:
*(int *)to = *(int *)from;
return to;
case 3:
*(short *)to = *(short *)from;
*((char *)to + 2) = *((char *)from + 2);
return to;
case 5:
*(int *)to = *(int *)from;
*((char *)to + 4) = *((char *)from + 4);
return to;
case 6:
*(int *)to = *(int *)from;
*((short *)to + 2) = *((short *)from + 2);
return to;
case 8:
*(int *)to = *(int *)from;
*((int *)to + 1) = *((int *)from + 1);
return to;
}
esi = (long)from;
edi = (long)to;
if (n >= 5 * 4) {
/* large block: use rep prefix */
int ecx;
asm volatile("rep ; movsl"
: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
: "0" (n / 4), "1" (edi), "2" (esi)
: "memory"
);
} else {
/* small block: don't clobber ecx + smaller code */
if (n >= 4 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 3 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 2 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 1 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
}
switch (n % 4) {
/* tail */
case 0:
return to;
case 1:
asm volatile("movsb"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
case 2:
asm volatile("movsw"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
default:
asm volatile("movsw\n\tmovsb"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
}
}
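/*
* For illustration: __constant_memcpy(to, from, 6) reduces to one 32-bit
* and one 16-bit store via the "case 6" branch above, with no call and no
* loop emitted at all.
*/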
#define __HAVE_ARCH_MEMCPY
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
/*
* This CPU favours 3DNow strongly (eg AMD Athlon)
*/
static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
{
if (len < 512)
return __constant_memcpy(to, from, len);
return _mmx_memcpy(to, from, len);
}
static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
if (len < 512)
return __memcpy(to, from, len);
return _mmx_memcpy(to, from, len);
}
#define memcpy(t, f, n) \
(__builtin_constant_p((n)) \
? __constant_memcpy3d((t), (f), (n)) \
: __memcpy3d((t), (f), (n)))
#else
/*
* No 3D Now!
*/
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ >= 4)
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#else
#define memcpy(t, f, n) \
(__builtin_constant_p((n)) \
? __constant_memcpy((t), (f), (n)) \
: __memcpy((t), (f), (n)))
#endif
#else
/*
* kmemcheck becomes very happy if we use the REP instructions unconditionally,
* because it means that we know both memory operands in advance.
*/
#define memcpy(t, f, n) __memcpy((t), (f), (n))
#endif
#endif
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t n);
#define memcmp __builtin_memcmp
#define __HAVE_ARCH_MEMCHR
extern void *memchr(const void *cs, int c, size_t count);
static inline void *__memset_generic(void *s, char c, size_t count)
{
int d0, d1;
asm volatile("rep\n\t"
"stosb"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "1" (s), "0" (count)
: "memory");
return s;
}
/* we might want to write optimized versions of these later */
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))
/*
* memset(x, 0, y) is a reasonably common thing to do, so we want to fill
* things 32 bits at a time even when we don't know the size of the
* area at compile time.
*/
static __always_inline
void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
int d0, d1;
asm volatile("rep ; stosl\n\t"
"testb $2,%b3\n\t"
"je 1f\n\t"
"stosw\n"
"1:\ttestb $1,%b3\n\t"
"je 2f\n\t"
"stosb\n"
"2:"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
: "memory");
return s;
}
/* Added by Gertjan van Wingerde to make the minix and sysv modules work */
#define __HAVE_ARCH_STRNLEN
extern size_t strnlen(const char *s, size_t count);
/* end of additional stuff */
#define __HAVE_ARCH_STRSTR
extern char *strstr(const char *cs, const char *ct);
/*
* This looks horribly ugly, but the compiler can optimize it totally,
* as by now we know that both pattern and count are constant.
*/
static __always_inline
void *__constant_c_and_count_memset(void *s, unsigned long pattern,
size_t count)
{
switch (count) {
case 0:
return s;
case 1:
*(unsigned char *)s = pattern & 0xff;
return s;
case 2:
*(unsigned short *)s = pattern & 0xffff;
return s;
case 3:
*(unsigned short *)s = pattern & 0xffff;
*((unsigned char *)s + 2) = pattern & 0xff;
return s;
case 4:
*(unsigned long *)s = pattern;
return s;
}
#define COMMON(x) \
asm volatile("rep ; stosl" \
x \
: "=&c" (d0), "=&D" (d1) \
: "a" (eax), "0" (count/4), "1" ((long)s) \
: "memory")
{
int d0, d1;
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
/* Workaround for broken gcc 4.0 */
register unsigned long eax asm("%eax") = pattern;
#else
unsigned long eax = pattern;
#endif
switch (count % 4) {
case 0:
COMMON("");
return s;
case 1:
COMMON("\n\tstosb");
return s;
case 2:
COMMON("\n\tstosw");
return s;
default:
COMMON("\n\tstosw\n\tstosb");
return s;
}
}
#undef COMMON
}
#define __constant_c_x_memset(s, c, count) \
(__builtin_constant_p(count) \
? __constant_c_and_count_memset((s), (c), (count)) \
: __constant_c_memset((s), (c), (count)))
#define __memset(s, c, count) \
(__builtin_constant_p(count) \
? __constant_count_memset((s), (c), (count)) \
: __memset_generic((s), (c), (count)))
#define __HAVE_ARCH_MEMSET
#if (__GNUC__ >= 4)
#define memset(s, c, count) __builtin_memset(s, c, count)
#else
#define memset(s, c, count) \
(__builtin_constant_p(c) \
? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
(count)) \
: __memset((s), (c), (count)))
#endif
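/*
* For illustration: with an older gcc, memset(buf, 0xab, n) expands with
* the replicated pattern 0x01010101UL * 0xab == 0xabababab, so the
* "rep ; stosl" paths store the byte four at a time; gcc 4 and later get
* __builtin_memset() and may pick an equivalent expansion on their own.
*/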
/*
* find the first occurrence of byte 'c', or 1 past the area if none
*/
#define __HAVE_ARCH_MEMSCAN
extern void *memscan(void *addr, int c, size_t size);
#endif /* __KERNEL__ */
#endif /* _ASM_X86_STRING_32_H */

View File

@ -0,0 +1,61 @@
#ifndef _ASM_X86_SWAB_H
#define _ASM_X86_SWAB_H
#include <linux/types.h>
#include <linux/compiler.h>
static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
{
#ifdef __i386__
# ifdef CONFIG_X86_BSWAP
asm("bswap %0" : "=r" (val) : "0" (val));
# else
asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
"rorl $16,%0\n\t" /* swap words */
"xchgb %b0,%h0" /* swap higher bytes */
: "=q" (val)
: "0" (val));
# endif
#else /* __i386__ */
asm("bswapl %0"
: "=r" (val)
: "0" (val));
#endif
return val;
}
#define __arch_swab32 __arch_swab32
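/*
* For illustration: __arch_swab32(0x12345678) == 0x78563412, whichever of
* the two i386 variants above is compiled in.
*/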
static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
{
#ifdef __i386__
union {
struct {
__u32 a;
__u32 b;
} s;
__u64 u;
} v;
v.u = val;
# ifdef CONFIG_X86_BSWAP
asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
# else
v.s.a = __arch_swab32(v.s.a);
v.s.b = __arch_swab32(v.s.b);
asm("xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
# endif
return v.u;
#else /* __i386__ */
asm("bswapq %0"
: "=r" (val)
: "0" (val));
return val;
#endif
}
#define __arch_swab64 __arch_swab64
#endif /* _ASM_X86_SWAB_H */

View File

@ -0,0 +1,22 @@
#ifndef _ASM_X86_TYPES_H
#define _ASM_X86_TYPES_H
#define dma_addr_t dma_addr_t
#include <asm-generic/types.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
typedef u64 dma64_addr_t;
#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G)
/* DMA addresses come in 32-bit and 64-bit flavours. */
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_TYPES_H */

View File

@ -0,0 +1,293 @@
#ifndef __LINUX_BITMAP_H
#define __LINUX_BITMAP_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/kernel.h>
/*
* bitmaps provide bit arrays that consume one or more unsigned
* longs. The bitmap interface and available operations are listed
* here, in bitmap.h
*
* Function implementations generic to all architectures are in
* lib/bitmap.c. Function implementations that are architecture-specific
* are in various include/asm-<arch>/bitops.h headers
* and other arch/<arch> specific files.
*
* See lib/bitmap.c for more details.
*/
/*
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
* Note that nbits should always be a compile-time evaluable constant.
* Otherwise many inlines will generate horrible code.
*
* bitmap_zero(dst, nbits) *dst = 0UL
* bitmap_fill(dst, nbits) *dst = ~0UL
* bitmap_copy(dst, src, nbits) *dst = *src
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
* bitmap_complement(dst, src, nbits) *dst = ~(*src)
* bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
* bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
* bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
* bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
* bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
*/
/*
* Also the following operations in asm/bitops.h apply to bitmaps.
*
* set_bit(bit, addr) *addr |= bit
* clear_bit(bit, addr) *addr &= ~bit
* change_bit(bit, addr) *addr ^= bit
* test_bit(bit, addr) Is bit set in *addr?
* test_and_set_bit(bit, addr) Set bit and return old value
* test_and_clear_bit(bit, addr) Clear bit and return old value
* test_and_change_bit(bit, addr) Change bit and return old value
* find_first_zero_bit(addr, nbits) Position first zero bit in *addr
* find_first_bit(addr, nbits) Position first set bit in *addr
* find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
* find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
/*
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
* to declare an array named 'name' of just enough unsigned longs to
* contain all bit positions from 0 to 'bits' - 1.
*/
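/*
* For illustration (hypothetical user):
*
*	DECLARE_BITMAP(mask, 128);	(4 longs on 32-bit, 2 on 64-bit)
*	bitmap_zero(mask, 128);
*	set_bit(42, mask);
*
* after which test_bit(42, mask) returns nonzero and
* bitmap_weight(mask, 128) returns 1.
*/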
/*
* lib/bitmap.c provides these functions:
*/
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
extern int __bitmap_full(const unsigned long *bitmap, int bits);
extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
int bits);
extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern int bitmap_scnlistprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
)
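/*
* For illustration: with BITS_PER_LONG == 32, BITMAP_LAST_WORD_MASK(36)
* == 0xf (four valid bits in the last word), while
* BITMAP_LAST_WORD_MASK(64) == ~0UL (the last word is fully used).
*/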
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
static inline void bitmap_zero(unsigned long *dst, int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
static inline void bitmap_fill(unsigned long *dst, int nbits)
{
size_t nlongs = BITS_TO_LONGS(nbits);
if (!small_const_nbits(nbits)) {
int len = (nlongs - 1) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
int nbits)
{
if (small_const_nbits(nbits))
*dst = *src;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
}
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
else
__bitmap_or(dst, src1, src2, nbits);
}
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
else
__bitmap_xor(dst, src1, src2, nbits);
}
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_complement(dst, src, nbits);
}
static inline int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_equal(src1, src2, nbits);
}
static inline int bitmap_intersects(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
else
return __bitmap_intersects(src1, src2, nbits);
}
static inline int bitmap_subset(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_subset(src1, src2, nbits);
}
static inline int bitmap_empty(const unsigned long *src, int nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_empty(src, nbits);
}
static inline int bitmap_full(const unsigned long *src, int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_full(src, nbits);
}
static inline int bitmap_weight(const unsigned long *src, int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
static inline void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (small_const_nbits(nbits))
*dst = *src >> n;
else
__bitmap_shift_right(dst, src, n, nbits);
}
static inline void bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_shift_left(dst, src, n, nbits);
}
static inline int bitmap_parse(const char *buf, unsigned int buflen,
unsigned long *maskp, int nmaskbits)
{
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */

View File

@ -0,0 +1,173 @@
#ifndef _LINUX_BYTEORDER_GENERIC_H
#define _LINUX_BYTEORDER_GENERIC_H
/*
* linux/byteorder_generic.h
* Generic Byte-reordering support
*
* The "... p" macros, like le64_to_cpup, can be used with pointers
* to unaligned data, but there will be a performance penalty on
* some architectures. Use get_unaligned for unaligned data.
*
* Francois-Rene Rideau <fare@tunes.org> 19970707
* gathered all the good ideas from all asm-foo/byteorder.h into one file,
* cleaned them up.
* I hope it is compliant with non-GCC compilers.
* I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
* because I wasn't sure it would be ok to put it in types.h
* Upgraded it to 2.1.43
* Francois-Rene Rideau <fare@tunes.org> 19971012
* Upgraded it to 2.1.57
* to please Linus T., replaced huge #ifdef's between little/big endian
* by nestedly #include'd files.
* Francois-Rene Rideau <fare@tunes.org> 19971205
* Made it to 2.1.71; now a facelift:
* Put files under include/linux/byteorder/
* Split swab from generic support.
*
* TODO:
* = Regular kernel maintainers could also replace all these manual
* byteswap macros that remain, disseminated among drivers,
* after some grep or the sources...
* = Linus might want to rename all these macros and files to fit his taste,
* to fit his personal naming scheme.
* = it seems that a few drivers would also appreciate
* nybble swapping support...
* = every architecture could add their byteswap macro in asm/byteorder.h
* see how some architectures already do (i386, alpha, ppc, etc)
* = cpu_to_beXX and beXX_to_cpu might some day need to be well
* distinguished throughout the kernel. This is not the case currently,
* since little endian, big endian, and pdp endian machines don't need it.
* But this might be the case for, say, a port of Linux to 20/21 bit
* architectures (and F21 Linux addict around?).
*/
/*
* The following macros are to be defined by <asm/byteorder.h>:
*
* Conversion of long and short int between network and host format
* ntohl(__u32 x)
* ntohs(__u16 x)
* htonl(__u32 x)
* htons(__u16 x)
* It seems that some programs (which? where? or perhaps a standard? POSIX?)
* might like the above to be functions, not macros (why?).
* If that's true, then detect them, and take measures.
* Anyway, the measure is: define only ___ntohl as a macro instead,
* and in a separate file, have
* unsigned long inline ntohl(x){return ___ntohl(x);}
*
* The same for constant arguments
* __constant_ntohl(__u32 x)
* __constant_ntohs(__u16 x)
* __constant_htonl(__u32 x)
* __constant_htons(__u16 x)
*
* Conversion of XX-bit integers (16- 32- or 64-)
* between native CPU format and little/big endian format
* 64-bit stuff only defined for proper architectures
* cpu_to_[bl]eXX(__uXX x)
* [bl]eXX_to_cpu(__uXX x)
*
* The same, but takes a pointer to the value to convert
* cpu_to_[bl]eXXp(__uXX x)
* [bl]eXX_to_cpup(__uXX x)
*
* The same, but change in situ
* cpu_to_[bl]eXXs(__uXX x)
* [bl]eXX_to_cpus(__uXX x)
*
* See asm-foo/byteorder.h for examples of how to provide
* architecture-optimized versions
*
*/
#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
/*
* They have to be macros in order to do the constant folding
* correctly - if the argument is passed into an inline function
* it is no longer constant according to gcc.
*/
#undef ntohl
#undef ntohs
#undef htonl
#undef htons
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
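/*
* For illustration: htonl() and ntohl() perform the same byteswap, so
* ntohl(htonl(x)) == x on every architecture; on big-endian machines all
* four of the macros above resolve to the identity.
*/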
static inline void le16_add_cpu(__le16 *var, u16 val)
{
*var = cpu_to_le16(le16_to_cpu(*var) + val);
}
static inline void le32_add_cpu(__le32 *var, u32 val)
{
*var = cpu_to_le32(le32_to_cpu(*var) + val);
}
static inline void le64_add_cpu(__le64 *var, u64 val)
{
*var = cpu_to_le64(le64_to_cpu(*var) + val);
}
static inline void be16_add_cpu(__be16 *var, u16 val)
{
*var = cpu_to_be16(be16_to_cpu(*var) + val);
}
static inline void be32_add_cpu(__be32 *var, u32 val)
{
*var = cpu_to_be32(be32_to_cpu(*var) + val);
}
static inline void be64_add_cpu(__be64 *var, u64 val)
{
*var = cpu_to_be64(be64_to_cpu(*var) + val);
}
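/*
 * Usage sketch (illustrative): the *_add_cpu() helpers above spare callers
 * the convert/add/convert-back dance when bumping a counter that is kept
 * in fixed-endian (e.g. on-disk or on-wire) format.
 */
static inline void __le32_counter_demo(__le32 *counter)
{
        /* equivalent to: *counter = cpu_to_le32(le32_to_cpu(*counter) + 1); */
        le32_add_cpu(counter, 1);
}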
#endif /* _LINUX_BYTEORDER_GENERIC_H */

View File

@ -0,0 +1,108 @@
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H
#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN 1234
#endif
#ifndef __LITTLE_ENDIAN_BITFIELD
#define __LITTLE_ENDIAN_BITFIELD
#endif
#include <linux/types.h>
#include <linux/swab.h>
#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
static inline __le64 __cpu_to_le64p(const __u64 *p)
{
return (__force __le64)*p;
}
static inline __u64 __le64_to_cpup(const __le64 *p)
{
return (__force __u64)*p;
}
static inline __le32 __cpu_to_le32p(const __u32 *p)
{
return (__force __le32)*p;
}
static inline __u32 __le32_to_cpup(const __le32 *p)
{
return (__force __u32)*p;
}
static inline __le16 __cpu_to_le16p(const __u16 *p)
{
return (__force __le16)*p;
}
static inline __u16 __le16_to_cpup(const __le16 *p)
{
return (__force __u16)*p;
}
static inline __be64 __cpu_to_be64p(const __u64 *p)
{
return (__force __be64)__swab64p(p);
}
static inline __u64 __be64_to_cpup(const __be64 *p)
{
return __swab64p((__u64 *)p);
}
static inline __be32 __cpu_to_be32p(const __u32 *p)
{
return (__force __be32)__swab32p(p);
}
static inline __u32 __be32_to_cpup(const __be32 *p)
{
return __swab32p((__u32 *)p);
}
static inline __be16 __cpu_to_be16p(const __u16 *p)
{
return (__force __be16)__swab16p(p);
}
static inline __u16 __be16_to_cpup(const __be16 *p)
{
return __swab16p((__u16 *)p);
}
#define __cpu_to_le64s(x) do { (void)(x); } while (0)
#define __le64_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_le32s(x) do { (void)(x); } while (0)
#define __le32_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_le16s(x) do { (void)(x); } while (0)
#define __le16_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))
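/*
 * Illustrative consequence of the definitions above (example only): on a
 * little-endian build the __cpu_to_le*() family are pure casts, while the
 * __cpu_to_be*() family byte-swap.
 */
static inline void __little_endian_demo(void)
{
        __u32 v = 0x12345678;
        __le32 le = __cpu_to_le32(v);   /* identity: 0x12345678 */
        __be32 be = __cpu_to_be32(v);   /* swapped:  0x78563412 */
        (void)le;
        (void)be;
}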
#ifdef __KERNEL__
#include <linux/byteorder/generic.h>
#endif
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */

View File

@ -0,0 +1,87 @@
#ifndef __LINUX_COMPILER_H
#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
#endif
/*
* Common definitions for all gcc versions go here.
*/
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
/*
* This macro obfuscates arithmetic on a variable address so that gcc
* shouldn't recognize the original var, and make assumptions about it.
*
* This is needed because the C standard makes it undefined to do
* pointer arithmetic on "objects" outside their boundaries and the
* gcc optimizers assume this is the case. In particular they
* assume such arithmetic does not wrap.
*
* A miscompilation has been observed because of this on PPC.
* To work around it we hide the relationship of the pointer and the object
* using this macro.
*
* Versions of the ppc64 compiler before 4.1 had a bug where use of
* RELOC_HIDE could trash r30. The bug can be worked around by changing
* the inline assembly constraint from =g to =r, in this particular
* case either is valid.
*/
#define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); })
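/*
 * Usage sketch (illustrative): laundering a base pointer through
 * RELOC_HIDE() before adding an offset, so gcc cannot apply the no-wrap
 * assumption described above to the arithmetic.
 */
static inline void *__reloc_hide_demo(void *base, unsigned long off)
{
        return RELOC_HIDE(base, off);   /* gcc sees an opaque pointer */
}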
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) \
BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0])))
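/*
 * Sketch of the classic consumer of __must_be_array() (in mainline it
 * lives in <linux/kernel.h> alongside BUILD_BUG_ON_ZERO(), which the
 * zero-size-bitfield trick above relies on): ARRAY_SIZE() refuses to
 * compile when handed a plain pointer instead of a real array.
 */
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#endif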
/*
* Force always-inline if the user requests it so via the .config,
* or if gcc is too old:
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
# define inline inline __attribute__((always_inline))
# define __inline__ __inline__ __attribute__((always_inline))
# define __inline __inline __attribute__((always_inline))
#endif
#define __deprecated __attribute__((deprecated))
#define __packed __attribute__((packed))
#define __weak __attribute__((weak))
/*
* it doesn't make sense on ARM (currently the only user of __naked) to trace
* naked functions because then mcount is called without stack and frame pointer
* being set up and there is no chance to restore the lr register to the value
* before mcount was called.
*/
#define __naked __attribute__((naked)) notrace
#define __noreturn __attribute__((noreturn))
/*
* From the GCC manual:
*
* Many functions have no effects except the return value and their
* return value depends only on the parameters and/or global
* variables. Such a function can be subject to common subexpression
* elimination and loop optimization just as an arithmetic operator
* would be.
* [...]
*/
#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a,b) __attribute__((format(printf,a,b)))
#define noinline __attribute__((noinline))
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
#define __gcc_header(x) #x
#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
#define gcc_header(x) _gcc_header(x)
#include gcc_header(__GNUC__)

View File

@ -0,0 +1,61 @@
#ifndef __LINUX_COMPILER_H
#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
#endif
/* GCC 4.1.[01] miscompiles __weak */
#ifdef __KERNEL__
# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
# error Your version of gcc miscompiles the __weak directive
# endif
#endif
#define __used __attribute__((__used__))
#define __must_check __attribute__((warn_unused_result))
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
#define __always_inline inline __attribute__((always_inline))
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
#if __GNUC_MINOR__ >= 3
/* Mark functions as cold. gcc will assume any path leading to a call
to them will be unlikely. This means a lot of manual unlikely()s
are unnecessary now for any paths leading to the usual suspects
like BUG(), printk(), panic() etc. [but let's keep them for now for
older compilers]
Early snapshots of gcc 4.3 don't support this and we can't detect this
in the preprocessor, but we can live with this because they're unreleased.
Maketime probing would be overkill here.
gcc also has a __attribute__((__hot__)) to move hot functions into
a special section, but I don't see any sense in this right now in
the kernel context */
#define __cold __attribute__((__cold__))
#if __GNUC_MINOR__ >= 5
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
*
* Early snapshots of gcc 4.5 don't support this and we can't detect
* this in the preprocessor, but we can live with this because they're
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() __builtin_unreachable()
#endif
#endif
#if __GNUC_MINOR__ > 0
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
#if __GNUC_MINOR__ >= 4
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#endif

View File

@ -0,0 +1,303 @@
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H
#ifndef __ASSEMBLY__
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
# define __kernel /* default address space */
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
#endif
#ifdef __KERNEL__
#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif
#define notrace __attribute__((no_instrument_function))
/* Intel compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
*/
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif
/*
* Generic compiler-dependent macros required for kernel
* build go below this comment. Actual compiler/compiler version
* specific implementations come from the above header files
*/
struct ftrace_branch_data {
const char *func;
const char *file;
unsigned line;
union {
struct {
unsigned long correct;
unsigned long incorrect;
};
struct {
unsigned long miss;
unsigned long hit;
};
unsigned long miss_hit[2];
};
};
/*
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
*/
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
&& !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define __branch_check__(x, expect) ({ \
int ______r; \
static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_annotated_branch"))) \
______f = { \
.func = __func__, \
.file = __FILE__, \
.line = __LINE__, \
}; \
______r = likely_notrace(x); \
ftrace_likely_update(&______f, ______r, expect); \
______r; \
})
/*
* Using __builtin_constant_p(x) to ignore cases where the return
* value is always the same. This idea is taken from a similar patch
* written by Daniel Walker.
*/
# ifndef likely
# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
* "Define 'is'", Bill Clinton
* "Define 'if'", Steven Rostedt
*/
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
if (__builtin_constant_p((cond)) ? !!(cond) : \
({ \
int ______r; \
static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_branch"))) \
______f = { \
.func = __func__, \
.file = __FILE__, \
.line = __LINE__, \
}; \
______r = !!(cond); \
______f.miss_hit[______r]++; \
______r; \
}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
__ptr = (unsigned long) (ptr); \
(typeof(ptr)) (__ptr + (off)); })
#endif
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
/*
* Allow us to mark functions as 'deprecated' and have gcc emit a nice
* warning for each use, in hopes of speeding the functions removal.
* Usage is:
* int __deprecated foo(void)
*/
#ifndef __deprecated
# define __deprecated /* unimplemented */
#endif
#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif
#ifndef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif
/*
* Allow us to avoid 'defined but not used' warnings on functions and data,
* as well as force them to be emitted to the assembly file.
*
* As of gcc 3.4, static functions that are not marked with attribute((used))
* may be elided from the assembly file. As of gcc 3.4, static data not so
* marked will not be elided, but this may change in a future gcc version.
*
* NOTE: Because distributions shipped with a backported unit-at-a-time
* compiler in gcc 3.3, we must define __used to be __attribute__((used))
* for gcc >=3.3 instead of 3.4.
*
* In prior versions of gcc, such functions and data would be emitted, but
* would be warned about except with attribute((unused)).
*
* Mark functions that are referenced only in inline assembly as __used so
* the code is emitted even though it appears to be unreferenced.
*/
#ifndef __used
# define __used /* unimplemented */
#endif
#ifndef __maybe_unused
# define __maybe_unused /* unimplemented */
#endif
#ifndef __always_unused
# define __always_unused /* unimplemented */
#endif
#ifndef noinline
#define noinline
#endif
/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead. For documentation reasons.
*/
#define noinline_for_stack noinline
#ifndef __always_inline
#define __always_inline inline
#endif
#endif /* __KERNEL__ */
/*
* From the GCC manual:
*
* Many functions do not examine any values except their arguments,
* and have no effects except the return value. Basically this is
* just slightly more strict class than the `pure' attribute above,
* since function is not allowed to read global memory.
*
* Note that a function that has pointer arguments and examines the
* data pointed to must _not_ be declared `const'. Likewise, a
* function that calls a non-`const' function usually must not be
* `const'. It does not make sense for a `const' function to return
* `void'.
*/
#ifndef __attribute_const__
# define __attribute_const__ /* unimplemented */
#endif
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
*/
#ifndef __cold
#define __cold
#endif
/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
* but only when the compiler is aware of some particular ordering. One way
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
* This macro does absolutely -nothing- to prevent the CPU from reordering,
* merging, or refetching absolutely anything at any time. Its main intended
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
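/*
 * Usage sketch (illustrative): the volatile cast forces the flag to be
 * re-read from memory on every iteration instead of being hoisted into a
 * register, matching the process-level vs. irq-handler use case above.
 */
static inline void __access_once_demo(int *flag)
{
        while (!ACCESS_ONCE(*flag))
                ;       /* spin until the irq handler sets *flag */
}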
#endif /* __LINUX_COMPILER_H */

View File

@ -0,0 +1,114 @@
#ifndef _ASM_GENERIC_ERRNO_H
#define _ASM_GENERIC_ERRNO_H
#include <errno-base.h>
#define ERESTARTSYS 512
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
#define ENOSYS 38 /* Function not implemented */
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define ENOMSG 42 /* No message of desired type */
#define EIDRM 43 /* Identifier removed */
#define ECHRNG 44 /* Channel number out of range */
#define EL2NSYNC 45 /* Level 2 not synchronized */
#define EL3HLT 46 /* Level 3 halted */
#define EL3RST 47 /* Level 3 reset */
#define ELNRNG 48 /* Link number out of range */
#define EUNATCH 49 /* Protocol driver not attached */
#define ENOCSI 50 /* No CSI structure available */
#define EL2HLT 51 /* Level 2 halted */
#define EBADE 52 /* Invalid exchange */
#define EBADR 53 /* Invalid request descriptor */
#define EXFULL 54 /* Exchange full */
#define ENOANO 55 /* No anode */
#define EBADRQC 56 /* Invalid request code */
#define EBADSLT 57 /* Invalid slot */
#define EDEADLOCK EDEADLK
#define EBFONT 59 /* Bad font file format */
#define ENOSTR 60 /* Device not a stream */
#define ENODATA 61 /* No data available */
#define ETIME 62 /* Timer expired */
#define ENOSR 63 /* Out of streams resources */
#define ENONET 64 /* Machine is not on the network */
#define ENOPKG 65 /* Package not installed */
#define EREMOTE 66 /* Object is remote */
#define ENOLINK 67 /* Link has been severed */
#define EADV 68 /* Advertise error */
#define ESRMNT 69 /* Srmount error */
#define ECOMM 70 /* Communication error on send */
#define EPROTO 71 /* Protocol error */
#define EMULTIHOP 72 /* Multihop attempted */
#define EDOTDOT 73 /* RFS specific error */
#define EBADMSG 74 /* Not a data message */
#define EOVERFLOW 75 /* Value too large for defined data type */
#define ENOTUNIQ 76 /* Name not unique on network */
#define EBADFD 77 /* File descriptor in bad state */
#define EREMCHG 78 /* Remote address changed */
#define ELIBACC 79 /* Can not access a needed shared library */
#define ELIBBAD 80 /* Accessing a corrupted shared library */
#define ELIBSCN 81 /* .lib section in a.out corrupted */
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
#define ELIBEXEC 83 /* Cannot exec a shared library directly */
#define EILSEQ 84 /* Illegal byte sequence */
#define ERESTART 85 /* Interrupted system call should be restarted */
#define ESTRPIPE 86 /* Streams pipe error */
#define EUSERS 87 /* Too many users */
#define ENOTSOCK 88 /* Socket operation on non-socket */
#define EDESTADDRREQ 89 /* Destination address required */
#define EMSGSIZE 90 /* Message too long */
#define EPROTOTYPE 91 /* Protocol wrong type for socket */
#define ENOPROTOOPT 92 /* Protocol not available */
#define EPROTONOSUPPORT 93 /* Protocol not supported */
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
#define EPFNOSUPPORT 96 /* Protocol family not supported */
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
#define EADDRINUSE 98 /* Address already in use */
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
#define ENETDOWN 100 /* Network is down */
#define ENETUNREACH 101 /* Network is unreachable */
#define ENETRESET 102 /* Network dropped connection because of reset */
#define ECONNABORTED 103 /* Software caused connection abort */
#define ECONNRESET 104 /* Connection reset by peer */
#define ENOBUFS 105 /* No buffer space available */
#define EISCONN 106 /* Transport endpoint is already connected */
#define ENOTCONN 107 /* Transport endpoint is not connected */
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 109 /* Too many references: cannot splice */
#define ETIMEDOUT 110 /* Connection timed out */
#define ECONNREFUSED 111 /* Connection refused */
#define EHOSTDOWN 112 /* Host is down */
#define EHOSTUNREACH 113 /* No route to host */
#define EALREADY 114 /* Operation already in progress */
#define EINPROGRESS 115 /* Operation now in progress */
#define ESTALE 116 /* Stale NFS file handle */
#define EUCLEAN 117 /* Structure needs cleaning */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
#define ENOMEDIUM 123 /* No medium found */
#define EMEDIUMTYPE 124 /* Wrong medium type */
#define ECANCELED 125 /* Operation Canceled */
#define ENOKEY 126 /* Required key not available */
#define EKEYEXPIRED 127 /* Key has expired */
#define EKEYREVOKED 128 /* Key has been revoked */
#define EKEYREJECTED 129 /* Key was rejected by service */
/* for robust mutexes */
#define EOWNERDEAD 130 /* Owner died */
#define ENOTRECOVERABLE 131 /* State not recoverable */
#define ERFKILL 132 /* Operation not possible due to RF-kill */
#endif

View File

@ -1,3 +1,111 @@
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
/*
* 'kernel.h' contains some often-used function prototypes etc
*/
#ifdef __KERNEL__
#include <stdarg.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
#define USHORT_MAX ((u16)(~0U))
#define SHORT_MAX ((s16)(USHORT_MAX>>1))
#define SHORT_MIN (-SHORT_MAX - 1)
#define INT_MAX ((int)(~0U>>1))
#define INT_MIN (-INT_MAX - 1)
#define UINT_MAX (~0U)
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
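/*
 * Worked examples (illustrative): ALIGN() rounds up to the next multiple
 * of a power-of-two boundary; IS_ALIGNED() tests that property.
 *
 *   ALIGN(13, 8)      == 16
 *   ALIGN(16, 8)      == 16
 *   PTR_ALIGN(p, 64)        -- p rounded up to a 64-byte boundary
 *   IS_ALIGNED(24, 8) == 1,  IS_ALIGNED(13, 8) == 0
 */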
/**
* upper_32_bits - return bits 32-63 of a number
* @n: the number we're accessing
*
* A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
* the "right shift count >= width of type" warning when that quantity is
* 32-bits.
*/
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
/**
* lower_32_bits - return bits 0-31 of a number
* @n: the number we're accessing
*/
#define lower_32_bits(n) ((u32)(n))
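/*
 * Usage sketch (illustrative, assuming the usual u32/u64 typedefs from
 * the types header): programming a 64-bit bus address into a pair of
 * 32-bit registers, the usual consumer of these two helpers.
 */
static inline void __write_addr64_demo(u32 *lo_reg, u32 *hi_reg, u64 addr)
{
        *lo_reg = lower_32_bits(addr);
        *hi_reg = upper_32_bits(addr);
}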
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */
#define KERN_ERR "<3>" /* error conditions */
#define KERN_WARNING "<4>" /* warning conditions */
#define KERN_NOTICE "<5>" /* normal but significant condition */
#define KERN_INFO "<6>" /* informational */
#define KERN_DEBUG "<7>" /* debug-level messages */
//int printk(const char *fmt, ...);
#define printk(fmt, arg...) dbgprintf(fmt , ##arg)
/*
* min()/max()/clamp() macros that also do
* strict type-checking.. See the
* "unnecessary" pointer comparison.
*/
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
typeof(y) _min2 = (y); \
(void) (&_min1 == &_min2); \
_min1 < _min2 ? _min1 : _min2; })
#define max(x, y) ({ \
typeof(x) _max1 = (x); \
typeof(y) _max2 = (y); \
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2; })
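/*
 * Illustrative note: the (void) (&_min1 == &_min2) line above is the
 * "unnecessary" pointer comparison -- comparing pointers to different
 * types draws a compiler warning, so mixed-type calls are flagged:
 *
 *   int  a = 3;
 *   long b = 5;
 *   min(a, b);       -- warning: comparison of distinct pointer types
 *   min(a, (int)b);  -- OK
 */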
/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
*
*/
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
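/*
 * Usage sketch (illustrative, all names made up): recovering the enclosing
 * structure from a pointer to one of its members.
 */
struct __demo_wrapper {
        int header;
        int payload;
};

static inline struct __demo_wrapper *__wrapper_of_payload(int *p)
{
        return container_of(p, struct __demo_wrapper, payload);
}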
static inline void *kcalloc(size_t n, size_t size, uint32_t flags)
{
        /* Guard against n * size overflowing before the allocation. */
        if (n != 0 && size > ULONG_MAX / n)
                return NULL;
        return kzalloc(n * size, 0);    /* note: flags is currently ignored */
}
#endif /* __KERNEL__ */
typedef unsigned long pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
struct file {};
struct vm_area_struct {};
struct address_space {};
#endif
#include <types.h>
#include <list.h>

View File

@ -0,0 +1,29 @@
/*
* kref.c - library routines for handling generic reference counted objects
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Corp.
*
* based on kobject.h which was:
* Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (C) 2002-2003 Open Source Development Labs
*
* This file is released under the GPLv2.
*
*/
#ifndef _KREF_H_
#define _KREF_H_
#include <linux/types.h>
struct kref {
atomic_t refcount;
};
void kref_set(struct kref *kref, int num);
void kref_init(struct kref *kref);
void kref_get(struct kref *kref);
int kref_put(struct kref *kref, void (*release) (struct kref *kref));
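/*
 * Usage sketch (illustrative; my_obj, my_obj_release() and kfree() are
 * made-up names, and container_of() comes from the kernel header):
 *
 *   struct my_obj {
 *           struct kref ref;
 *   };
 *
 *   static void my_obj_release(struct kref *kref)
 *   {
 *           struct my_obj *obj = container_of(kref, struct my_obj, ref);
 *           kfree(obj);
 *   }
 *
 *   kref_init(&obj->ref);                    // refcount = 1
 *   kref_get(&obj->ref);                     // refcount = 2
 *   kref_put(&obj->ref, my_obj_release);     // refcount = 1
 *   kref_put(&obj->ref, my_obj_release);     // 0 -> my_obj_release(obj)
 */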
#endif /* _KREF_H_ */

View File

@ -1,7 +1,7 @@
 #ifndef _LINUX_LIST_H
 #define _LINUX_LIST_H
-//#include <linux/stddef.h>
+#include <linux/stddef.h>
 //#include <linux/poison.h>
 //#include <linux/prefetch.h>
 //#include <asm/system.h>
@ -542,7 +542,6 @@ static inline void list_splice_tail_init(struct list_head *list,
  * You lose the ability to access the tail in O(1).
  */
-#if 0
 struct hlist_head {
         struct hlist_node *first;
 };
@ -699,5 +698,3 @@ static inline void hlist_move_list(struct hlist_head *old,
         pos = n)
 #endif
-#endif
#endif

View File

@ -0,0 +1,537 @@
/*
* Runtime locking correctness validator
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* see Documentation/lockdep-design.txt for more details.
*/
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
struct task_struct;
struct lockdep_map;
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
/*
* We'd rather not expose kernel/lockdep_states.h this wide, but we do need
* the total number of states... :-(
*/
#define XXX_LOCK_USAGE_STATES (1+3*4)
#define MAX_LOCKDEP_SUBCLASSES 8UL
/*
* Lock-classes are keyed via unique addresses, by embedding the
* lockclass-key into the kernel (or module) .data section. (For
* static locks we use the lock address itself as the key.)
*/
struct lockdep_subclass_key {
char __one_byte;
} __attribute__ ((__packed__));
struct lock_class_key {
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
#define LOCKSTAT_POINTS 4
/*
* The lock-class itself:
*/
struct lock_class {
/*
* class-hash:
*/
struct list_head hash_entry;
/*
* global list of all lock-classes:
*/
struct list_head lock_entry;
struct lockdep_subclass_key *key;
unsigned int subclass;
unsigned int dep_gen_id;
/*
* IRQ/softirq usage tracking bits:
*/
unsigned long usage_mask;
struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
/*
* These fields represent a directed graph of lock dependencies,
* to every node we attach a list of "forward" and a list of
* "backward" graph nodes.
*/
struct list_head locks_after, locks_before;
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
unsigned int version;
/*
* Statistics counter:
*/
unsigned long ops;
const char *name;
int name_version;
#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
#endif
};
#ifdef CONFIG_LOCK_STAT
struct lock_time {
s64 min;
s64 max;
s64 total;
unsigned long nr;
};
enum bounce_type {
bounce_acquired_write,
bounce_acquired_read,
bounce_contended_write,
bounce_contended_read,
nr_bounce_types,
bounce_acquired = bounce_acquired_write,
bounce_contended = bounce_contended_write,
};
struct lock_class_stats {
unsigned long contention_point[4];
unsigned long contending_point[4];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
struct lock_time write_holdtime;
unsigned long bounces[nr_bounce_types];
};
struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
/*
* Map the lock object (the lock instance) to the lock-class object.
* This is embedded into specific lock instances:
*/
struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache;
const char *name;
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
#endif
};
/*
* Every lock has a list of other locks that were taken after it.
* We only grow the list, never remove from it:
*/
struct lock_list {
struct list_head entry;
struct lock_class *class;
struct stack_trace trace;
int distance;
/*
* The parent field is used to implement breadth-first search, and the
* bit 0 is reused to indicate if the lock has been accessed in BFS.
*/
struct lock_list *parent;
};
/*
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
u8 irq_context;
u8 depth;
u16 base;
struct list_head entry;
u64 chain_key;
};
#define MAX_LOCKDEP_KEYS_BITS 13
/*
* Subtract one because we offset hlock->class_idx by 1 in order
* to make 0 mean no class. This avoids overflowing the class_idx
* bitfield and hitting the BUG in hlock_class().
*/
#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
struct held_lock {
/*
* One-way hash of the dependency chain up to this point. We
* hash the hashes step by step as the dependency chain grows.
*
* We use it for dependency-caching and we skip detection
* passes and dependency-updates if there is a cache-hit, so
* it is absolutely critical for 100% coverage of the validator
* to have a unique key value for every unique dependency path
* that can occur in the system, to make a unique hash value
* as likely as possible - hence the 64-bit width.
*
* The task struct holds the current hash value (initialized
* with zero), here we store the previous hash value:
*/
u64 prev_chain_key;
unsigned long acquire_ip;
struct lockdep_map *instance;
struct lockdep_map *nest_lock;
#ifdef CONFIG_LOCK_STAT
u64 waittime_stamp;
u64 holdtime_stamp;
#endif
unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
/*
* The lock-stack is unified in that the lock chains of interrupt
 * contexts nest on top of process context chains, but we 'separate'
 * the hashes by starting with 0 if we cross into an interrupt
 * context, and we also do not add cross-context lock
* dependencies - the lock usage graph walking covers that area
* anyway, and we'd just unnecessarily increase the number of
* dependencies otherwise. [Note: hardirq and softirq contexts
* are separated from each other too.]
*
* The following field is used to detect when we cross into an
* interrupt context:
*/
unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
unsigned int trylock:1; /* 16 bits */
unsigned int read:2; /* see lock_acquire() comment */
unsigned int check:2; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:11; /* 32 bits */
};
/*
* Initialization, self-test and debugging-output methods:
*/
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);
extern void lockdep_off(void);
extern void lockdep_on(void);
/*
* These methods are used by specific locking variants (spinlocks,
* rwlocks, mutexes and rwsems) to pass init/acquire/release events
* to lockdep:
*/
extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass);
/*
* To initialize a lockdep_map statically use this macro.
* Note that _name must not be NULL.
*/
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), }
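/*
 * Usage sketch (illustrative): a file-scope lockdep map for annotating a
 * hand-rolled locking construct that has no struct with a dep_map member:
 *
 *   static struct lock_class_key my_map_key;
 *   static struct lockdep_map my_map =
 *           STATIC_LOCKDEP_MAP_INIT("my_map", &my_map_key);
 */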
/*
* Reinitialize a lock key - for cases where there is special locking or
* special initialization of locks so that the validator gets the scope
* of dependencies wrong: they are either too broad (they need a class-split)
* or they are too narrow (they suffer from a false class-split):
*/
#define lockdep_set_class(lock, key) \
lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
lockdep_init_map(&(lock)->dep_map, #lock, \
(lock)->dep_map.key, sub)
/*
* Compare locking classes
*/
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
static inline int lockdep_match_key(struct lockdep_map *lock,
struct lock_class_key *key)
{
return lock->key == key;
}
/*
* Acquire a lock.
*
* Values for "read":
*
* 0: exclusive (write) acquire
* 1: read-acquire (no recursion allowed)
* 2: read-acquire with same-instance recursion allowed
*
* Values for check:
*
* 0: disabled
* 1: simple checks (freeing, held-at-exit-time, etc.)
* 2: full validation
*/
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip);
extern void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
extern int lock_is_held(struct lockdep_map *lock);
extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
static inline void lock_set_subclass(struct lockdep_map *lock,
unsigned int subclass, unsigned long ip)
{
lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
#else /* !LOCKDEP */
static inline void lockdep_off(void)
{
}
static inline void lockdep_on(void)
{
}
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_set_current_reclaim_state(g) do { } while (0)
# define lockdep_clear_current_reclaim_state() do { } while (0)
# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub) do { } while (0)
/*
* We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
* case since the result is not well defined and the caller should rather
* #ifdef the call himself.
*/
# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
/*
* The class key takes no space if lockdep is disabled:
*/
struct lock_class_key { };
#define lockdep_depth(tsk) (0)
#define lockdep_assert_held(l) do { } while (0)
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
#define LOCK_CONTENDED(_lock, try, lock) \
do { \
if (!try(_lock)) { \
lock_contended(&(_lock)->dep_map, _RET_IP_); \
lock(_lock); \
} \
lock_acquired(&(_lock)->dep_map, _RET_IP_); \
} while (0)
#else /* CONFIG_LOCK_STAT */
#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)
#define LOCK_CONTENDED(_lock, try, lock) \
lock(_lock)
#endif /* CONFIG_LOCK_STAT */
#ifdef CONFIG_LOCKDEP
/*
 * On lockdep we don't want the hand-coded irq-enable of
* _raw_*_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
LOCK_CONTENDED((_lock), (try), (lock))
#else /* CONFIG_LOCKDEP */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
lockfl((_lock), (flags))
#endif /* CONFIG_LOCKDEP */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif
/*
* For trivial one-depth nesting of a lock-class, the following
* global define can be used. (Subsystems with multiple levels
* of nesting should define their own lock-nesting subclasses.)
*/
#define SINGLE_DEPTH_NESTING 1
/*
* Map the dependency ops to NOP or to real lockdep ops, depending
* on the per lock-class debug mode:
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# else
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i) lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i) do { } while (0)
# define spin_release(l, n, i) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i)
# else
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i) lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i) do { } while (0)
# define rwlock_acquire_read(l, s, t, i) do { } while (0)
# define rwlock_release(l, n, i) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# else
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i) lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i) do { } while (0)
# define mutex_release(l, n, i) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i) lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i) do { } while (0)
# define rwsem_acquire_read(l, s, t, i) do { } while (0)
# define rwsem_release(l, n, i) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l) do { } while (0)
# define lock_map_release(l) do { } while (0)
#endif
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
#endif /* __LINUX_LOCKDEP_H */

View File

@ -1,6 +1,15 @@
-#include <types.h>
-#include <list.h>
+#ifndef _LINUX_MODULE_H
+#define _LINUX_MODULE_H
+#include <syscall.h>
+#include <linux/list.h>
+#include <linux/compiler.h>
+#define EXPORT_SYMBOL(x)
 #define MODULE_FIRMWARE(x)
+#endif /* _LINUX_MODULE_H */

View File

@ -0,0 +1,566 @@
#include <types.h>
#include <list.h>
#ifndef __PCI_H__
#define __PCI_H__
#define PCI_ANY_ID (~0)
#define PCI_CLASS_NOT_DEFINED 0x0000
#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
#define PCI_BASE_CLASS_STORAGE 0x01
#define PCI_CLASS_STORAGE_SCSI 0x0100
#define PCI_CLASS_STORAGE_IDE 0x0101
#define PCI_CLASS_STORAGE_FLOPPY 0x0102
#define PCI_CLASS_STORAGE_IPI 0x0103
#define PCI_CLASS_STORAGE_RAID 0x0104
#define PCI_CLASS_STORAGE_SATA 0x0106
#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601
#define PCI_CLASS_STORAGE_SAS 0x0107
#define PCI_CLASS_STORAGE_OTHER 0x0180
#define PCI_BASE_CLASS_NETWORK 0x02
#define PCI_CLASS_NETWORK_ETHERNET 0x0200
#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
#define PCI_CLASS_NETWORK_FDDI 0x0202
#define PCI_CLASS_NETWORK_ATM 0x0203
#define PCI_CLASS_NETWORK_OTHER 0x0280
#define PCI_BASE_CLASS_DISPLAY 0x03
#define PCI_CLASS_DISPLAY_VGA 0x0300
#define PCI_CLASS_DISPLAY_XGA 0x0301
#define PCI_CLASS_DISPLAY_3D 0x0302
#define PCI_CLASS_DISPLAY_OTHER 0x0380
#define PCI_BASE_CLASS_MULTIMEDIA 0x04
#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
#define PCI_BASE_CLASS_MEMORY 0x05
#define PCI_CLASS_MEMORY_RAM 0x0500
#define PCI_CLASS_MEMORY_FLASH 0x0501
#define PCI_CLASS_MEMORY_OTHER 0x0580
#define PCI_BASE_CLASS_BRIDGE 0x06
#define PCI_CLASS_BRIDGE_HOST 0x0600
#define PCI_CLASS_BRIDGE_ISA 0x0601
#define PCI_CLASS_BRIDGE_EISA 0x0602
#define PCI_CLASS_BRIDGE_MC 0x0603
#define PCI_CLASS_BRIDGE_PCI 0x0604
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
#define PCI_CLASS_BRIDGE_NUBUS 0x0606
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
#define PCI_CLASS_BRIDGE_RACEWAY 0x0608
#define PCI_CLASS_BRIDGE_OTHER 0x0680
#define PCI_BASE_CLASS_COMMUNICATION 0x07
#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
#define PCI_BASE_CLASS_SYSTEM 0x08
#define PCI_CLASS_SYSTEM_PIC 0x0800
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020
#define PCI_CLASS_SYSTEM_DMA 0x0801
#define PCI_CLASS_SYSTEM_TIMER 0x0802
#define PCI_CLASS_SYSTEM_RTC 0x0803
#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
#define PCI_CLASS_SYSTEM_SDHCI 0x0805
#define PCI_CLASS_SYSTEM_OTHER 0x0880
#define PCI_BASE_CLASS_INPUT 0x09
#define PCI_CLASS_INPUT_KEYBOARD 0x0900
#define PCI_CLASS_INPUT_PEN 0x0901
#define PCI_CLASS_INPUT_MOUSE 0x0902
#define PCI_CLASS_INPUT_SCANNER 0x0903
#define PCI_CLASS_INPUT_GAMEPORT 0x0904
#define PCI_CLASS_INPUT_OTHER 0x0980
#define PCI_BASE_CLASS_DOCKING 0x0a
#define PCI_CLASS_DOCKING_GENERIC 0x0a00
#define PCI_CLASS_DOCKING_OTHER 0x0a80
#define PCI_BASE_CLASS_PROCESSOR 0x0b
#define PCI_CLASS_PROCESSOR_386 0x0b00
#define PCI_CLASS_PROCESSOR_486 0x0b01
#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
#define PCI_CLASS_PROCESSOR_MIPS 0x0b30
#define PCI_CLASS_PROCESSOR_CO 0x0b40
#define PCI_BASE_CLASS_SERIAL 0x0c
#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
#define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010
#define PCI_CLASS_SERIAL_ACCESS 0x0c01
#define PCI_CLASS_SERIAL_SSA 0x0c02
#define PCI_CLASS_SERIAL_USB 0x0c03
#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
#define PCI_BASE_CLASS_WIRELESS 0x0d
#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10
#define PCI_CLASS_WIRELESS_WHCI 0x0d1010
#define PCI_BASE_CLASS_INTELLIGENT 0x0e
#define PCI_CLASS_INTELLIGENT_I2O 0x0e00
#define PCI_BASE_CLASS_SATELLITE 0x0f
#define PCI_CLASS_SATELLITE_TV 0x0f00
#define PCI_CLASS_SATELLITE_AUDIO 0x0f01
#define PCI_CLASS_SATELLITE_VOICE 0x0f03
#define PCI_CLASS_SATELLITE_DATA 0x0f04
#define PCI_BASE_CLASS_CRYPT 0x10
#define PCI_CLASS_CRYPT_NETWORK 0x1000
#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001
#define PCI_CLASS_CRYPT_OTHER 0x1080
#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11
#define PCI_CLASS_SP_DPIO 0x1100
#define PCI_CLASS_SP_OTHER 0x1180
#define PCI_CLASS_OTHERS 0xff
/*
* Under PCI, each device has 256 bytes of configuration address space,
* of which the first 64 bytes are standardized as follows:
*/
#define PCI_VENDOR_ID 0x000 /* 16 bits */
#define PCI_DEVICE_ID 0x002 /* 16 bits */
#define PCI_COMMAND 0x004 /* 16 bits */
#define PCI_COMMAND_IO 0x001 /* Enable response in I/O space */
#define PCI_COMMAND_MEMORY 0x002 /* Enable response in Memory space */
#define PCI_COMMAND_MASTER 0x004 /* Enable bus mastering */
#define PCI_COMMAND_SPECIAL 0x008 /* Enable response to special cycles */
#define PCI_COMMAND_INVALIDATE 0x010 /* Use memory write and invalidate */
#define PCI_COMMAND_VGA_PALETTE 0x020 /* Enable palette snooping */
#define PCI_COMMAND_PARITY 0x040 /* Enable parity checking */
#define PCI_COMMAND_WAIT 0x080 /* Enable address/data stepping */
#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
#define PCI_STATUS 0x006 /* 16 bits */
#define PCI_STATUS_CAP_LIST 0x010 /* Support Capability List */
#define PCI_STATUS_66MHZ 0x020 /* Support 66 MHz PCI 2.1 bus */
#define PCI_STATUS_UDF 0x040 /* Support User Definable Features [obsolete] */
#define PCI_STATUS_FAST_BACK 0x080 /* Accept fast-back to back */
#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
#define PCI_STATUS_DEVSEL_FAST 0x000
#define PCI_STATUS_DEVSEL_MEDIUM 0x200
#define PCI_STATUS_DEVSEL_SLOW 0x400
#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */
#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */
#define PCI_REVISION_ID 0x08 /* Revision ID */
#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
#define PCI_CLASS_DEVICE 0x0a /* Device class */
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
#define PCI_HEADER_TYPE 0x0e /* 8 bits */
#define PCI_HEADER_TYPE_NORMAL 0
#define PCI_HEADER_TYPE_BRIDGE 1
#define PCI_HEADER_TYPE_CARDBUS 2
#define PCI_BIST 0x0f /* 8 bits */
#define PCI_BIST_CODE_MASK 0x0f /* Return result */
#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
/*
* Base addresses specify locations in memory or I/O space.
* Decoded size can be determined by writing a value of
* 0xffffffff to the register, and reading it back. Only
* 1 bits are decoded.
*/
#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */
#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */
#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
#define PCI_BASE_ADDRESS_SPACE_IO 0x01
#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */
#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL)
#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL)
/* bit 1 is reserved if address_space = 1 */
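/*
 * Sketch of the sizing procedure described above (illustrative): after
 * writing 0xffffffff to a memory BAR and reading it back, mask off the
 * attribute bits; the region size is the two's complement of the result.
 */
static inline u32_t __pci_mem_bar_size(u32_t readback)
{
        u32_t base = readback & PCI_BASE_ADDRESS_MEM_MASK;
        return base ? (~base + 1) : 0;
}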
#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */
/* Header type 0 (normal devices) */
#define PCI_CARDBUS_CIS 0x28
#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
#define PCI_SUBSYSTEM_ID 0x2e
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
#define PCI_ROM_ADDRESS_ENABLE 0x01
#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40
#define PCI_CB_SUBSYSTEM_ID 0x42
#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
#define PCI_CB_CAPABILITY_LIST 0x14
/* Capability lists */
#define PCI_CAP_LIST_ID 0 /* Capability ID */
#define PCI_CAP_ID_PM 0x01 /* Power Management */
#define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */
#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
#define PCI_CAP_ID_SLOTID 0x04 /* Slot Identification */
#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */
#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */
#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */
#define PCI_CAP_ID_HT 0x08 /* HyperTransport */
#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific capability */
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
#define PCI_CAP_SIZEOF 4
/* AGP registers */
#define PCI_AGP_VERSION 2 /* BCD version number */
#define PCI_AGP_RFU 3 /* Rest of capability flags */
#define PCI_AGP_STATUS 4 /* Status register */
#define PCI_AGP_STATUS_RQ_MASK 0xff000000 /* Maximum number of requests - 1 */
#define PCI_AGP_STATUS_SBA 0x0200 /* Sideband addressing supported */
#define PCI_AGP_STATUS_64BIT 0x0020 /* 64-bit addressing supported */
#define PCI_AGP_STATUS_FW 0x0010 /* FW transfers supported */
#define PCI_AGP_STATUS_RATE4 0x0004 /* 4x transfer rate supported */
#define PCI_AGP_STATUS_RATE2 0x0002 /* 2x transfer rate supported */
#define PCI_AGP_STATUS_RATE1 0x0001 /* 1x transfer rate supported */
#define PCI_AGP_COMMAND 8 /* Control register */
#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */
#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */
#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */
#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */
#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */
#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */
#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */
#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */
#define PCI_AGP_SIZEOF 12
#define PCI_MAP_REG_START 0x10
#define PCI_MAP_REG_END 0x28
#define PCI_MAP_ROM_REG 0x30
#define PCI_MAP_MEMORY 0x00000000
#define PCI_MAP_IO 0x00000001
#define PCI_MAP_MEMORY_TYPE 0x00000007
#define PCI_MAP_IO_TYPE 0x00000003
#define PCI_MAP_MEMORY_TYPE_32BIT 0x00000000
#define PCI_MAP_MEMORY_TYPE_32BIT_1M 0x00000002
#define PCI_MAP_MEMORY_TYPE_64BIT 0x00000004
#define PCI_MAP_MEMORY_TYPE_MASK 0x00000006
#define PCI_MAP_MEMORY_CACHABLE 0x00000008
#define PCI_MAP_MEMORY_ATTR_MASK 0x0000000e
#define PCI_MAP_MEMORY_ADDRESS_MASK 0xfffffff0
#define PCI_MAP_IO_ATTR_MASK 0x00000003
#define PCI_MAP_IS_IO(b) ((b) & PCI_MAP_IO)
#define PCI_MAP_IS_MEM(b) (!PCI_MAP_IS_IO(b))
#define PCI_MAP_IS64BITMEM(b) \
(((b) & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_64BIT)
#define PCIGETMEMORY(b) ((b) & PCI_MAP_MEMORY_ADDRESS_MASK)
#define PCIGETMEMORY64HIGH(b) (*((CARD32*)&b + 1))
#define PCIGETMEMORY64(b) \
(PCIGETMEMORY(b) | ((CARD64)PCIGETMEMORY64HIGH(b) << 32))
#define PCI_MAP_IO_ADDRESS_MASK 0xfffffffc
#define PCIGETIO(b) ((b) & PCI_MAP_IO_ADDRESS_MASK)
#define PCI_MAP_ROM_DECODE_ENABLE 0x00000001
#define PCI_MAP_ROM_ADDRESS_MASK 0xfffff800
#define PCIGETROM(b) ((b) & PCI_MAP_ROM_ADDRESS_MASK)
#ifndef PCI_DOM_MASK
# define PCI_DOM_MASK 0x0ffu
#endif
#define PCI_DOMBUS_MASK (((PCI_DOM_MASK) << 8) | 0x0ffu)
#define PCI_MAKE_TAG(b,d,f) ((((b) & (PCI_DOMBUS_MASK)) << 16) | \
(((d) & 0x00001fu) << 11) | \
(((f) & 0x000007u) << 8))
#define PCI_BUS_FROM_TAG(tag) (((tag) >> 16) & (PCI_DOMBUS_MASK))
#define PCI_DEV_FROM_TAG(tag) (((tag) & 0x0000f800u) >> 11)
#define PCI_FUNC_FROM_TAG(tag) (((tag) & 0x00000700u) >> 8)
#define PCI_DFN_FROM_TAG(tag) (((tag) & 0x0000ff00u) >> 8)
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)
typedef unsigned int PCITAG;
extern inline PCITAG
pciTag(int busnum, int devnum, int funcnum)
{
return(PCI_MAKE_TAG(busnum,devnum,funcnum));
}
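/*
 * Worked example (illustrative): encoding bus 1, device 2, function 3 and
 * decoding the fields again with the accessor macros above.
 */
static inline int __pci_tag_demo(void)
{
        PCITAG tag = pciTag(1, 2, 3);

        return PCI_BUS_FROM_TAG(tag) == 1 &&
               PCI_DEV_FROM_TAG(tag) == 2 &&
               PCI_FUNC_FROM_TAG(tag) == 3;    /* evaluates to 1 */
}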
struct resource
{
resource_size_t start;
resource_size_t end;
// const char *name;
unsigned long flags;
// struct resource *parent, *sibling, *child;
};
/*
* IO resources have these defined flags.
*/
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
#define IORESOURCE_IO 0x00000100 /* Resource type */
#define IORESOURCE_MEM 0x00000200
#define IORESOURCE_IRQ 0x00000400
#define IORESOURCE_DMA 0x00000800
#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */
#define IORESOURCE_READONLY 0x00002000
#define IORESOURCE_CACHEABLE 0x00004000
#define IORESOURCE_RANGELENGTH 0x00008000
#define IORESOURCE_SHADOWABLE 0x00010000
#define IORESOURCE_BUS_HAS_VGA 0x00080000
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
/* ISA PnP IRQ specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
/* ISA PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
#define IORESOURCE_DMA_8BIT (0<<0)
#define IORESOURCE_DMA_8AND16BIT (1<<0)
#define IORESOURCE_DMA_16BIT (2<<0)
#define IORESOURCE_DMA_MASTER (1<<2)
#define IORESOURCE_DMA_BYTE (1<<3)
#define IORESOURCE_DMA_WORD (1<<4)
#define IORESOURCE_DMA_SPEED_MASK (3<<6)
#define IORESOURCE_DMA_COMPATIBLE (0<<6)
#define IORESOURCE_DMA_TYPEA (1<<6)
#define IORESOURCE_DMA_TYPEB (2<<6)
#define IORESOURCE_DMA_TYPEF (3<<6)
/* ISA PnP memory I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */
#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */
#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */
#define IORESOURCE_MEM_TYPE_MASK (3<<3)
#define IORESOURCE_MEM_8BIT (0<<3)
#define IORESOURCE_MEM_16BIT (1<<3)
#define IORESOURCE_MEM_8AND16BIT (2<<3)
#define IORESOURCE_MEM_32BIT (3<<3)
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
/*
* For PCI devices, the region numbers are assigned this way:
*
* 0-5 standard PCI regions
* 6 expansion ROM
* 7-10 bridges: address space assigned to buses behind the bridge
*/
#define PCI_ROM_RESOURCE 6
#define PCI_BRIDGE_RESOURCES 7
#define PCI_NUM_RESOURCES 11
#ifndef PCI_BUS_NUM_RESOURCES
#define PCI_BUS_NUM_RESOURCES 8
#endif
#define DEVICE_COUNT_RESOURCE 12
/*
* The pci_dev structure is used to describe PCI devices.
*/
struct pci_dev {
// struct list_head bus_list; /* node in per-bus list */
// struct pci_bus *bus; /* bus this device is on */
// struct pci_bus *subordinate; /* bus this device bridges to */
// void *sysdata; /* hook for sys-specific extension */
// struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
// struct pci_slot *slot; /* Physical slot this device is in */
u32_t bus;
u32_t devfn; /* encoded device & function index */
u16_t vendor;
u16_t device;
u16_t subsystem_vendor;
u16_t subsystem_device;
u32_t class; /* 3 bytes: (base,sub,prog-if) */
uint8_t revision; /* PCI revision, low byte of class word */
uint8_t hdr_type; /* PCI header type (`multi' flag masked out) */
uint8_t pcie_type; /* PCI-E device/port type */
uint8_t rom_base_reg; /* which config register controls the ROM */
uint8_t pin; /* which interrupt pin this device uses */
// struct pci_driver *driver; /* which driver has allocated this device */
uint64_t dma_mask; /* Mask of the bits of bus address this
device implements. Normally this is
0xffffffff. You only need to change
this if your device has broken DMA
or supports 64-bit transfers. */
// struct device_dma_parameters dma_parms;
// pci_power_t current_state; /* Current operating state. In ACPI-speak,
// this is D0-D3, D0 being fully functional,
// and D3 being off. */
// int pm_cap; /* PM capability offset in the
// configuration space */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
// pci_channel_state_t error_state; /* current connectivity state */
// struct device dev; /* Generic device interface */
// int cfg_size; /* Size of configuration space */
/*
* Instead of touching interrupt line and base address registers
* directly, use the values stored here. They might be different!
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
/* These fields are used by common fixups */
unsigned int transparent:1; /* Transparent PCI bridge */
unsigned int multifunction:1;/* Part of multi-function device */
/* keep track of device state */
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
unsigned int is_pcie:1;
unsigned int state_saved:1;
unsigned int is_physfn:1;
unsigned int is_virtfn:1;
// pci_dev_flags_t dev_flags;
// atomic_t enable_cnt; /* pci_enable_device has been called */
// u32 saved_config_space[16]; /* config space saved at suspend time */
// struct hlist_head saved_cap_space;
// struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
// int rom_attr_enabled; /* has display of the rom attribute been enabled? */
// struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
// struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
};
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
#define pci_resource_len(dev,bar) \
((pci_resource_start((dev), (bar)) == 0 && \
pci_resource_end((dev), (bar)) == \
pci_resource_start((dev), (bar))) ? 0 : \
\
(pci_resource_end((dev), (bar)) - \
pci_resource_start((dev), (bar)) + 1))
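The length rule is easiest to see with numbers (editor's sketch, hypothetical BAR values):
/*
 * For a 16 MiB memory BAR:
 *   resource[0].start == 0xe0000000, resource[0].end == 0xe0ffffff
 *   pci_resource_len(dev, 0) == 0xe0ffffff - 0xe0000000 + 1 == 0x01000000
 * For an unset BAR (start == end == 0) the macro yields 0 instead of the
 * bogus length 1 that end - start + 1 would otherwise produce.
 */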
struct pci_device_id
{
u16_t vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
u16_t subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
u32_t class, class_mask; /* (class,subclass,prog-if) triplet */
u32_t driver_data; /* Data private to the driver */
};
typedef struct
{
struct list_head link;
struct pci_dev pci_dev;
}pci_dev_t;
int enum_pci_devices(void);
struct pci_device_id*
find_pci_device(pci_dev_t* pdev, struct pci_device_id *idlist);
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
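A note on the mask helper above (editor's illustration):
/*
 * DMA_BIT_MASK(24) == 0x0000000000ffffffULL
 * DMA_BIT_MASK(32) == 0x00000000ffffffffULL
 * The n == 64 case is special-cased to ~0ULL because shifting a 64-bit
 * value by 64 bits is undefined behaviour in C.
 */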
#define pci_name(x) "radeon"
#endif //__PCI__H__


@@ -0,0 +1,49 @@
#ifndef _LINUX_POSIX_TYPES_H
#define _LINUX_POSIX_TYPES_H
#include <linux/stddef.h>
/*
* This allows for 1024 file descriptors: if NR_OPEN is ever grown
* beyond that you'll have to change this too. But 1024 fd's seem to be
* enough even for such "real" unices like OSF/1, so hopefully this is
* one limit that doesn't have to be changed [again].
*
* Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
* <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
* place for them. Solved by having dummy defines in <sys/time.h>.
*/
/*
* Those macros may have been defined in <gnu/types.h>. But we always
* use the ones here.
*/
#undef __NFDBITS
#define __NFDBITS (8 * sizeof(unsigned long))
#undef __FD_SETSIZE
#define __FD_SETSIZE 1024
#undef __FDSET_LONGS
#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
#undef __FDELT
#define __FDELT(d) ((d) / __NFDBITS)
#undef __FDMASK
#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
typedef struct {
unsigned long fds_bits [__FDSET_LONGS];
} __kernel_fd_set;
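A quick worked example of the fd bookkeeping above (editor's note, assuming 32-bit longs):
/*
 * Descriptor 70 lives in fds_bits[__FDELT(70)] == fds_bits[2],
 * under the bit mask __FDMASK(70) == 1UL << (70 % 32) == 1UL << 6.
 */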
/* Type of a signal handler. */
typedef void (*__kernel_sighandler_t)(int);
/* Type of a SYSV IPC key. */
typedef int __kernel_key_t;
typedef int __kernel_mqd_t;
#include <asm/posix_types.h>
#endif /* _LINUX_POSIX_TYPES_H */


@@ -0,0 +1,347 @@
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
/*
* include/linux/spinlock.h - generic spinlock/rwlock declarations
*
* here's the role of the various spinlock/rwlock related include files:
*
* on SMP builds:
*
* asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
*
* linux/spinlock_api_smp.h:
* contains the prototypes for the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*
* on UP builds:
*
* linux/spinlock_type_up.h:
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* linux/spinlock_up.h:
* contains the __raw_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
* (included on UP-non-debug builds:)
*
* linux/spinlock_api_up.h:
* builds the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*/
#include <linux/typecheck.h>
//#include <linux/preempt.h>
//#include <linux/linkage.h>
#include <linux/compiler.h>
//#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
//#include <linux/bottom_half.h>
//#include <asm/system.h>
/*
* Must define these before including other files, inline functions need them
*/
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
extra \
".ifndef " LOCK_SECTION_NAME "\n\t" \
LOCK_SECTION_NAME ":\n\t" \
".endif\n"
#define LOCK_SECTION_END \
".previous\n\t"
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
* Pull the raw_spinlock_t and raw_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
/*
 * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __spin_lock_init(spinlock_t *lock, const char *name,
struct lock_class_key *key);
# define spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
# define spin_lock_init(lock) \
do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key);
# define rwlock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__rwlock_init((lock), #lock, &__key); \
} while (0)
#else
# define rwlock_init(lock) \
do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
#endif
#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock)
#else
#ifdef __raw_spin_is_contended
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
#else
#define spin_is_contended(lock) (((void)(lock), 0))
#endif /*__raw_spin_is_contended*/
#endif
/* The lock does not imply a full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
/**
* spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
extern void _raw_spin_lock(spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
extern void _raw_read_lock(rwlock_t *lock);
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
extern int _raw_read_trylock(rwlock_t *lock);
extern void _raw_read_unlock(rwlock_t *lock);
extern void _raw_write_lock(rwlock_t *lock);
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
extern int _raw_write_trylock(rwlock_t *lock);
extern void _raw_write_unlock(rwlock_t *lock);
#else
# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
# define _raw_read_lock_flags(lock, flags) \
__raw_read_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_lock_flags(lock, flags) \
__raw_write_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
#endif
#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
/*
* Define the various spin_lock and rw_lock methods. Note we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
#define spin_lock(lock) _spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
# define spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
#endif
#define write_lock(lock) _write_lock(lock)
#define read_lock(lock) _read_lock(lock)
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave(lock); \
} while (0)
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _read_lock_irqsave(lock); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _write_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave(lock); \
} while (0)
#endif
#else
#define spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_spin_lock_irqsave(lock, flags); \
} while (0)
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_read_lock_irqsave(lock, flags); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_write_lock_irqsave(lock, flags); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
spin_lock_irqsave(lock, flags)
#endif
#define spin_lock_irq(lock) _spin_lock_irq(lock)
#define spin_lock_bh(lock) _spin_lock_bh(lock)
#define read_lock_irq(lock) _read_lock_irq(lock)
#define read_lock_bh(lock) _read_lock_bh(lock)
#define write_lock_irq(lock) _write_lock_irq(lock)
#define write_lock_bh(lock) _write_lock_bh(lock)
#define spin_unlock(lock) _spin_unlock(lock)
#define read_unlock(lock) _read_unlock(lock)
#define write_unlock(lock) _write_unlock(lock)
#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
#define read_unlock_irq(lock) _read_unlock_irq(lock)
#define write_unlock_irq(lock) _write_unlock_irq(lock)
#define spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_spin_unlock_irqrestore(lock, flags); \
} while (0)
#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
#define read_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_read_unlock_irqrestore(lock, flags); \
} while (0)
#define read_unlock_bh(lock) _read_unlock_bh(lock)
#define write_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_write_unlock_irqrestore(lock, flags); \
} while (0)
#define write_unlock_bh(lock) _write_unlock_bh(lock)
#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
#define spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
#define spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
#define write_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
write_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
*/
#include <asm/atomic.h>
/**
* atomic_dec_and_lock - lock on reaching reference count zero
* @atomic: the atomic counter
* @lock: the spinlock in question
*
* Decrements @atomic by 1. If the result is 0, returns true and locks
* @lock. Returns false for all other cases.
*/
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
/**
* spin_can_lock - would spin_trylock() succeed?
* @lock: the spinlock in question.
*/
#define spin_can_lock(lock) (!spin_is_locked(lock))
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
*/
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif
#endif /* __LINUX_SPINLOCK_H */


@@ -0,0 +1,81 @@
#ifndef __LINUX_SPINLOCK_API_UP_H
#define __LINUX_SPINLOCK_API_UP_H
#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif
/*
* include/linux/spinlock_api_up.h
*
* spinlock API implementation on UP-nondebug (inlined implementation)
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
#define in_lock_functions(ADDR) 0
#define assert_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
* only thing we have to do is to keep the preempt counts and irq
* flags straight, to suppress compiler warnings of unused lock
* variables, and to add the proper checker annotations:
*/
#define __LOCK(lock) \
do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
#define __LOCK_BH(lock) \
do { local_bh_disable(); __LOCK(lock); } while (0)
#define __LOCK_IRQ(lock) \
do { local_irq_disable(); __LOCK(lock); } while (0)
#define __LOCK_IRQSAVE(lock, flags) \
do { local_irq_save(flags); __LOCK(lock); } while (0)
#define __UNLOCK(lock) \
do { preempt_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_BH(lock) \
do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
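An expansion sketch of what these reduce to on a UP non-debug build (editor's illustration):
/*
 * spin_lock_irqsave(&lk, flags);
 *   -> _spin_lock_irqsave(&lk, flags)
 *   -> __LOCK_IRQSAVE(&lk, flags)
 *   -> local_irq_save(flags); preempt_disable(); __acquire(&lk); (void)(&lk);
 * i.e. pure interrupt/preemption bookkeeping -- no atomic instruction is
 * ever emitted on UP.
 */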
#define _spin_lock(lock) __LOCK(lock)
#define _spin_lock_nested(lock, subclass) __LOCK(lock)
#define _read_lock(lock) __LOCK(lock)
#define _write_lock(lock) __LOCK(lock)
#define _spin_lock_bh(lock) __LOCK_BH(lock)
#define _read_lock_bh(lock) __LOCK_BH(lock)
#define _write_lock_bh(lock) __LOCK_BH(lock)
#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
#define _read_lock_irq(lock) __LOCK_IRQ(lock)
#define _write_lock_irq(lock) __LOCK_IRQ(lock)
#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
#define _read_trylock(lock) ({ __LOCK(lock); 1; })
#define _write_trylock(lock) ({ __LOCK(lock); 1; })
#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
#define _spin_unlock(lock) __UNLOCK(lock)
#define _read_unlock(lock) __UNLOCK(lock)
#define _write_unlock(lock) __UNLOCK(lock)
#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
#endif /* __LINUX_SPINLOCK_API_UP_H */


@@ -0,0 +1,100 @@
#ifndef __LINUX_SPINLOCK_TYPES_H
#define __LINUX_SPINLOCK_TYPES_H
/*
* include/linux/spinlock_types.h - generic spinlock type definitions
* and initializers
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else
# include <linux/spinlock_types_up.h>
#endif
#include <linux/lockdep.h>
typedef struct {
raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
typedef struct {
raw_rwlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
#else
# define SPIN_DEP_MAP_INIT(lockname)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
#else
# define RW_DEP_MAP_INIT(lockname)
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
# define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
.magic = SPINLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
SPIN_DEP_MAP_INIT(lockname) }
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
.magic = RWLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
RW_DEP_MAP_INIT(lockname) }
#else
# define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
SPIN_DEP_MAP_INIT(lockname) }
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
RW_DEP_MAP_INIT(lockname) }
#endif
/*
* SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
* are hence deprecated.
* Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
* __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
*/
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#endif /* __LINUX_SPINLOCK_TYPES_H */


@@ -0,0 +1,37 @@
#ifndef __LINUX_SPINLOCK_TYPES_UP_H
#define __LINUX_SPINLOCK_TYPES_UP_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
/*
* include/linux/spinlock_types_up.h - spinlock type definitions for UP
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
#ifdef CONFIG_DEBUG_SPINLOCK
typedef struct {
volatile unsigned int slock;
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
#else
typedef struct { } raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { }
#endif
typedef struct {
/* no debug version on UP */
} raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { }
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */


@@ -0,0 +1,76 @@
#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H
#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif
/*
* include/linux/spinlock_up.h - UP-debug version of spinlocks.
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*
* In the debug case, 1 means unlocked, 0 means locked. (the values
* are inverted, to catch initialization bugs)
*
* No atomicity anywhere, we are on UP.
*/
#ifdef CONFIG_DEBUG_SPINLOCK
#define __raw_spin_is_locked(x) ((x)->slock == 0)
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
lock->slock = 0;
}
static inline void
__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
}
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
char oldval = lock->slock;
lock->slock = 0;
return oldval > 0;
}
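A short reading of the trylock above (editor's note):
/*
 * With the inverted convention (1 == unlocked), trylock samples the old
 * value, unconditionally claims the lock by storing 0, and reports
 * success only if the lock was previously free (oldval > 0).
 */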
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
lock->slock = 1;
}
/*
* Read-write spinlocks. No debug version.
*/
#define __raw_read_lock(lock) do { (void)(lock); } while (0)
#define __raw_write_lock(lock) do { (void)(lock); } while (0)
#define __raw_read_trylock(lock) ({ (void)(lock); 1; })
#define __raw_write_trylock(lock) ({ (void)(lock); 1; })
#define __raw_read_unlock(lock) do { (void)(lock); } while (0)
#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
#define __raw_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
# define __raw_spin_lock(lock) do { (void)(lock); } while (0)
# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
# define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
# define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
#define __raw_spin_is_contended(lock) (((void)(lock), 0))
#define __raw_read_can_lock(lock) (((void)(lock), 1))
#define __raw_write_can_lock(lock) (((void)(lock), 1))
#define __raw_spin_unlock_wait(lock) \
do { cpu_relax(); } while (__raw_spin_is_locked(lock))
#endif /* __LINUX_SPINLOCK_UP_H */


@@ -0,0 +1,28 @@
#ifndef _LINUX_STDDEF_H
#define _LINUX_STDDEF_H
#include <linux/compiler.h>
#undef NULL
#if defined(__cplusplus)
#define NULL 0
#else
#define NULL ((void *)0)
#endif
#ifdef __KERNEL__
enum {
false = 0,
true = 1
};
#undef offsetof
#ifdef __compiler_offsetof
#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
#endif /* __KERNEL__ */
#endif


@@ -0,0 +1,135 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
/* We don't want strings.h stuff being used by user stuff by accident */
#ifndef __KERNEL__
#include <string.h>
#else
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
#include <stdarg.h>
extern char *strndup_user(const char __user *, long);
extern void *memdup_user(const void __user *, size_t);
/*
* Include machine specific inline routines
*/
#include <asm/string.h>
#ifndef __HAVE_ARCH_STRCPY
extern char * strcpy(char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRNCPY
extern char * strncpy(char *,const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRLCPY
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
#ifndef __HAVE_ARCH_STRNCAT
extern char * strncat(char *, const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRLCAT
extern size_t strlcat(char *, const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRCMP
extern int strcmp(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRNCMP
extern int strncmp(const char *,const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRNICMP
extern int strnicmp(const char *, const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRCASECMP
extern int strcasecmp(const char *s1, const char *s2);
#endif
#ifndef __HAVE_ARCH_STRNCASECMP
extern int strncasecmp(const char *s1, const char *s2, size_t n);
#endif
#ifndef __HAVE_ARCH_STRCHR
extern char * strchr(const char *,int);
#endif
#ifndef __HAVE_ARCH_STRNCHR
extern char * strnchr(const char *, size_t, int);
#endif
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
extern char * __must_check strstrip(char *);
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
#endif
#ifndef __HAVE_ARCH_STRNLEN
extern __kernel_size_t strnlen(const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRPBRK
extern char * strpbrk(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRSEP
extern char * strsep(char **,const char *);
#endif
#ifndef __HAVE_ARCH_STRSPN
extern __kernel_size_t strspn(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRCSPN
extern __kernel_size_t strcspn(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_MEMSET
extern void * memset(void *,int,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCPY
extern void * memcpy(void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMMOVE
extern void * memmove(void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMSCAN
extern void * memscan(void *,int,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCMP
extern int memcmp(const void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
extern char *kstrdup(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);
/**
* strstarts - does @str start with @prefix?
* @str: string to examine
* @prefix: prefix to look for.
*/
static inline bool strstarts(const char *str, const char *prefix)
{
return strncmp(str, prefix, strlen(prefix)) == 0;
}
#endif
#endif /* _LINUX_STRING_H_ */


@@ -0,0 +1,12 @@
#ifndef __LINUX_STRINGIFY_H
#define __LINUX_STRINGIFY_H
/* Indirect stringification. Doing two levels allows the parameter to be a
* macro itself. For example, compile with -DFOO=bar, __stringify(FOO)
* converts to "bar".
*/
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
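The two-step expansion is worth spelling out (editor's note):
/*
 * With -DFOO=bar:
 *   __stringify_1(FOO) -> "FOO"                    (# stringizes immediately)
 *   __stringify(FOO)   -> __stringify_1(bar) -> "bar"
 * The indirection forces FOO to be macro-expanded before stringization.
 */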
#endif /* !__LINUX_STRINGIFY_H */


@@ -0,0 +1,299 @@
#ifndef _LINUX_SWAB_H
#define _LINUX_SWAB_H
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/swab.h>
/*
 * casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
*/
#define ___constant_swab16(x) ((__u16)( \
(((__u16)(x) & (__u16)0x00ffU) << 8) | \
(((__u16)(x) & (__u16)0xff00U) >> 8)))
#define ___constant_swab32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
#define ___constant_swab64(x) ((__u64)( \
(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
#define ___constant_swahw32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \
(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
#define ___constant_swahb32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \
(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
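Worked values for the four shuffles (editor's illustration):
/*
 *   ___constant_swab16(0x1234)      == 0x3412
 *   ___constant_swab32(0x12345678)  == 0x78563412
 *   ___constant_swahw32(0x12345678) == 0x56781234  (16-bit halves swapped)
 *   ___constant_swahb32(0x12345678) == 0x34127856  (bytes swapped within
 *                                                   each 16-bit half)
 */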
/*
* Implement the following as inlines, but define the interface using
* macros to allow constant folding when possible:
* ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
*/
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#ifdef __arch_swab16
return __arch_swab16(val);
#else
return ___constant_swab16(val);
#endif
}
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#ifdef __arch_swab32
return __arch_swab32(val);
#else
return ___constant_swab32(val);
#endif
}
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#ifdef __arch_swab64
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
__u32 l = val & ((1ULL << 32) - 1);
return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
#else
return ___constant_swab64(val);
#endif
}
static inline __attribute_const__ __u32 __fswahw32(__u32 val)
{
#ifdef __arch_swahw32
return __arch_swahw32(val);
#else
return ___constant_swahw32(val);
#endif
}
static inline __attribute_const__ __u32 __fswahb32(__u32 val)
{
#ifdef __arch_swahb32
return __arch_swahb32(val);
#else
return ___constant_swahb32(val);
#endif
}
/**
* __swab16 - return a byteswapped 16-bit value
* @x: value to byteswap
*/
#define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___constant_swab16(x) : \
__fswab16(x))
/**
* __swab32 - return a byteswapped 32-bit value
* @x: value to byteswap
*/
#define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swab32(x) : \
__fswab32(x))
/**
* __swab64 - return a byteswapped 64-bit value
* @x: value to byteswap
*/
#define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
___constant_swab64(x) : \
__fswab64(x))
/**
* __swahw32 - return a word-swapped 32-bit value
* @x: value to wordswap
*
* __swahw32(0x12340000) is 0x00001234
*/
#define __swahw32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahw32(x) : \
__fswahw32(x))
/**
* __swahb32 - return a high and low byte-swapped 32-bit value
* @x: value to byteswap
*
* __swahb32(0x12345678) is 0x34127856
*/
#define __swahb32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahb32(x) : \
__fswahb32(x))
/**
* __swab16p - return a byteswapped 16-bit value from a pointer
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
return __arch_swab16p(p);
#else
return __swab16(*p);
#endif
}
/**
* __swab32p - return a byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
return __arch_swab32p(p);
#else
return __swab32(*p);
#endif
}
/**
* __swab64p - return a byteswapped 64-bit value from a pointer
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
return __arch_swab64p(p);
#else
return __swab64(*p);
#endif
}
/**
* __swahw32p - return a wordswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping.
*/
static inline __u32 __swahw32p(const __u32 *p)
{
#ifdef __arch_swahw32p
return __arch_swahw32p(p);
#else
return __swahw32(*p);
#endif
}
/**
* __swahb32p - return a high and low byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high/low byteswapping.
*/
static inline __u32 __swahb32p(const __u32 *p)
{
#ifdef __arch_swahb32p
return __arch_swahb32p(p);
#else
return __swahb32(*p);
#endif
}
/**
* __swab16s - byteswap a 16-bit value in-place
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
__arch_swab16s(p);
#else
*p = __swab16p(p);
#endif
}
/**
* __swab32s - byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
__arch_swab32s(p);
#else
*p = __swab32p(p);
#endif
}
/**
* __swab64s - byteswap a 64-bit value in-place
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
__arch_swab64s(p);
#else
*p = __swab64p(p);
#endif
}
/**
* __swahw32s - wordswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping
*/
static inline void __swahw32s(__u32 *p)
{
#ifdef __arch_swahw32s
__arch_swahw32s(p);
#else
*p = __swahw32p(p);
#endif
}
/**
* __swahb32s - high and low byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high and low byte swapping
*/
static inline void __swahb32s(__u32 *p)
{
#ifdef __arch_swahb32s
__arch_swahb32s(p);
#else
*p = __swahb32p(p);
#endif
}
#ifdef __KERNEL__
# define swab16 __swab16
# define swab32 __swab32
# define swab64 __swab64
# define swahw32 __swahw32
# define swahb32 __swahb32
# define swab16p __swab16p
# define swab32p __swab32p
# define swab64p __swab64p
# define swahw32p __swahw32p
# define swahb32p __swahb32p
# define swab16s __swab16s
# define swab32s __swab32s
# define swab64s __swab64s
# define swahw32s __swahw32s
# define swahb32s __swahb32s
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAB_H */


@@ -0,0 +1,24 @@
#ifndef TYPECHECK_H_INCLUDED
#define TYPECHECK_H_INCLUDED
/*
* Check at compile time that something is of a particular type.
* Always evaluates to 1 so you may use it easily in comparisons.
*/
#define typecheck(type,x) \
({ type __dummy; \
typeof(x) __dummy2; \
(void)(&__dummy == &__dummy2); \
1; \
})
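A usage sketch (editor's illustration):
static inline void typecheck_demo(void)
{
    unsigned long flags = 0;
    (void)typecheck(unsigned long, flags);  /* compiles cleanly, value 1 */
    /* typecheck(int, flags) would warn: comparison of distinct pointer types */
}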
/*
* Check at compile time that 'function' is a certain type, or is a pointer
* to that type (needs to use typedef for the function type.)
*/
#define typecheck_fn(type,function) \
({ typeof(type) __tmp = function; \
(void)__tmp; \
})
#endif /* TYPECHECK_H_INCLUDED */


@@ -1,68 +1,229 @@
-#ifndef __TYPES_H__
-#define __TYPES_H__
+#ifndef _LINUX_TYPES_H
+#define _LINUX_TYPES_H
+#include <asm/types.h>
+#ifndef __ASSEMBLY__
+#ifdef __KERNEL__
-# define __iomem
-# define __force
-# define __user
-# define WARN(condition, format...)
+#define DECLARE_BITMAP(name,bits) \
+    unsigned long name[BITS_TO_LONGS(bits)]
+#endif
-typedef int bool;
-#define false 0
-#define true 1
+#include <linux/posix_types.h>
+#ifdef __KERNEL__
-typedef int ssize_t;
-typedef long long loff_t;
-typedef unsigned int size_t;
-typedef unsigned int count_t;
-typedef unsigned int addr_t;
+typedef __u32 __kernel_dev_t;
-typedef unsigned char u8;
-typedef unsigned short u16;
-typedef unsigned int u32;
-typedef unsigned long long u64;
+typedef __kernel_fd_set fd_set;
+typedef __kernel_dev_t dev_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_mode_t mode_t;
+typedef __kernel_nlink_t nlink_t;
+typedef __kernel_off_t off_t;
+typedef __kernel_pid_t pid_t;
+typedef __kernel_daddr_t daddr_t;
+typedef __kernel_key_t key_t;
+typedef __kernel_suseconds_t suseconds_t;
+typedef __kernel_timer_t timer_t;
+typedef __kernel_clockid_t clockid_t;
+typedef __kernel_mqd_t mqd_t;
-typedef unsigned char __u8;
-typedef unsigned short __u16;
-typedef unsigned int __u32;
-typedef unsigned long long __u64;
+typedef _Bool bool;
+typedef __kernel_uid32_t uid_t;
+typedef __kernel_gid32_t gid_t;
+typedef __kernel_uid16_t uid16_t;
+typedef __kernel_gid16_t gid16_t;
+typedef unsigned long uintptr_t;
+#ifdef CONFIG_UID16
+/* This is defined by include/asm-{arch}/posix_types.h */
+typedef __kernel_old_uid_t old_uid_t;
+typedef __kernel_old_gid_t old_gid_t;
+#endif /* CONFIG_UID16 */
+#if defined(__GNUC__)
+typedef __kernel_loff_t loff_t;
+#endif
+/*
+ * The following typedefs are also protected by individual ifdefs for
+ * historical reasons:
+ */
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef __kernel_size_t size_t;
+#endif
+#ifndef _SSIZE_T
+#define _SSIZE_T
+typedef __kernel_ssize_t ssize_t;
+#endif
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef __kernel_ptrdiff_t ptrdiff_t;
+#endif
+#ifndef _TIME_T
+#define _TIME_T
+typedef __kernel_time_t time_t;
+#endif
+#ifndef _CLOCK_T
+#define _CLOCK_T
+typedef __kernel_clock_t clock_t;
+#endif
+#ifndef _CADDR_T
+#define _CADDR_T
+typedef __kernel_caddr_t caddr_t;
+#endif
+/* bsd */
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+/* sysv */
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+typedef __u8 u_int8_t;
+typedef __s8 int8_t;
+typedef __u16 u_int16_t;
+typedef __s16 int16_t;
+typedef __u32 u_int32_t;
+typedef __s32 int32_t;
+#endif /* !(__BIT_TYPES_DEFINED__) */
+typedef __u8 uint8_t;
+typedef __u16 uint16_t;
+typedef __u32 uint32_t;
+#if defined(__GNUC__)
+typedef __u64 uint64_t;
+typedef __u64 u_int64_t;
+typedef __s64 int64_t;
+#endif
+/* this is a special 64bit data type that is 8-byte aligned */
+#define aligned_u64 __u64 __attribute__((aligned(8)))
+#define aligned_be64 __be64 __attribute__((aligned(8)))
+#define aligned_le64 __le64 __attribute__((aligned(8)))
+/**
+ * The type used for indexing onto a disc or disc partition.
+ *
+ * Linux always considers sectors to be 512 bytes long independently
+ * of the device's real block size.
+ *
+ * blkcnt_t is the type of the inode's block count.
+ */
+#ifdef CONFIG_LBDAF
+typedef u64 sector_t;
+typedef u64 blkcnt_t;
+#else
+typedef unsigned long sector_t;
+typedef unsigned long blkcnt_t;
+#endif
+/*
+ * The type of an index into the pagecache. Use a #define so asm/types.h
+ * can override it.
+ */
+#ifndef pgoff_t
+#define pgoff_t unsigned long
+#endif
+#endif /* __KERNEL__ */
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+#ifdef __CHECKER__
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+#ifdef __CHECK_ENDIAN__
+#define __bitwise __bitwise__
+#else
+#define __bitwise
+#endif
+typedef __u16 __bitwise __le16;
+typedef __u16 __bitwise __be16;
+typedef __u32 __bitwise __le32;
+typedef __u32 __bitwise __be32;
+typedef __u64 __bitwise __le64;
+typedef __u64 __bitwise __be64;
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#ifdef __KERNEL__
+typedef unsigned __bitwise__ gfp_t;
+typedef unsigned __bitwise__ fmode_t;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+typedef u64 phys_addr_t;
+#else
+typedef u32 phys_addr_t;
+#endif
+typedef phys_addr_t resource_size_t;
+typedef struct {
+    volatile int counter;
+} atomic_t;
+#ifdef CONFIG_64BIT
+typedef struct {
+    volatile long counter;
+} atomic64_t;
+#endif
+struct ustat {
+    __kernel_daddr_t f_tfree;
+    __kernel_ino_t f_tinode;
+    char f_fname[6];
+    char f_fpack[6];
+};
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
-typedef signed char __s8;
-typedef signed short __s16;
-typedef signed int __s32;
-typedef signed long long __s64;
-typedef __u32 __be32;
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
 typedef unsigned char u8_t;
 typedef unsigned short u16_t;
 typedef unsigned int u32_t;
 typedef unsigned long long u64_t;
-typedef signed char int8_t;
-typedef signed int int32_t;
-typedef signed long long int64_t;
+typedef unsigned int addr_t;
+typedef unsigned int count_t;
-#define NULL (void*)0
+# define WARN(condition, format...)
-typedef uint32_t dma_addr_t;
-typedef uint32_t resource_size_t;
-#define cpu_to_le16(v16) (v16)
-#define cpu_to_le32(v32) (v32)
-#define cpu_to_le64(v64) (v64)
-#define le16_to_cpu(v16) (v16)
-#define le32_to_cpu(v32) (v32)
-#define le64_to_cpu(v64) (v64)
+#define false 0
+#define true 1
 #define likely(x) __builtin_expect(!!(x), 1)
 #define unlikely(x) __builtin_expect(!!(x), 0)
@@ -71,24 +232,6 @@ typedef uint32_t resource_size_t;
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
-#define DECLARE_BITMAP(name,bits) \
-    unsigned long name[BITS_TO_LONGS(bits)]
-#define KERN_EMERG "<0>" /* system is unusable */
-#define KERN_ALERT "<1>" /* action must be taken immediately */
-#define KERN_CRIT "<2>" /* critical conditions */
-#define KERN_ERR "<3>" /* error conditions */
-#define KERN_WARNING "<4>" /* warning conditions */
-#define KERN_NOTICE "<5>" /* normal but significant condition */
-#define KERN_INFO "<6>" /* informational */
-#define KERN_DEBUG "<7>" /* debug-level messages */
-//int printk(const char *fmt, ...);
-#define printk(fmt, arg...) dbgprintf(fmt , ##arg)
 #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
@@ -148,7 +291,7 @@ char *strncpy (char *dst, const char *src, size_t len);
 void *malloc(size_t size);
 #define kfree free
-static inline void *kzalloc(size_t size, u32_t flags)
+static inline void *kzalloc(size_t size, uint32_t flags)
 {
     void *ret = malloc(size);
     memset(ret, 0, size);
@@ -159,13 +302,6 @@ static inline void *kzalloc(size_t size, u32_t flags)
 struct drm_file;
-#define offsetof(TYPE,MEMBER) __builtin_offsetof(TYPE,MEMBER)
-#define container_of(ptr, type, member) ({ \
-    const typeof( ((type *)0)->member ) *__mptr = (ptr); \
-    (type *)( (char *)__mptr - offsetof(type,member) );})
 #define DRM_MEMORYBARRIER() __asm__ __volatile__("lock; addl $0,0(%esp)")
 #define mb() __asm__ __volatile__("lock; addl $0,0(%esp)")
@@ -175,31 +311,6 @@ struct drm_file;
 #define PAGE_SIZE (1UL << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
-static inline void bitmap_zero(unsigned long *dst, int nbits)
-{
-    if (nbits <= BITS_PER_LONG)
-        *dst = 0UL;
-    else {
-        int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
-        memset(dst, 0, len);
-    }
-}
-#define EXPORT_SYMBOL(x)
-#define min(x,y) ({ \
-    typeof(x) _x = (x); \
-    typeof(y) _y = (y); \
-    (void) (&_x == &_y); \
-    _x < _y ? _x : _y; })
-#define max(x,y) ({ \
-    typeof(x) _x = (x); \
-    typeof(y) _y = (y); \
-    (void) (&_x == &_y); \
-    _x > _y ? _x : _y; })
 #define do_div(n, base) \
 ({ \
@@ -218,33 +329,17 @@ static inline void bitmap_zero(unsigned long *dst, int nbits)
 })
-#define lower_32_bits(n) ((u32)(n))
-#define INT_MAX ((int)(~0U>>1))
-#define INT_MIN (-INT_MAX - 1)
-#define UINT_MAX (~0U)
-#define LONG_MAX ((long)(~0UL>>1))
-#define LONG_MIN (-LONG_MAX - 1)
-#define ULONG_MAX (~0UL)
-#define LLONG_MAX ((long long)(~0ULL>>1))
-#define LLONG_MIN (-LLONG_MAX - 1)
-#define ULLONG_MAX (~0ULL)
-static inline void *kcalloc(size_t n, size_t size, u32_t flags)
-{
-    if (n != 0 && size > ULONG_MAX / n)
-        return NULL;
-    return kzalloc(n * size, 0);
-}
 #define ENTER() dbgprintf("enter %s\n",__FUNCTION__)
 #define LEAVE() dbgprintf("leave %s\n",__FUNCTION__)
-#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
-#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
 #define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
-#endif //__TYPES_H__
+#endif /* _LINUX_TYPES_H */


@@ -232,7 +232,7 @@ static inline void* __CreateObject(u32_t pid, size_t size)
 static inline void __DestroyObject(void *obj)
 {
     __asm__ __volatile__ (
-    "call *__imp__DestroyObject"
+    "call *__imp__DestroyObject \n\t"
     :
     :"a" (obj));
     __asm__ __volatile__ (


@@ -263,10 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
     case ATOM_ARG_FB:
         idx = U8(*ptr);
         (*ptr)++;
+        val = gctx->scratch[((gctx->fb_base + idx) / 4)];
         if (print)
             DEBUG("FB[0x%02X]", idx);
-        printk(KERN_INFO "FB access is not implemented.\n");
-        return 0;
+        break;
     case ATOM_ARG_IMM:
         switch (align) {
         case ATOM_SRC_DWORD:
@@ -488,9 +488,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
     case ATOM_ARG_FB:
         idx = U8(*ptr);
         (*ptr)++;
+        gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
         DEBUG("FB[0x%02X]", idx);
-        printk(KERN_INFO "FB access is not implemented.\n");
-        return;
+        break;
     case ATOM_ARG_PLL:
         idx = U8(*ptr);
         (*ptr)++;
@@ -1214,3 +1214,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
     *crev = CU8(idx + 3);
     return;
 }
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+    int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+    uint16_t data_offset;
+    int usage_bytes;
+    struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+    atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+    firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+    DRM_DEBUG("atom firmware requested %08x %dkb\n",
+          firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+          firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+    usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+    if (usage_bytes == 0)
+        usage_bytes = 20 * 1024;
+    /* allocate some scratch memory */
+    ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+    if (!ctx->scratch)
+        return -ENOMEM;
+    return 0;
+}


@@ -132,6 +132,7 @@ struct atom_context {
     uint8_t shift;
     int cs_equal, cs_above;
     int io_mode;
+    uint32_t *scratch;
 };
 extern int atom_debug;
@@ -142,6 +143,7 @@ int atom_asic_init(struct atom_context *);
 void atom_destroy(struct atom_context *);
 void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
 void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
 #include "atom-types.h"
 #include "atombios.h"
 #include "ObjectID.h"


@@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD {
 typedef struct _ATOM_HPD_INT_RECORD {
     ATOM_COMMON_RECORD_HEADER sheader;
     UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
-    UCHAR ucPluggged_PinState;
+    UCHAR ucPlugged_PinState;
 } ATOM_HPD_INT_RECORD;
 typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {


@@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
     struct drm_device *dev = crtc->dev;
     struct radeon_device *rdev = dev->dev_private;
+    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
     switch (mode) {
     case DRM_MODE_DPMS_ON:
@@ -248,6 +249,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
         if (ASIC_IS_DCE3(rdev))
             atombios_enable_crtc_memreq(crtc, 1);
         atombios_blank_crtc(crtc, 0);
+        radeon_crtc_load_lut(crtc);
         break;
     case DRM_MODE_DPMS_STANDBY:
     case DRM_MODE_DPMS_SUSPEND:
@@ -258,10 +260,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
         atombios_enable_crtc(crtc, 0);
         break;
     }
-    if (mode != DRM_MODE_DPMS_OFF) {
-        radeon_crtc_load_lut(crtc);
-    }
 }
 static void
@@ -457,9 +455,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
                 if (encoder->encoder_type !=
                     DRM_MODE_ENCODER_DAC)
                     pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
-                if (!ASIC_IS_AVIVO(rdev)
-                    && (encoder->encoder_type ==
-                    DRM_MODE_ENCODER_LVDS))
+                if (encoder->encoder_type ==
+                    DRM_MODE_ENCODER_LVDS)
                     pll_flags |= RADEON_PLL_USE_REF_DIV;
             }
             radeon_encoder = to_radeon_encoder(encoder);
@@ -500,6 +497,16 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
     else
         pll = &rdev->clock.p2pll;
+    if (ASIC_IS_AVIVO(rdev)) {
+        if (radeon_new_pll)
+            radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
+                                     &fb_div, &frac_fb_div,
+                                     &ref_div, &post_div, pll_flags);
+        else
+            radeon_compute_pll(pll, adjusted_clock, &pll_clock,
+                               &fb_div, &frac_fb_div,
+                               &ref_div, &post_div, pll_flags);
+    } else
         radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
                            &ref_div, &post_div, pll_flags);
@@ -576,15 +583,20 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
     struct radeon_device *rdev = dev->dev_private;
     struct radeon_framebuffer *radeon_fb;
     struct drm_gem_object *obj;
-    struct drm_radeon_gem_object *obj_priv;
+    struct radeon_bo *rbo;
     uint64_t fb_location;
     uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+    int r;
-    if (!crtc->fb)
-        return -EINVAL;
+    /* no fb bound */
+    if (!crtc->fb) {
+        DRM_DEBUG("No FB bound\n");
+        return 0;
+    }
     radeon_fb = to_radeon_framebuffer(crtc->fb);
+    /* Pin framebuffer & get tiling information */
     obj = radeon_fb->obj;
     obj_priv = obj->driver_private;
@@ -593,6 +605,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 //   }
     fb_location = rdev->mc.vram_location;
+    tiling_flags = 0;
     switch (crtc->fb->bits_per_pixel) {
     case 8:
@@ -622,13 +635,11 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
         return -EINVAL;
     }
-//   radeon_object_get_tiling_flags(obj->driver_private,
-//                                  &tiling_flags, NULL);
-//   if (tiling_flags & RADEON_TILING_MACRO)
-//       fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
-//   if (tiling_flags & RADEON_TILING_MICRO)
-//       fb_format |= AVIVO_D1GRPH_TILED;
+    if (tiling_flags & RADEON_TILING_MACRO)
+        fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+    if (tiling_flags & RADEON_TILING_MICRO)
+        fb_format |= AVIVO_D1GRPH_TILED;
     if (radeon_crtc->crtc_id == 0)
         WREG32(AVIVO_D1VGA_CONTROL, 0);


@@ -0,0 +1,13 @@
.global _destroy_cursor
.global _fini_cursor
.section .text
_destroy_cursor:
        pushl %eax
        call _fini_cursor
        addl $4, %esp
        ret


@ -63,6 +63,95 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/ */
/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
switch (hpd) {
case RADEON_HPD_1:
if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
connected = true;
break;
case RADEON_HPD_2:
if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
connected = true;
break;
default:
break;
}
return connected;
}
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
u32 tmp;
bool connected = r100_hpd_sense(rdev, hpd);
switch (hpd) {
case RADEON_HPD_1:
tmp = RREG32(RADEON_FP_GEN_CNTL);
if (connected)
tmp &= ~RADEON_FP_DETECT_INT_POL;
else
tmp |= RADEON_FP_DETECT_INT_POL;
WREG32(RADEON_FP_GEN_CNTL, tmp);
break;
case RADEON_HPD_2:
tmp = RREG32(RADEON_FP2_GEN_CNTL);
if (connected)
tmp &= ~RADEON_FP2_DETECT_INT_POL;
else
tmp |= RADEON_FP2_DETECT_INT_POL;
WREG32(RADEON_FP2_GEN_CNTL, tmp);
break;
default:
break;
}
}
void r100_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
rdev->irq.hpd[1] = true;
break;
default:
break;
}
}
r100_irq_set(rdev);
}
void r100_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
rdev->irq.hpd[1] = false;
break;
default:
break;
}
}
}
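/* A minimal usage sketch for the HPD helpers above (the helper name is
 * illustrative, not part of the driver): sense the line for a given
 * connector, then re-arm the interrupt polarity so the next plug or
 * unplug event raises an interrupt again. */
static bool r100_connector_hpd_connected(struct radeon_device *rdev,
                                         struct radeon_connector *radeon_connector)
{
        bool connected = r100_hpd_sense(rdev, radeon_connector->hpd.hpd);

        /* flip polarity towards the opposite edge for the next event */
        r100_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
        return connected;
}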
/* /*
* PCI GART * PCI GART
*/ */
@ -92,6 +181,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return radeon_gart_table_ram_alloc(rdev); return radeon_gart_table_ram_alloc(rdev);
} }
/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
void r100_enable_bm(struct radeon_device *rdev)
{
uint32_t tmp;
/* Enable bus mastering */
tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
}
int r100_pci_gart_enable(struct radeon_device *rdev) int r100_pci_gart_enable(struct radeon_device *rdev)
{ {
uint32_t tmp; uint32_t tmp;
@ -103,9 +201,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
WREG32(RADEON_AIC_HI_ADDR, tmp); WREG32(RADEON_AIC_HI_ADDR, tmp);
/* Enable bus mastering */
tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
/* set PCI GART page-table base address */ /* set PCI GART page-table base address */
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@ -157,8 +252,9 @@ void r100_irq_disable(struct radeon_device *rdev)
static inline uint32_t r100_irq_ack(struct radeon_device *rdev) static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{ {
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT | uint32_t irq_mask = RADEON_SW_INT_TEST |
RADEON_CRTC2_VBLANK_STAT; RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
if (irqs) { if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs); WREG32(RADEON_GEN_INT_STATUS, irqs);
@ -192,24 +288,27 @@ int r100_wb_init(struct radeon_device *rdev)
int r; int r;
if (rdev->wb.wb_obj == NULL) { if (rdev->wb.wb_obj == NULL) {
r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
true,
RADEON_GEM_DOMAIN_GTT, RADEON_GEM_DOMAIN_GTT,
false, &rdev->wb.wb_obj); &rdev->wb.wb_obj);
if (r) { if (r) {
DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
return r; return r;
} }
r = radeon_object_pin(rdev->wb.wb_obj, r = radeon_bo_reserve(rdev->wb.wb_obj, false);
RADEON_GEM_DOMAIN_GTT, if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr); &rdev->wb.gpu_addr);
if (r) { if (r) {
DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
radeon_bo_unreserve(rdev->wb.wb_obj);
return r; return r;
} }
r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) { if (r) {
DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
return r; return r;
} }
} }
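/* The radeon_bo calls above follow a strict reserve/pin/kmap discipline;
 * the matching teardown (a sketch of what r100_wb_fini is expected to do
 * once it is converted to the new API) reverses it step by step: */
        r = radeon_bo_reserve(rdev->wb.wb_obj, false);
        if (likely(r == 0)) {
                radeon_bo_kunmap(rdev->wb.wb_obj);
                radeon_bo_unpin(rdev->wb.wb_obj);
                radeon_bo_unreserve(rdev->wb.wb_obj);
        }
        radeon_bo_unref(&rdev->wb.wb_obj);
        rdev->wb.wb = NULL;
        rdev->wb.wb_obj = NULL;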
@ -227,6 +326,8 @@ void r100_wb_disable(struct radeon_device *rdev)
void r100_wb_fini(struct radeon_device *rdev) void r100_wb_fini(struct radeon_device *rdev)
{ {
int r;
r100_wb_disable(rdev); r100_wb_disable(rdev);
if (rdev->wb.wb_obj) { if (rdev->wb.wb_obj) {
// radeon_object_kunmap(rdev->wb.wb_obj); // radeon_object_kunmap(rdev->wb.wb_obj);
@ -1245,17 +1346,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
struct radeon_object *robj) struct radeon_bo *robj)
{ {
unsigned idx; unsigned idx;
u32 value; u32 value;
idx = pkt->idx + 1; idx = pkt->idx + 1;
value = radeon_get_ib_value(p, idx + 2); value = radeon_get_ib_value(p, idx + 2);
if ((value + 1) > radeon_object_size(robj)) { if ((value + 1) > radeon_bo_size(robj)) {
DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
"(need %u have %lu) !\n", "(need %u have %lu) !\n",
value + 1, value + 1,
radeon_object_size(robj)); radeon_bo_size(robj));
return -EINVAL; return -EINVAL;
} }
return 0; return 0;
@ -1541,6 +1642,14 @@ void r100_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev); r100_hdp_reset(rdev);
} }
void r100_hdp_flush(struct radeon_device *rdev)
{
u32 tmp;
tmp = RREG32(RADEON_HOST_PATH_CNTL);
tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
WREG32(RADEON_HOST_PATH_CNTL, tmp);
}
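/* Sketch of a typical (hypothetical) call site: after the CPU writes to
 * GPU-visible memory, invalidate the Host Data Path read buffer so the
 * GPU does not fetch stale data:
 *
 *   rdev->wb.wb[index] = cpu_to_le32(value);
 *   radeon_hdp_flush(rdev);    dispatches to r100_hdp_flush on r1xx
 */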
void r100_hdp_reset(struct radeon_device *rdev) void r100_hdp_reset(struct radeon_device *rdev)
{ {
uint32_t tmp; uint32_t tmp;
@ -1612,6 +1721,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
return 0; return 0;
} }
void r100_set_common_regs(struct radeon_device *rdev)
{
/* set these so they don't interfere with anything */
WREG32(RADEON_OV0_SCALE_CNTL, 0);
WREG32(RADEON_SUBPIC_CNTL, 0);
WREG32(RADEON_VIPH_CONTROL, 0);
WREG32(RADEON_I2C_CNTL_1, 0);
WREG32(RADEON_DVI_I2C_CNTL_1, 0);
WREG32(RADEON_CAP0_TRIG_CNTL, 0);
WREG32(RADEON_CAP1_TRIG_CNTL, 0);
}
/* /*
* VRAM info * VRAM info
@ -2677,6 +2797,9 @@ static int r100_startup(struct radeon_device *rdev)
{ {
int r; int r;
/* set common regs */
r100_set_common_regs(rdev);
/* program mc */
r100_mc_program(rdev); r100_mc_program(rdev);
/* Resume clock */ /* Resume clock */
r100_clock_startup(rdev); r100_clock_startup(rdev);
@ -2684,13 +2807,13 @@ static int r100_startup(struct radeon_device *rdev)
r100_gpu_init(rdev); r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate /* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */ * memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
if (rdev->flags & RADEON_IS_PCI) { if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev); r = r100_pci_gart_enable(rdev);
if (r) if (r)
return r; return r;
} }
/* Enable IRQ */ /* Enable IRQ */
// rdev->irq.sw_int = true;
// r100_irq_set(rdev); // r100_irq_set(rdev);
/* 1M ring buffer */ /* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024); // r = r100_cp_init(rdev, 1024 * 1024);
@ -2772,10 +2895,8 @@ int r100_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT)); RREG32(R_0007C0_CP_STAT));
} }
/* check if cards are posted or not */ /* check if cards are posted or not */
if (!radeon_card_posted(rdev) && rdev->bios) { if (radeon_boot_test_post_card(rdev) == false)
DRM_INFO("GPU not posted. posting now...\n"); return -EINVAL;
radeon_combios_asic_init(rdev->ddev);
}
/* Set asic errata */ /* Set asic errata */
r100_errata(rdev); r100_errata(rdev);
/* Initialize clocks */ /* Initialize clocks */
@ -2795,7 +2916,7 @@ int r100_init(struct radeon_device *rdev)
// if (r) // if (r)
// return r; // return r;
/* Memory manager */ /* Memory manager */
r = radeon_object_init(rdev); r = radeon_bo_init(rdev);
if (r) if (r)
return r; return r;
if (rdev->flags & RADEON_IS_PCI) { if (rdev->flags & RADEON_IS_PCI) {

View File

@ -137,7 +137,8 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
void rv370_pcie_gart_disable(struct radeon_device *rdev) void rv370_pcie_gart_disable(struct radeon_device *rdev)
{ {
uint32_t tmp; u32 tmp;
int r;
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
@ -1192,6 +1193,9 @@ static int r300_startup(struct radeon_device *rdev)
{ {
int r; int r;
/* set common regs */
r100_set_common_regs(rdev);
/* program mc */
r300_mc_program(rdev); r300_mc_program(rdev);
/* Resume clock */ /* Resume clock */
r300_clock_startup(rdev); r300_clock_startup(rdev);
@ -1204,13 +1208,18 @@ static int r300_startup(struct radeon_device *rdev)
if (r) if (r)
return r; return r;
} }
if (rdev->family == CHIP_R300 ||
rdev->family == CHIP_R350 ||
rdev->family == CHIP_RV350)
r100_enable_bm(rdev);
if (rdev->flags & RADEON_IS_PCI) { if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev); r = r100_pci_gart_enable(rdev);
if (r) if (r)
return r; return r;
} }
/* Enable IRQ */ /* Enable IRQ */
// rdev->irq.sw_int = true;
// r100_irq_set(rdev); // r100_irq_set(rdev);
/* 1M ring buffer */ /* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024); // r = r100_cp_init(rdev, 1024 * 1024);
@ -1265,10 +1274,8 @@ int r300_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT)); RREG32(R_0007C0_CP_STAT));
} }
/* check if cards are posted or not */ /* check if cards are posted or not */
if (!radeon_card_posted(rdev) && rdev->bios) { if (radeon_boot_test_post_card(rdev) == false)
DRM_INFO("GPU not posted. posting now...\n"); return -EINVAL;
radeon_combios_asic_init(rdev->ddev);
}
/* Set asic errata */ /* Set asic errata */
r300_errata(rdev); r300_errata(rdev);
/* Initialize clocks */ /* Initialize clocks */

View File

@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev)
{ {
int r; int r;
/* set common regs */
r100_set_common_regs(rdev);
/* program mc */
r300_mc_program(rdev); r300_mc_program(rdev);
/* Resume clock */ /* Resume clock */
r420_clock_resume(rdev); r420_clock_resume(rdev);
@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev)
} }
r420_pipes_init(rdev); r420_pipes_init(rdev);
/* Enable IRQ */ /* Enable IRQ */
// rdev->irq.sw_int = true;
// r100_irq_set(rdev); // r100_irq_set(rdev);
/* 1M ring buffer */ /* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024); // r = r100_cp_init(rdev, 1024 * 1024);
@ -229,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
} }
/* Resume clock after posting */ /* Resume clock after posting */
r420_clock_resume(rdev); r420_clock_resume(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return r420_startup(rdev); return r420_startup(rdev);
} }
@ -268,14 +271,9 @@ int r420_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT)); RREG32(R_0007C0_CP_STAT));
} }
/* check if cards are posted or not */ /* check if cards are posted or not */
if (!radeon_card_posted(rdev) && rdev->bios) { if (radeon_boot_test_post_card(rdev) == false)
DRM_INFO("GPU not posted. posting now...\n"); return -EINVAL;
if (rdev->is_atom_bios) {
atom_asic_init(rdev->mode_info.atom_context);
} else {
radeon_combios_asic_init(rdev->ddev);
}
}
/* Initialize clocks */ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev); radeon_get_clock_info(rdev->ddev);
/* Initialize power management */ /* Initialize power management */
@ -298,10 +296,13 @@ int r420_init(struct radeon_device *rdev)
// return r; // return r;
// } // }
/* Memory manager */ /* Memory manager */
r = radeon_object_init(rdev); r = radeon_bo_init(rdev);
if (r) { if (r) {
return r; return r;
} }
if (rdev->family == CHIP_R420)
r100_enable_bm(rdev);
if (rdev->flags & RADEON_IS_PCIE) { if (rdev->flags & RADEON_IS_PCIE) {
r = rv370_pcie_gart_init(rdev); r = rv370_pcie_gart_init(rdev);
if (r) if (r)

View File

@ -716,6 +716,8 @@
#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 #define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
#define AVIVO_DC_GPIO_HPD_A 0x7e94
#define AVIVO_GPIO_0 0x7e30 #define AVIVO_GPIO_0 0x7e30
#define AVIVO_GPIO_1 0x7e40 #define AVIVO_GPIO_1 0x7e40
#define AVIVO_GPIO_2 0x7e50 #define AVIVO_GPIO_2 0x7e50

View File

@ -187,7 +187,6 @@ static int r520_startup(struct radeon_device *rdev)
return r; return r;
} }
/* Enable IRQ */ /* Enable IRQ */
// rdev->irq.sw_int = true;
// rs600_irq_set(rdev); // rs600_irq_set(rdev);
/* 1M ring buffer */ /* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024); // r = r100_cp_init(rdev, 1024 * 1024);
@ -240,6 +239,9 @@ int r520_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT)); RREG32(R_0007C0_CP_STAT));
} }
/* check if cards are posted or not */ /* check if cards are posted or not */
if (radeon_boot_test_post_card(rdev) == false)
return -EINVAL;
if (!radeon_card_posted(rdev) && rdev->bios) { if (!radeon_card_posted(rdev) && rdev->bios) {
DRM_INFO("GPU not posted. posting now...\n"); DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context); atom_asic_init(rdev->mode_info.atom_context);
@ -264,7 +266,7 @@ int r520_init(struct radeon_device *rdev)
// if (r) // if (r)
// return r; // return r;
/* Memory manager */ /* Memory manager */
r = radeon_object_init(rdev); r = radeon_bo_init(rdev);
if (r) if (r)
return r; return r;
r = rv370_pcie_gart_init(rdev); r = rv370_pcie_gart_init(rdev);

View File

@ -37,8 +37,10 @@
#define PFP_UCODE_SIZE 576 #define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792 #define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848 #define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360 #define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
/* Firmware Names */ /* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin"); MODULE_FIRMWARE("radeon/R600_pfp.bin");
@ -61,6 +63,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin"); MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin"); MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin"); MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev); int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@ -69,6 +73,281 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev); void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev);
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
if (ASIC_IS_DCE3(rdev)) {
switch (hpd) {
case RADEON_HPD_1:
if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_2:
if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_3:
if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_4:
if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_6:
if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
default:
break;
}
} else {
switch (hpd) {
case RADEON_HPD_1:
if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
connected = true;
break;
case RADEON_HPD_2:
if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
connected = true;
break;
case RADEON_HPD_3:
if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
connected = true;
break;
default:
break;
}
}
return connected;
}
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
u32 tmp;
bool connected = r600_hpd_sense(rdev, hpd);
if (ASIC_IS_DCE3(rdev)) {
switch (hpd) {
case RADEON_HPD_1:
tmp = RREG32(DC_HPD1_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
break;
case RADEON_HPD_2:
tmp = RREG32(DC_HPD2_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD2_INT_CONTROL, tmp);
break;
case RADEON_HPD_3:
tmp = RREG32(DC_HPD3_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD3_INT_CONTROL, tmp);
break;
case RADEON_HPD_4:
tmp = RREG32(DC_HPD4_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD4_INT_CONTROL, tmp);
break;
case RADEON_HPD_5:
tmp = RREG32(DC_HPD5_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD5_INT_CONTROL, tmp);
break;
/* DCE 3.2 */
case RADEON_HPD_6:
tmp = RREG32(DC_HPD6_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
break;
default:
break;
}
} else {
switch (hpd) {
case RADEON_HPD_1:
tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
if (connected)
tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
else
tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
break;
case RADEON_HPD_2:
tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
if (connected)
tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
else
tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
break;
case RADEON_HPD_3:
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
if (connected)
tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
else
tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
break;
default:
break;
}
}
}
void r600_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
if (ASIC_IS_DCE3(rdev)) {
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
if (ASIC_IS_DCE32(rdev))
tmp |= DC_HPDx_EN;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
rdev->irq.hpd[3] = true;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
rdev->irq.hpd[5] = true;
break;
default:
break;
}
}
} else {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[2] = true;
break;
default:
break;
}
}
}
r600_irq_set(rdev);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
if (ASIC_IS_DCE3(rdev)) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
rdev->irq.hpd[3] = false;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
rdev->irq.hpd[5] = false;
break;
default:
break;
}
}
} else {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
rdev->irq.hpd[2] = false;
break;
default:
break;
}
}
}
}
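/* The DC_HPDx_CONTROL value programmed in r600_hpd_init above packs two
 * timer fields via the shift macros from r600d.h (field units are
 * hardware-defined; this is only a reading aid):
 *
 *   tmp = DC_HPDx_CONNECTION_TIMER(0x9c4)    connection debounce timer
 *       | DC_HPDx_RX_INT_TIMER(0xfa);        RX interrupt timer
 *   if (ASIC_IS_DCE32(rdev))
 *           tmp |= DC_HPDx_EN;               DCE 3.2 adds an enable bit
 */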
/* /*
* R600 PCIE GART * R600 PCIE GART
*/ */
@ -179,7 +458,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
void r600_pcie_gart_disable(struct radeon_device *rdev) void r600_pcie_gart_disable(struct radeon_device *rdev)
{ {
u32 tmp; u32 tmp;
int i; int i, r;
/* Disable all tables */ /* Disable all tables */
for (i = 0; i < 7; i++) for (i = 0; i < 7; i++)
@ -1100,6 +1379,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_PORT_DATA); (void)RREG32(PCIE_PORT_DATA);
} }
void r600_hdp_flush(struct radeon_device *rdev)
{
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
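/* Unlike r100_hdp_flush, which read-modify-writes the INVALIDATE bit
 * into HOST_PATH_CNTL, R600 exposes a dedicated flush trigger, so a
 * single register write suffices; both variants are reached through the
 * same asic->hdp_flush hook. */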
/* /*
* CP & Ring * CP & Ring
@ -1285,7 +1568,11 @@ int r600_init(struct radeon_device *rdev)
if (r) if (r)
return r; return r;
/* Post card if necessary */ /* Post card if necessary */
if (!r600_card_posted(rdev) && rdev->bios) { if (!r600_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
}
DRM_INFO("GPU not posted. posting now...\n"); DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context); atom_asic_init(rdev->mode_info.atom_context);
} }
@ -1309,31 +1596,31 @@ int r600_init(struct radeon_device *rdev)
if (r) if (r)
return r; return r;
/* Memory manager */ /* Memory manager */
r = radeon_object_init(rdev); r = radeon_bo_init(rdev);
if (r) if (r)
return r; return r;
// r = radeon_irq_kms_init(rdev);
// if (r)
// return r;
// rdev->cp.ring_obj = NULL; // rdev->cp.ring_obj = NULL;
// r600_ring_init(rdev, 1024 * 1024); // r600_ring_init(rdev, 1024 * 1024);
// if (!rdev->me_fw || !rdev->pfp_fw) { // rdev->ih.ring_obj = NULL;
// r = r600_cp_init_microcode(rdev); // r600_ih_ring_init(rdev, 64 * 1024);
// if (r) {
// DRM_ERROR("Failed to load firmware!\n");
// return r;
// }
// }
r = r600_pcie_gart_init(rdev); r = r600_pcie_gart_init(rdev);
if (r) if (r)
return r; return r;
rdev->accel_working = true;
// r = r600_blit_init(rdev); // r = r600_blit_init(rdev);
// if (r) { // if (r) {
// DRM_ERROR("radeon: failled blitter (%d).\n", r); // DRM_ERROR("radeon: failled blitter (%d).\n", r);
// return r; // return r;
// } // }
rdev->accel_working = true;
r = r600_startup(rdev); r = r600_startup(rdev);
if (r) { if (r) {
// r600_suspend(rdev); // r600_suspend(rdev);
@ -1375,21 +1662,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
uint32_t rdp, wdp;
unsigned count, i, j; unsigned count, i, j;
radeon_ring_free_size(rdev); radeon_ring_free_size(rdev);
rdp = RREG32(CP_RB_RPTR); count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
wdp = RREG32(CP_RB_WPTR);
count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count); seq_printf(m, "%u dwords in ring\n", count);
i = rdev->cp.rptr;
for (j = 0; j <= count; j++) { for (j = 0; j <= count; j++) {
i = (rdp + j) & rdev->cp.ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
i = (i + 1) & rdev->cp.ptr_mask;
} }
return 0; return 0;
} }
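/* The rewritten dump derives the ring occupancy from the driver's own
 * bookkeeping instead of re-reading CP_RB_RPTR/WPTR mid-dump, so the
 * count and the dumped window stay consistent while the CP advances:
 *
 *   used dwords = ring_size / 4 - ring_free_dw
 *
 * and the walk wraps with the power-of-two mask:
 *
 *   i = (i + 1) & rdev->cp.ptr_mask;
 */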

View File

@ -456,7 +456,215 @@
#define WAIT_2D_IDLECLEAN_bit (1 << 16) #define WAIT_2D_IDLECLEAN_bit (1 << 16)
#define WAIT_3D_IDLECLEAN_bit (1 << 17) #define WAIT_3D_IDLECLEAN_bit (1 << 17)
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
#define IH_RB_BASE 0x3e04
#define IH_RB_RPTR 0x3e08
#define IH_RB_WPTR 0x3e0c
# define RB_OVERFLOW (1 << 0)
# define WPTR_OFFSET_MASK 0x3fffc
#define IH_RB_WPTR_ADDR_HI 0x3e10
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
# define IH_MC_SWAP(x) ((x) << 2)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
# define IH_MC_SWAP_64BIT 3
# define RPTR_REARM (1 << 4)
# define MC_WRREQ_CREDIT(x) ((x) << 15)
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
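/* A sketch of how the IH ring fields above are typically composed when
 * the R6xx interrupt ring is brought up (the sizing code is
 * illustrative, not lifted from this commit):
 *
 *   u32 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
 *   u32 ih_rb_cntl = IH_RB_ENABLE
 *                  | IH_IB_SIZE(rb_bufsz)            log2-encoded size
 *                  | IH_WPTR_WRITEBACK_ENABLE
 *                  | IH_WPTR_OVERFLOW_ENABLE;
 *   WREG32(IH_RB_CNTL, ih_rb_cntl);
 */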
#define RLC_CNTL 0x3f00
# define RLC_ENABLE (1 << 0)
#define RLC_HB_BASE 0x3f10
#define RLC_HB_CNTL 0x3f0c
#define RLC_HB_RPTR 0x3f20
#define RLC_HB_WPTR 0x3f1c
#define RLC_HB_WPTR_LSB_ADDR 0x3f14
#define RLC_HB_WPTR_MSB_ADDR 0x3f18
#define RLC_MC_CNTL 0x3f44
#define RLC_UCODE_CNTL 0x3f48
#define RLC_UCODE_ADDR 0x3f2c
#define RLC_UCODE_DATA 0x3f30
#define SRBM_SOFT_RESET 0xe60
# define SOFT_RESET_RLC (1 << 13)
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
# define SCRATCH_INT_ENABLE (1 << 25)
# define TIME_STAMP_INT_ENABLE (1 << 26)
# define IB2_INT_ENABLE (1 << 29)
# define IB1_INT_ENABLE (1 << 30)
# define RB_INT_ENABLE (1 << 31)
#define CP_INT_STATUS 0xc128
# define SCRATCH_INT_STAT (1 << 25)
# define TIME_STAMP_INT_STAT (1 << 26)
# define IB2_INT_STAT (1 << 29)
# define IB1_INT_STAT (1 << 30)
# define RB_INT_STAT (1 << 31)
#define GRBM_INT_CNTL 0x8060
# define RDERR_INT_ENABLE (1 << 0)
# define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1)
# define GUI_IDLE_INT_ENABLE (1 << 19)
#define INTERRUPT_CNTL 0x5468
# define IH_DUMMY_RD_OVERRIDE (1 << 0)
# define IH_DUMMY_RD_EN (1 << 1)
# define IH_REQ_NONSNOOP_EN (1 << 3)
# define GEN_IH_INT_EN (1 << 8)
#define INTERRUPT_CNTL2 0x546c
#define D1MODE_VBLANK_STATUS 0x6534
#define D2MODE_VBLANK_STATUS 0x6d34
# define DxMODE_VBLANK_OCCURRED (1 << 0)
# define DxMODE_VBLANK_ACK (1 << 4)
# define DxMODE_VBLANK_STAT (1 << 12)
# define DxMODE_VBLANK_INTERRUPT (1 << 16)
# define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17)
#define D1MODE_VLINE_STATUS 0x653c
#define D2MODE_VLINE_STATUS 0x6d3c
# define DxMODE_VLINE_OCCURRED (1 << 0)
# define DxMODE_VLINE_ACK (1 << 4)
# define DxMODE_VLINE_STAT (1 << 12)
# define DxMODE_VLINE_INTERRUPT (1 << 16)
# define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17)
#define DxMODE_INT_MASK 0x6540
# define D1MODE_VBLANK_INT_MASK (1 << 0)
# define D1MODE_VLINE_INT_MASK (1 << 4)
# define D2MODE_VBLANK_INT_MASK (1 << 8)
# define D2MODE_VLINE_INT_MASK (1 << 12)
#define DCE3_DISP_INTERRUPT_STATUS 0x7ddc
# define DC_HPD1_INTERRUPT (1 << 18)
# define DC_HPD2_INTERRUPT (1 << 19)
#define DISP_INTERRUPT_STATUS 0x7edc
# define LB_D1_VLINE_INTERRUPT (1 << 2)
# define LB_D2_VLINE_INTERRUPT (1 << 3)
# define LB_D1_VBLANK_INTERRUPT (1 << 4)
# define LB_D2_VBLANK_INTERRUPT (1 << 5)
# define DACA_AUTODETECT_INTERRUPT (1 << 16)
# define DACB_AUTODETECT_INTERRUPT (1 << 17)
# define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18)
# define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19)
# define DC_I2C_SW_DONE_INTERRUPT (1 << 20)
# define DC_I2C_HW_DONE_INTERRUPT (1 << 21)
#define DISP_INTERRUPT_STATUS_CONTINUE 0x7ee8
#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8
# define DC_HPD4_INTERRUPT (1 << 14)
# define DC_HPD4_RX_INTERRUPT (1 << 15)
# define DC_HPD3_INTERRUPT (1 << 28)
# define DC_HPD1_RX_INTERRUPT (1 << 29)
# define DC_HPD2_RX_INTERRUPT (1 << 30)
#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec
# define DC_HPD3_RX_INTERRUPT (1 << 0)
# define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1)
# define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2)
# define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3)
# define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4)
# define AUX1_SW_DONE_INTERRUPT (1 << 5)
# define AUX1_LS_DONE_INTERRUPT (1 << 6)
# define AUX2_SW_DONE_INTERRUPT (1 << 7)
# define AUX2_LS_DONE_INTERRUPT (1 << 8)
# define AUX3_SW_DONE_INTERRUPT (1 << 9)
# define AUX3_LS_DONE_INTERRUPT (1 << 10)
# define AUX4_SW_DONE_INTERRUPT (1 << 11)
# define AUX4_LS_DONE_INTERRUPT (1 << 12)
# define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13)
# define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14)
/* DCE 3.2 */
# define AUX5_SW_DONE_INTERRUPT (1 << 15)
# define AUX5_LS_DONE_INTERRUPT (1 << 16)
# define AUX6_SW_DONE_INTERRUPT (1 << 17)
# define AUX6_LS_DONE_INTERRUPT (1 << 18)
# define DC_HPD5_INTERRUPT (1 << 19)
# define DC_HPD5_RX_INTERRUPT (1 << 20)
# define DC_HPD6_INTERRUPT (1 << 21)
# define DC_HPD6_RX_INTERRUPT (1 << 22)
#define DACA_AUTO_DETECT_CONTROL 0x7828
#define DACB_AUTO_DETECT_CONTROL 0x7a28
#define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028
#define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128
# define DACx_AUTODETECT_MODE(x) ((x) << 0)
# define DACx_AUTODETECT_MODE_NONE 0
# define DACx_AUTODETECT_MODE_CONNECT 1
# define DACx_AUTODETECT_MODE_DISCONNECT 2
# define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8)
/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
# define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16)
#define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038
#define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138
#define DACA_AUTODETECT_INT_CONTROL 0x7838
#define DACB_AUTODETECT_INT_CONTROL 0x7a38
# define DACx_AUTODETECT_ACK (1 << 0)
# define DACx_AUTODETECT_INT_ENABLE (1 << 16)
#define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00
#define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10
#define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24
# define DC_HOT_PLUG_DETECTx_EN (1 << 0)
#define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04
#define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14
#define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28
# define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0)
# define DC_HOT_PLUG_DETECTx_SENSE (1 << 1)
/* DCE 3.0 */
#define DC_HPD1_INT_STATUS 0x7d00
#define DC_HPD2_INT_STATUS 0x7d0c
#define DC_HPD3_INT_STATUS 0x7d18
#define DC_HPD4_INT_STATUS 0x7d24
/* DCE 3.2 */
#define DC_HPD5_INT_STATUS 0x7dc0
#define DC_HPD6_INT_STATUS 0x7df4
# define DC_HPDx_INT_STATUS (1 << 0)
# define DC_HPDx_SENSE (1 << 1)
# define DC_HPDx_RX_INT_STATUS (1 << 8)
#define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08
#define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18
#define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c
# define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0)
# define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8)
# define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16)
/* DCE 3.0 */
#define DC_HPD1_INT_CONTROL 0x7d04
#define DC_HPD2_INT_CONTROL 0x7d10
#define DC_HPD3_INT_CONTROL 0x7d1c
#define DC_HPD4_INT_CONTROL 0x7d28
/* DCE 3.2 */
#define DC_HPD5_INT_CONTROL 0x7dc4
#define DC_HPD6_INT_CONTROL 0x7df8
# define DC_HPDx_INT_ACK (1 << 0)
# define DC_HPDx_INT_POLARITY (1 << 8)
# define DC_HPDx_INT_EN (1 << 16)
# define DC_HPDx_RX_INT_ACK (1 << 20)
# define DC_HPDx_RX_INT_EN (1 << 24)
/* DCE 3.0 */
#define DC_HPD1_CONTROL 0x7d08
#define DC_HPD2_CONTROL 0x7d14
#define DC_HPD3_CONTROL 0x7d20
#define DC_HPD4_CONTROL 0x7d2c
/* DCE 3.2 */
#define DC_HPD5_CONTROL 0x7dc8
#define DC_HPD6_CONTROL 0x7dfc
# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
/* DCE 3.2 */
# define DC_HPDx_EN (1 << 28)
/* /*
* PM4 * PM4
@ -500,7 +708,6 @@
#define PACKET3_WAIT_REG_MEM 0x3C #define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D #define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32 #define PACKET3_INDIRECT_BUFFER 0x32
#define PACKET3_CP_INTERRUPT 0x40
#define PACKET3_SURFACE_SYNC 0x43 #define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_TC_ACTION_ENA (1 << 23) # define PACKET3_TC_ACTION_ENA (1 << 23)
@ -674,4 +881,5 @@
#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16) #define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
#endif #endif

View File

@ -28,8 +28,6 @@
#ifndef __RADEON_H__ #ifndef __RADEON_H__
#define __RADEON_H__ #define __RADEON_H__
//#include "radeon_object.h"
/* TODO: Here are things that need to be done : /* TODO: Here are things that need to be done :
* - surface allocator & initializer : (bit like scratch reg) should * - surface allocator & initializer : (bit like scratch reg) should
* initialize HDP_ stuff on RS600, R600, R700 hw, well anything * initialize HDP_ stuff on RS600, R600, R700 hw, well anything
@ -62,11 +60,16 @@
* are considered as fatal) * are considered as fatal)
*/ */
#include <asm/atomic.h>
#include <types.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/kref.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <pci.h> #include <pci.h>
@ -93,6 +96,8 @@ extern int radeon_benchmarking;
extern int radeon_testing; extern int radeon_testing;
extern int radeon_connector_table; extern int radeon_connector_table;
extern int radeon_tv; extern int radeon_tv;
extern int radeon_new_pll;
typedef struct typedef struct
{ {
@ -100,7 +105,7 @@ typedef struct
int height; int height;
int bpp; int bpp;
int freq; int freq;
}mode_t; }videomode_t;
static inline uint8_t __raw_readb(const volatile void __iomem *addr) static inline uint8_t __raw_readb(const volatile void __iomem *addr)
{ {
@ -212,11 +217,11 @@ int radeon_pm_init(struct radeon_device *rdev);
*/ */
struct radeon_fence_driver { struct radeon_fence_driver {
uint32_t scratch_reg; uint32_t scratch_reg;
// atomic_t seq; atomic_t seq;
uint32_t last_seq; uint32_t last_seq;
unsigned long count_timeout; unsigned long count_timeout;
// wait_queue_head_t queue; // wait_queue_head_t queue;
// rwlock_t lock; rwlock_t lock;
struct list_head created; struct list_head created;
struct list_head emited; struct list_head emited;
struct list_head signaled; struct list_head signaled;
@ -224,7 +229,7 @@ struct radeon_fence_driver {
struct radeon_fence { struct radeon_fence {
struct radeon_device *rdev; struct radeon_device *rdev;
// struct kref kref; struct kref kref;
struct list_head list; struct list_head list;
/* protected by radeon_fence.lock */ /* protected by radeon_fence.lock */
uint32_t seq; uint32_t seq;
@ -249,36 +254,48 @@ void radeon_fence_unref(struct radeon_fence **fence);
* Tiling registers * Tiling registers
*/ */
struct radeon_surface_reg { struct radeon_surface_reg {
struct radeon_object *robj; struct radeon_bo *bo;
}; };
#define RADEON_GEM_MAX_SURFACES 8 #define RADEON_GEM_MAX_SURFACES 8
/* /*
* Radeon buffer. * TTM.
*/ */
struct radeon_object; struct radeon_mman {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_global_reference mem_global_ref;
bool mem_global_referenced;
struct ttm_bo_device bdev;
};
struct radeon_object_list { struct radeon_bo {
/* Protected by gem.mutex */
struct list_head list;
/* Protected by tbo.reserved */
u32 placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
unsigned pin_count;
void *kptr;
u32 tiling_flags;
u32 pitch;
int surface_reg;
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object *gobj;
};
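/* Because the ttm_buffer_object is embedded rather than pointed to, TTM
 * callbacks can recover the wrapping radeon_bo without extra state; a
 * minimal sketch (the helper name is illustrative): */
static inline struct radeon_bo *radeon_bo_from_tbo(struct ttm_buffer_object *tbo)
{
        return container_of(tbo, struct radeon_bo, tbo);
}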
struct radeon_bo_list {
struct list_head list; struct list_head list;
struct radeon_object *robj; struct radeon_bo *bo;
uint64_t gpu_offset; uint64_t gpu_offset;
unsigned rdomain; unsigned rdomain;
unsigned wdomain; unsigned wdomain;
uint32_t tiling_flags; u32 tiling_flags;
}; };
int radeon_object_init(struct radeon_device *rdev);
void radeon_object_fini(struct radeon_device *rdev);
int radeon_object_create(struct radeon_device *rdev,
struct drm_gem_object *gobj,
unsigned long size,
bool kernel,
uint32_t domain,
bool interruptible,
struct radeon_object **robj_ptr);
/* /*
* GEM objects. * GEM objects.
*/ */
@ -291,7 +308,6 @@ void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size, int radeon_gem_object_create(struct radeon_device *rdev, int size,
int alignment, int initial_domain, int alignment, int initial_domain,
bool discardable, bool kernel, bool discardable, bool kernel,
bool interruptible,
struct drm_gem_object **obj); struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr); uint64_t *gpu_addr);
@ -308,7 +324,7 @@ struct radeon_gart_table_ram {
}; };
struct radeon_gart_table_vram { struct radeon_gart_table_vram {
struct radeon_object *robj; struct radeon_bo *robj;
volatile uint32_t *ptr; volatile uint32_t *ptr;
}; };
@ -389,11 +405,16 @@ struct radeon_irq {
bool sw_int; bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */ /* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[2]; bool crtc_vblank_int[2];
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
spinlock_t sw_lock;
int sw_refcount;
}; };
int radeon_irq_kms_init(struct radeon_device *rdev); int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev); void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
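/* sw_lock/sw_refcount let several fence waiters share the software
 * interrupt; the expected pairing (a sketch of the intended semantics):
 *
 *   radeon_irq_kms_sw_irq_get(rdev);    enables the SW irq on 0 -> 1
 *   ...block on the fence...
 *   radeon_irq_kms_sw_irq_put(rdev);    disables it again on 1 -> 0
 */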
/* /*
* CP & ring. * CP & ring.
@ -413,7 +434,7 @@ struct radeon_ib {
*/ */
struct radeon_ib_pool { struct radeon_ib_pool {
// struct mutex mutex; // struct mutex mutex;
struct radeon_object *robj; struct radeon_bo *robj;
struct list_head scheduled_ibs; struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready; bool ready;
@ -421,7 +442,7 @@ struct radeon_ib_pool {
}; };
struct radeon_cp { struct radeon_cp {
struct radeon_object *ring_obj; struct radeon_bo *ring_obj;
volatile uint32_t *ring; volatile uint32_t *ring;
unsigned rptr; unsigned rptr;
unsigned wptr; unsigned wptr;
@ -436,8 +457,25 @@ struct radeon_cp {
bool ready; bool ready;
}; };
/*
* R6xx+ IH ring
*/
struct r600_ih {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
spinlock_t lock;
bool enabled;
};
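/* ring_size is a power of two, so ptr_mask lets the read pointer wrap
 * without a divide; a consumption sketch (the 16-byte vector stride is
 * an assumption of this sketch, not taken from this commit):
 *
 *   rdev->ih.rptr = (rdev->ih.rptr + 16) & rdev->ih.ptr_mask;
 */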
struct r600_blit { struct r600_blit {
struct radeon_object *shader_obj; struct radeon_bo *shader_obj;
u64 shader_gpu_addr; u64 shader_gpu_addr;
u32 vs_offset, ps_offset; u32 vs_offset, ps_offset;
u32 state_offset; u32 state_offset;
@ -467,8 +505,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
*/ */
struct radeon_cs_reloc { struct radeon_cs_reloc {
// struct drm_gem_object *gobj; // struct drm_gem_object *gobj;
struct radeon_object *robj; struct radeon_bo *robj;
struct radeon_object_list lobj; // struct radeon_bo_list lobj;
uint32_t handle; uint32_t handle;
uint32_t flags; uint32_t flags;
}; };
@ -556,6 +594,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
* AGP * AGP
*/ */
int radeon_agp_init(struct radeon_device *rdev); int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev); void radeon_agp_fini(struct radeon_device *rdev);
@ -563,7 +602,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
* Writeback * Writeback
*/ */
struct radeon_wb { struct radeon_wb {
struct radeon_object *wb_obj; struct radeon_bo *wb_obj;
volatile uint32_t *wb; volatile uint32_t *wb;
uint64_t gpu_addr; uint64_t gpu_addr;
}; };
@ -651,6 +690,11 @@ struct radeon_asic {
uint32_t offset, uint32_t obj_size); uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg); int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev); void (*bandwidth_update)(struct radeon_device *rdev);
void (*hdp_flush)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
}; };
/* /*
@ -738,10 +782,9 @@ struct radeon_device {
uint8_t *bios; uint8_t *bios;
bool is_atom_bios; bool is_atom_bios;
uint16_t bios_header_start; uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
// struct radeon_object *stollen_vga_memory;
struct fb_info *fbdev_info; struct fb_info *fbdev_info;
struct radeon_object *fbdev_robj; struct radeon_bo *fbdev_rbo;
struct radeon_framebuffer *fbdev_rfb; struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */ /* Register mmio */
unsigned long rmmio_base; unsigned long rmmio_base;
@ -759,7 +802,7 @@ struct radeon_device {
struct radeon_gart gart; struct radeon_gart gart;
struct radeon_mode_info mode_info; struct radeon_mode_info mode_info;
struct radeon_scratch scratch; struct radeon_scratch scratch;
// struct radeon_mman mman; struct radeon_mman mman;
struct radeon_fence_driver fence_drv; struct radeon_fence_driver fence_drv;
struct radeon_cp cp; struct radeon_cp cp;
struct radeon_ib_pool ib_pool; struct radeon_ib_pool ib_pool;
@ -817,6 +860,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
} }
} }
/*
* Cast helper
*/
#define to_radeon_fence(p) ((struct radeon_fence *)(p))
/* /*
* Registers read & write functions. * Registers read & write functions.
@ -951,18 +998,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
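/* These wrappers give callers a single ASIC-neutral entry point; a
 * hypothetical detect path might read:
 *
 *   if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
 *           status = connector_status_connected;
 *   radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 */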
/* Common functions */ /* Common functions */
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev); extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev); extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev); extern bool radeon_card_posted(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern int radeon_clocks_init(struct radeon_device *rdev); extern int radeon_clocks_init(struct radeon_device *rdev);
extern void radeon_clocks_fini(struct radeon_device *rdev); extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev); extern void radeon_scratch_init(struct radeon_device *rdev);
@ -970,6 +1023,7 @@ extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save { struct r100_mc_save {
@ -1007,7 +1061,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
extern void r100_vga_render_disable(struct radeon_device *rdev); extern void r100_vga_render_disable(struct radeon_device *rdev);
extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
struct radeon_object *robj); struct radeon_bo *robj);
extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n, const unsigned *auth, unsigned n,
@ -1015,6 +1069,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
extern int r100_cs_packet_parse(struct radeon_cs_parser *p, extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
unsigned idx); unsigned idx);
extern void r100_enable_bm(struct radeon_device *rdev);
extern void r100_set_common_regs(struct radeon_device *rdev);
/* rv200,rv250,rv280 */ /* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev); extern void r200_set_safe_registers(struct radeon_device *rdev);
@ -1090,10 +1146,15 @@ extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev); extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev); extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev); extern void r600_blit_fini(struct radeon_device *rdev);
extern int r600_cp_init_microcode(struct radeon_device *rdev); extern int r600_init_microcode(struct radeon_device *rdev);
extern int r600_gpu_reset(struct radeon_device *rdev); extern int r600_gpu_reset(struct radeon_device *rdev);
/* r600 irq */
extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
#include "radeon_object.h"
#define DRM_UDELAY(d) udelay(d) #define DRM_UDELAY(d) udelay(d)

View File

@ -76,6 +76,12 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev); void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev); int r100_ring_test(struct radeon_device *rdev);
void r100_hdp_flush(struct radeon_device *rdev);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
static struct radeon_asic r100_asic = { static struct radeon_asic r100_asic = {
.init = &r100_init, .init = &r100_init,
@ -107,6 +113,11 @@ static struct radeon_asic r100_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update, .bandwidth_update = &r100_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
}; };
@ -162,6 +173,11 @@ static struct radeon_asic r300_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update, .bandwidth_update = &r100_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
}; };
/* /*
@ -201,6 +217,11 @@ static struct radeon_asic r420_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update, .bandwidth_update = &r100_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
}; };
@ -245,6 +266,11 @@ static struct radeon_asic rs400_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update, .bandwidth_update = &r100_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
}; };
@ -263,6 +289,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev); void rs600_bandwidth_update(struct radeon_device *rdev);
void rs600_hpd_init(struct radeon_device *rdev);
void rs600_hpd_fini(struct radeon_device *rdev);
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void rs600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
static struct radeon_asic rs600_asic = { static struct radeon_asic rs600_asic = {
.init = &rs600_init, .init = &rs600_init,
// .fini = &rs600_fini, // .fini = &rs600_fini,
@ -291,6 +323,11 @@ static struct radeon_asic rs600_asic = {
.set_pcie_lanes = NULL, .set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating, .set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update, .bandwidth_update = &rs600_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
}; };
@ -334,6 +371,11 @@ static struct radeon_asic rs690_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update, .bandwidth_update = &rs690_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
}; };
@ -381,6 +423,11 @@ static struct radeon_asic rv515_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update, .bandwidth_update = &rv515_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
}; };
@ -419,6 +466,11 @@ static struct radeon_asic r520_asic = {
.set_surface_reg = r100_set_surface_reg, .set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg, .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update, .bandwidth_update = &rv515_bandwidth_update,
.hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
}; };
/* /*
@ -455,6 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev, int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset, uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence); unsigned num_pages, struct radeon_fence *fence);
void r600_hdp_flush(struct radeon_device *rdev);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
static struct radeon_asic r600_asic = { static struct radeon_asic r600_asic = {
.init = &r600_init, .init = &r600_init,
@ -484,6 +542,11 @@ static struct radeon_asic r600_asic = {
.set_surface_reg = r600_set_surface_reg, .set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg, .clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update, .bandwidth_update = &rv515_bandwidth_update,
.hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
}; };
/* /*
@ -523,6 +586,11 @@ static struct radeon_asic rv770_asic = {
.set_surface_reg = r600_set_surface_reg, .set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg, .clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update, .bandwidth_update = &rv515_bandwidth_update,
.hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
}; };
#endif #endif

View File

@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb, uint32_t igp_lane_info,
-uint16_t connector_object_id);
uint16_t connector_object_id,
struct radeon_hpd *hpd);
/* from radeon_legacy_encoder.c */
extern void
@ -60,16 +61,16 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
-static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device *dev, uint8_t id)
static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
{
-struct radeon_device *rdev = dev->dev_private;
struct atom_context *ctx = rdev->mode_info.atom_context;
-ATOM_GPIO_I2C_ASSIGMENT gpio;
ATOM_GPIO_I2C_ASSIGMENT *gpio;
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset;
int i;
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
@ -78,34 +79,121 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
-gpio = i2c_info->asGPIO_Info[id];
-i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
-i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
-i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
-i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
-i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
-i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
-i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
-i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
-i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
-i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
-i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
-i2c.put_data_mask = (1 << gpio.ucDataEnShift);
-i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
-i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
-i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
-i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
-i2c.valid = true;
for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
gpio = &i2c_info->asGPIO_Info[i];

if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
i2c.en_data_mask = (1 << gpio->ucDataEnShift);
i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
i2c.a_data_mask = (1 << gpio->ucDataA_Shift);

if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
i2c.hw_capable = true;
else
i2c.hw_capable = false;

if (gpio->sucI2cId.ucAccess == 0xa0)
i2c.mm_i2c = true;
else
i2c.mm_i2c = false;

i2c.i2c_id = gpio->sucI2cId.ucAccess;
i2c.valid = true;
}
}

return i2c;
}
static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
struct radeon_gpio_rec gpio;
int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
struct _ATOM_GPIO_PIN_LUT *gpio_info;
ATOM_GPIO_PIN_ASSIGNMENT *pin;
u16 data_offset, size;
int i, num_indices;
memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
gpio.valid = false;
atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
for (i = 0; i < num_indices; i++) {
pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = pin->usGpioPin_AIndex * 4;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
}
}
return gpio;
}
static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
struct radeon_gpio_rec *gpio)
{
struct radeon_hpd hpd;
hpd.gpio = *gpio;
if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
switch(gpio->mask) {
case (1 << 0):
hpd.hpd = RADEON_HPD_1;
break;
case (1 << 8):
hpd.hpd = RADEON_HPD_2;
break;
case (1 << 16):
hpd.hpd = RADEON_HPD_3;
break;
case (1 << 24):
hpd.hpd = RADEON_HPD_4;
break;
case (1 << 26):
hpd.hpd = RADEON_HPD_5;
break;
case (1 << 28):
hpd.hpd = RADEON_HPD_6;
break;
default:
hpd.hpd = RADEON_HPD_NONE;
break;
}
} else
hpd.hpd = RADEON_HPD_NONE;
return hpd;
}
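The switch above maps a single set bit in the AVIVO HPD GPIO register to a logical hotplug pin. A standalone sketch of the same mapping, testable in isolation (the enum values are illustrative, not the driver's):

```c
#include <assert.h>
#include <stdint.h>

enum hpd_id { HPD_NONE = -1, HPD_1, HPD_2, HPD_3, HPD_4, HPD_5, HPD_6 };

/* Map a single set bit in the (assumed) HPD GPIO register to a pin id. */
static enum hpd_id hpd_from_mask(uint32_t mask)
{
	switch (mask) {
	case 1u << 0:  return HPD_1;
	case 1u << 8:  return HPD_2;
	case 1u << 16: return HPD_3;
	case 1u << 24: return HPD_4;
	case 1u << 26: return HPD_5;
	case 1u << 28: return HPD_6;
	default:       return HPD_NONE;
	}
}

int main(void)
{
	assert(hpd_from_mask(1u << 8) == HPD_2);
	assert(hpd_from_mask(1u << 1) == HPD_NONE); /* unknown bit -> no pin */
	return 0;
}
```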
static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint32_t supported_device,
int *connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
-uint16_t *line_mux)
uint16_t *line_mux,
struct radeon_hpd *hpd)
{
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@ -135,6 +223,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
/* HIS X1300 is DVI+VGA, not DVI+DVI */
if ((dev->pdev->device == 0x7146) &&
(dev->pdev->subsystem_vendor == 0x17af) &&
(dev->pdev->subsystem_device == 0x2058)) {
if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
return false;
}
/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
if ((dev->pdev->device == 0x7142) &&
(dev->pdev->subsystem_vendor == 0x1458) &&
(dev->pdev->subsystem_device == 0x2134)) {
if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
return false;
}
/* Funky macbooks */
if ((dev->pdev->device == 0x71C5) &&
(dev->pdev->subsystem_vendor == 0x106b) &&
@ -172,6 +277,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
/* Acer laptop reports DVI-D as DVI-I */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1025) &&
(dev->pdev->subsystem_device == 0x013c)) {
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
(supported_device == ATOM_DEVICE_DFP1_SUPPORT))
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
return true;
}
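These quirks key off the PCI device and subsystem IDs of boards whose BIOS tables are wrong. A minimal standalone sketch of the matching pattern (the table entries here are illustrative only):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pci_quirk {
	uint16_t device, sub_vendor, sub_device;
	const char *note;
};

/* Illustrative entries; the real quirks live in the driver source above. */
static const struct pci_quirk quirks[] = {
	{ 0x7146, 0x17af, 0x2058, "second DFP is really VGA" },
	{ 0x95c4, 0x1025, 0x013c, "DVI-I is really DVI-D" },
};

static const char *find_quirk(uint16_t dev, uint16_t sv, uint16_t sd)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].device == dev && quirks[i].sub_vendor == sv &&
		    quirks[i].sub_device == sd)
			return quirks[i].note;
	return NULL;
}

int main(void)
{
	const char *note = find_quirk(0x7146, 0x17af, 0x2058);
	printf("quirk: %s\n", note ? note : "none");
	return 0;
}
```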
@ -240,16 +354,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
struct radeon_mode_info *mode_info = &rdev->mode_info;
struct atom_context *ctx = mode_info->atom_context;
int index = GetIndexIntoMasterTable(DATA, Object_Header);
-uint16_t size, data_offset;
-uint8_t frev, crev, line_mux = 0;
u16 size, data_offset;
u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
int i, j, path_size, device_support;
int connector_type;
-uint16_t igp_lane_info, conn_id, connector_object_id;
u16 igp_lane_info, conn_id, connector_object_id;
bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
struct radeon_gpio_rec gpio;
struct radeon_hpd hpd;
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@ -276,7 +392,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
path_size += le16_to_cpu(path->usSize);
linkb = false;
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
@ -377,10 +492,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
}
-/* look up gpio for ddc */
/* look up gpio for ddc, hpd */
if ((le16_to_cpu(path->usDeviceTag) &
-(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-== 0) {
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
if (le16_to_cpu(path->usConnObjectId) ==
le16_to_cpu(con_obj->asObjects[j].
@ -394,21 +508,34 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
asObjects[j].
usRecordOffset));
ATOM_I2C_RECORD *i2c_record;
ATOM_HPD_INT_RECORD *hpd_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;

hpd.hpd = RADEON_HPD_NONE;
while (record->ucRecordType > 0
&& record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
switch (record->ucRecordType) {
case ATOM_I2C_RECORD_TYPE:
i2c_record = (ATOM_I2C_RECORD *) record;
-line_mux = i2c_record->sucI2cId.bfI2C_LineMux;
i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_record->sucI2cId;
ddc_bus = radeon_lookup_i2c_gpio(rdev, i2c_config->ucAccess);
break;
case ATOM_HPD_INT_RECORD_TYPE:
hpd_record = (ATOM_HPD_INT_RECORD *) record;
gpio = radeon_lookup_gpio(rdev, hpd_record->ucHPDIntGPIOID);
hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
hpd.plugged_state = hpd_record->ucPlugged_PinState;
break;
}
record =
@ -421,24 +548,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
}
-} else
-line_mux = 0;
-if ((le16_to_cpu(path->usDeviceTag) ==
-ATOM_DEVICE_TV1_SUPPORT)
-|| (le16_to_cpu(path->usDeviceTag) ==
-ATOM_DEVICE_TV2_SUPPORT)
-|| (le16_to_cpu(path->usDeviceTag) ==
-ATOM_DEVICE_CV_SUPPORT))
-ddc_bus.valid = false;
-else
-ddc_bus = radeon_lookup_gpio(dev, line_mux);
} else {
hpd.hpd = RADEON_HPD_NONE;
ddc_bus.valid = false;
}
conn_id = le16_to_cpu(path->usConnObjectId);
if (!radeon_atom_apply_quirks
(dev, le16_to_cpu(path->usDeviceTag), &connector_type,
-&ddc_bus, &conn_id))
&ddc_bus, &conn_id, &hpd))
continue;
radeon_add_atom_connector(dev,
@ -447,7 +566,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
usDeviceTag),
connector_type, &ddc_bus,
linkb, igp_lane_info,
-connector_object_id);
connector_object_id,
&hpd);
}
}
@ -502,6 +622,7 @@ struct bios_connector {
uint16_t devices;
int connector_type;
struct radeon_i2c_bus_rec ddc_bus;
struct radeon_hpd hpd;
};
bool radeon_get_atom_connector_info_from_supported_devices_table(struct
@ -517,7 +638,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
uint16_t device_support;
uint8_t dac;
union atom_supported_devices *supported_devices;
-int i, j;
int i, j, max_device;
struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@ -527,7 +648,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
-for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
if (frev > 1)
max_device = ATOM_MAX_SUPPORTED_DEVICE;
else
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
ATOM_CONNECTOR_INFO_I2C ci =
supported_devices->info.asConnInfo[i];
@ -553,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
-if ((rdev->family == CHIP_RS690) ||
-(rdev->family == CHIP_RS740)) {
-if ((i == ATOM_DEVICE_DFP2_INDEX)
-&& (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
-bios_connectors[i].line_mux =
-ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-else if ((i == ATOM_DEVICE_DFP3_INDEX)
-&& (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
-bios_connectors[i].line_mux =
-ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-else
-bios_connectors[i].line_mux =
-ci.sucI2cId.sbfAccess.bfI2C_LineMux;
-} else
-bios_connectors[i].line_mux =
-ci.sucI2cId.sbfAccess.bfI2C_LineMux;
bios_connectors[i].line_mux =
ci.sucI2cId.ucAccess;
/* give tv unique connector ids */
if (i == ATOM_DEVICE_TV1_INDEX) {
@ -582,9 +694,31 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].line_mux = 52;
} else
bios_connectors[i].ddc_bus =
-radeon_lookup_gpio(dev,
radeon_lookup_i2c_gpio(rdev,
bios_connectors[i].line_mux);
if ((crev > 1) && (frev > 1)) {
u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
switch (isb) {
case 0x4:
bios_connectors[i].hpd.hpd = RADEON_HPD_1;
break;
case 0xa:
bios_connectors[i].hpd.hpd = RADEON_HPD_2;
break;
default:
bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
break;
}
} else {
if (i == ATOM_DEVICE_DFP1_INDEX)
bios_connectors[i].hpd.hpd = RADEON_HPD_1;
else if (i == ATOM_DEVICE_DFP2_INDEX)
bios_connectors[i].hpd.hpd = RADEON_HPD_2;
else
bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
}
/* Always set the connector type to VGA for CRT1/CRT2. if they are
* shared with a DVI port, we'll pick up the DVI connector when we
* merge the outputs. Some bioses incorrectly list VGA ports as DVI.
@ -595,7 +729,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (!radeon_atom_apply_quirks
(dev, (1 << i), &bios_connectors[i].connector_type,
-&bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
&bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
&bios_connectors[i].hpd))
continue;
bios_connectors[i].valid = true;
@ -617,9 +752,9 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
}
/* combine shared connectors */
-for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
for (i = 0; i < max_device; i++) {
if (bios_connectors[i].valid) {
-for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
for (j = 0; j < max_device; j++) {
if (bios_connectors[j].valid && (i != j)) {
if (bios_connectors[i].line_mux ==
bios_connectors[j].line_mux) {
@ -643,6 +778,10 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].
connector_type =
DRM_MODE_CONNECTOR_DVII;
if (bios_connectors[j].devices &
(ATOM_DEVICE_DFP_SUPPORT))
bios_connectors[i].hpd =
bios_connectors[j].hpd;
bios_connectors[j].
valid = false;
}
@ -653,7 +792,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
}
/* add the connectors */
-for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
for (i = 0; i < max_device; i++) {
if (bios_connectors[i].valid) {
uint16_t connector_object_id =
atombios_get_connector_object_id(dev,
@ -666,7 +805,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
connector_type,
&bios_connectors[i].ddc_bus,
false, 0,
-connector_object_id);
connector_object_id,
&bios_connectors[i].hpd);
}
}
@ -731,6 +871,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
* pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
* family.
*/
if (!radeon_new_pll)
p1pll->pll_out_min = 64800;
}
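The hunk above pins the minimum PLL output on the legacy path and only trusts the BIOS value when the new PLL code is requested. A standalone sketch of that decision (the module-parameter name and the 64800 kHz floor come from the hunk; the helper itself is illustrative):

```c
#include <assert.h>
#include <stdbool.h>

/* Legacy path pins the minimum; the new PLL code trusts the BIOS value. */
static int pll_out_min(int bios_min, bool use_new_pll)
{
	return use_new_pll ? bios_min : 64800;
}

int main(void)
{
	assert(pll_out_min(70000, true) == 70000); /* new PLL code: keep BIOS value */
	assert(pll_out_min(0, false) == 64800);    /* legacy path: conservative floor */
	return 0;
}
```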
@ -861,6 +1002,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
uint8_t frev, crev;
struct radeon_atom_ss *ss = NULL;
int i;
if (id > ATOM_MAX_SS_ENTRY)
return NULL;
@ -878,12 +1020,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
if (!ss)
return NULL;
-ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
-ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
-ss->step = ss_info->asSS_Info[id].ucSS_Step;
-ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
-ss->range = ss_info->asSS_Info[id].ucSS_Range;
-ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
if (ss_info->asSS_Info[i].ucSS_Id == id) {
ss->percentage =
le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
ss->step = ss_info->asSS_Info[i].ucSS_Step;
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
}
}
}
return ss;
}
@ -901,7 +1048,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
-uint16_t data_offset;
uint16_t data_offset, misc;
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
@ -940,6 +1087,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
if (misc & ATOM_VSYNC_POLARITY)
lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
if (misc & ATOM_HSYNC_POLARITY)
lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
if (misc & ATOM_COMPOSITESYNC)
lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
if (misc & ATOM_INTERLACE)
lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
if (misc & ATOM_DOUBLE_CLOCK_MODE)
lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
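The block above translates ATOM misc bits into DRM mode flags one test at a time. A standalone sketch of the same translation driven by a lookup table (the bit values and flag names below are stand-ins, not the real ATOM_* or DRM_MODE_FLAG_* constants):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in bit definitions; the driver uses ATOM_* and DRM_MODE_FLAG_*. */
#define MISC_VSYNC_POL  (1u << 0)
#define MISC_HSYNC_POL  (1u << 1)
#define FLAG_NVSYNC     (1u << 0)
#define FLAG_NHSYNC     (1u << 1)

static const struct { uint32_t misc_bit, mode_flag; } misc_map[] = {
	{ MISC_VSYNC_POL, FLAG_NVSYNC },
	{ MISC_HSYNC_POL, FLAG_NHSYNC },
};

/* Accumulate mode flags for every misc bit that is set. */
static uint32_t misc_to_flags(uint32_t misc)
{
	uint32_t flags = 0;
	for (size_t i = 0; i < sizeof(misc_map) / sizeof(misc_map[0]); i++)
		if (misc & misc_map[i].misc_bit)
			flags |= misc_map[i].mode_flag;
	return flags;
}

int main(void)
{
	assert(misc_to_flags(MISC_HSYNC_POL) == FLAG_NHSYNC);
	return 0;
}
```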
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);


@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
if (ref_div == 0)
return 0;
sclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
if (ref_div == 0)
return 0;
mclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev)
ret = radeon_combios_get_clock_info(dev);
if (ret) {
if (p1pll->reference_div < 2) {
if (!ASIC_IS_AVIVO(rdev)) {
u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
if (ASIC_IS_R300(rdev))
p1pll->reference_div =
(tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
else
p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
if (p1pll->reference_div < 2)
p1pll->reference_div = 12;
} else
p1pll->reference_div = 12;
}
if (p2pll->reference_div < 2)
p2pll->reference_div = 12;
if (rdev->family < CHIP_RS600) {
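Both clock readbacks above now refuse to divide when ref_div reads back as zero. A standalone sketch of the guarded computation (register values are faked; the helper is illustrative):

```c
#include <assert.h>
#include <stdint.h>

/* Compute a clock from feedback/reference dividers, refusing to divide by 0. */
static uint32_t safe_clock(uint32_t fb_div, uint32_t ref_div)
{
	if (ref_div == 0)
		return 0; /* unprogrammed PLL: report 0 instead of faulting */
	return fb_div / ref_div;
}

int main(void)
{
	assert(safe_clock(400, 4) == 100);
	assert(safe_clock(400, 0) == 0);
	return 0;
}
```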

File diff suppressed because it is too large


@ -40,6 +40,26 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
if (radeon_dp_needs_link_train(radeon_connector)) {
if (connector->encoder)
dp_link_train(connector->encoder, connector);
}
}
}
}
static void radeon_property_change_mode(struct drm_encoder *encoder)
{
struct drm_crtc *crtc = encoder->crtc;
@ -445,10 +465,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
-radeon_i2c_do_lock(radeon_connector, 1);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
-radeon_i2c_do_lock(radeon_connector, 0);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (radeon_connector->edid)
ret = connector_status_connected;
}
@ -553,17 +573,17 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
if (!encoder)
ret = connector_status_disconnected;
-radeon_i2c_do_lock(radeon_connector, 1);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
-radeon_i2c_do_lock(radeon_connector, 0);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
-radeon_i2c_do_lock(radeon_connector, 1);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-radeon_i2c_do_lock(radeon_connector, 0);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@ -708,17 +728,17 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
enum drm_connector_status ret = connector_status_disconnected;
bool dret;
-radeon_i2c_do_lock(radeon_connector, 1);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
-radeon_i2c_do_lock(radeon_connector, 0);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
-radeon_i2c_do_lock(radeon_connector, 1);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-radeon_i2c_do_lock(radeon_connector, 0);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@ -735,6 +755,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
ret = connector_status_disconnected;
} else
ret = connector_status_connected;
/* multiple connectors on the same encoder with the same ddc line
* This tends to be HDMI and DVI on the same encoder with the
* same ddc line. If the edid says HDMI, consider the HDMI port
* connected and the DVI port disconnected. If the edid doesn't
* say HDMI, vice versa.
*/
if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_device *dev = connector->dev;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
if (connector == list_connector)
continue;
list_radeon_connector = to_radeon_connector(list_connector);
if (radeon_connector->devices == list_radeon_connector->devices) {
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
ret = connector_status_disconnected;
}
} else {
if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
(connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
ret = connector_status_disconnected;
}
}
}
}
}
}
}
@ -863,6 +916,91 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
.force = radeon_dvi_force,
};
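The shared-DDC logic above boils down to letting the EDID arbitrate which of two ports on one DDC line reports the monitor: if the EDID says HDMI, the HDMI port wins; otherwise the DVI port does. A standalone sketch of that decision (the enum and helper are illustrative):

```c
#include <assert.h>
#include <stdbool.h>

enum port { PORT_DVI, PORT_HDMI };

/* Decide whether a port sharing one DDC line should claim the monitor. */
static bool port_claims_monitor(enum port p, bool edid_says_hdmi)
{
	return edid_says_hdmi ? (p == PORT_HDMI) : (p == PORT_DVI);
}

int main(void)
{
	assert(port_claims_monitor(PORT_HDMI, true));
	assert(!port_claims_monitor(PORT_DVI, true));  /* DVI yields to HDMI */
	assert(port_claims_monitor(PORT_DVI, false));
	return 0;
}
```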
static void radeon_dp_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
static int radeon_dp_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
ret = radeon_ddc_get_modes(radeon_connector);
return ret;
}
static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
u8 sink_type;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
sink_type = radeon_dp_getsinktype(radeon_connector);
if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
if (radeon_dp_getdpcd(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
}
} else {
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
if (radeon_ddc_probe(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
}
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
return ret;
}
static int radeon_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
/* XXX check mode bandwidth */
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
return radeon_dp_mode_valid_helper(radeon_connector, mode);
else
return MODE_OK;
}
struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
.get_modes = radeon_dp_get_modes,
.mode_valid = radeon_dp_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
struct drm_connector_funcs radeon_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
.destroy = radeon_dp_connector_destroy,
.force = radeon_dvi_force,
};
void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
@ -871,7 +1009,8 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb,
uint32_t igp_lane_info,
-uint16_t connector_object_id)
uint16_t connector_object_id,
struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@ -911,6 +1050,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->devices = supported_device;
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@ -963,10 +1103,12 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
}
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
@ -997,16 +1139,23 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
-drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
-ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (ret)
goto failed;
if (i2c_bus->valid) {
/* add DP i2c bus */
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
goto failed;
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@ -1020,6 +1169,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
@ -1038,7 +1190,6 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
goto failed;
}
-drm_mode_create_scaling_mode_property(dev);
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@ -1063,7 +1214,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
-uint16_t connector_object_id)
uint16_t connector_object_id,
struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@ -1093,6 +1245,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@ -1160,6 +1313,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:


@ -77,10 +77,11 @@ void radeon_surface_init(struct radeon_device *rdev)
if (rdev->family < CHIP_R600) {
int i;
-for (i = 0; i < 8; i++) {
-WREG32(RADEON_SURFACE0_INFO +
-i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
-0);
for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
if (rdev->surface_regs[i].bo)
radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
else
radeon_clear_surface_reg(rdev, i);
}
/* enable surfaces */
WREG32(RADEON_SURFACE_CNTL, 0);
@ -241,6 +242,24 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
if (radeon_card_posted(rdev))
return true;
if (rdev->bios) {
DRM_INFO("GPU not posted. posting now...\n");
if (rdev->is_atom_bios)
atom_asic_init(rdev->mode_info.atom_context);
else
radeon_combios_asic_init(rdev->ddev);
return true;
} else {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return false;
}
}
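radeon_boot_test_post_card above is a simple three-way decision: already posted, re-post from the video BIOS, or fail. A standalone sketch of that control flow (names, types, and messages are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

/* Returns true when the card is usable: either already posted,
 * or we re-posted it from the video BIOS. */
static bool boot_test_post_card(bool posted, bool have_bios)
{
	if (posted)
		return true;
	if (have_bios) {
		puts("GPU not posted. posting now...");
		/* the real driver runs the atom/combios init tables here */
		return true;
	}
	puts("Card not posted and no BIOS - ignoring");
	return false;
}

int main(void)
{
	return boot_test_post_card(false, true) ? 0 : 1;
}
```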
int radeon_dummy_page_init(struct radeon_device *rdev)
{
rdev->dummy_page.page = AllocPage();
@ -493,12 +512,16 @@ int radeon_atombios_init(struct radeon_device *rdev)
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
}
void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
kfree(rdev->mode_info.atom_context);
}
kfree(rdev->mode_info.atom_card_info);
}
@ -581,7 +604,7 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
}
-if (radeon_agpmode == -1) {
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
}
@ -895,7 +918,7 @@ u32_t drvEntry(int action, char *cmdline)
return 0;
};
}
-dbgprintf("Radeon RC08 cmdline %s\n", cmdline);
dbgprintf("Radeon RC09 cmdline %s\n", cmdline);
enum_pci_devices();

@ -35,6 +35,51 @@ extern int atom_debug;
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *clone_encoder;
uint32_t index_mask = 0;
int count;
/* DIG routing gets problematic */
if (rdev->family >= CHIP_R600)
return index_mask;
/* LVDS/TV are too wacky */
if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
return index_mask;
/* DVO requires 2x ppll clocks depending on tmds chip */
if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
return index_mask;
count = -1;
list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
count++;
if (clone_encoder == encoder)
continue;
if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
continue;
if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
continue;
else
index_mask |= (1 << count);
}
return index_mask;
}
void radeon_setup_encoder_clones(struct drm_device *dev)
{
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder->possible_clones = radeon_encoder_clones(encoder);
}
}
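radeon_encoder_clones above builds a bitmask of encoders that may scan out the same image, skipping LVDS/TV and DVO. A standalone sketch of the mask construction over a small encoder list (the struct and predicate are illustrative):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct enc { bool lvds_or_tv; bool dvo; };

/* Build the clone mask for encoder `self`: every other encoder that is
 * neither LVDS/TV nor DVO may be cloned with it. */
static uint32_t clone_mask(const struct enc *list, int n, int self)
{
	uint32_t mask = 0;
	for (int i = 0; i < n; i++) {
		if (i == self)
			continue;
		if (list[i].lvds_or_tv || list[i].dvo)
			continue;
		mask |= 1u << i;
	}
	return mask;
}

int main(void)
{
	struct enc encs[3] = { {false, false}, {true, false}, {false, false} };
	assert(clone_mask(encs, 3, 0) == (1u << 2)); /* skips self and the LVDS one */
	return 0;
}
```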
uint32_t
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
@ -163,29 +208,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
-/* used for both atom and legacy */
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
-struct drm_display_mode *mode,
-struct drm_display_mode *adjusted_mode)
-{
-struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-struct drm_device *dev = encoder->dev;
-struct radeon_device *rdev = dev->dev_private;
-struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
-if (mode->hdisplay < native_mode->hdisplay ||
-mode->vdisplay < native_mode->vdisplay) {
-int mode_id = adjusted_mode->base.id;
-*adjusted_mode = *native_mode;
-if (!ASIC_IS_AVIVO(rdev)) {
-adjusted_mode->hdisplay = mode->hdisplay;
-adjusted_mode->vdisplay = mode->vdisplay;
-}
-adjusted_mode->base.id = mode_id;
-}
-}
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@ -198,14 +220,24 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
-if (radeon_encoder->rmx_type != RMX_OFF)
-radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
/* hw bug */
if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
/* get the native mode for LVDS */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
int mode_id = adjusted_mode->base.id;
*adjusted_mode = *native_mode;
if (!ASIC_IS_AVIVO(rdev)) {
adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay;
}
adjusted_mode->base.id = mode_id;
}
/* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
@ -218,6 +250,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
}
}
if (ASIC_IS_DCE3(rdev) &&
(radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
return true;
}
@ -392,7 +430,7 @@ union lvds_encoder_control {
LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};
-static void
void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
@ -522,6 +560,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *radeon_dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@ -551,10 +590,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
-/*if (radeon_output->MonType == MT_DP)
-return ATOM_ENCODER_MODE_DP;
-else*/
-if (drm_detect_hdmi_monitor(radeon_connector->edid))
radeon_dig_connector = radeon_connector->con_priv;
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@ -573,6 +612,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
}
}
/*
* DIG Encoder/Transmitter Setup
*
* DCE 3.0/3.1
* - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
* Supports up to 3 digital outputs
* - 2 DIG encoder blocks.
* DIG1 can drive UNIPHY link A or link B
* DIG2 can drive UNIPHY link B or LVTMA
*
* DCE 3.2
* - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
* Supports up to 5 digital outputs
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
* crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
* crtc1 -> dig1 -> UNIPHY0 link B -> DP
* crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
* crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
*/
static void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
{
@ -614,10 +677,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
/* XXX doesn't really matter which dig encoder we pick as long as it's
* not already in use
*/
if (dig_connector->linkb)
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
else
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* Only dig2 encoder can drive LVTMA */
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
num = 2;
break;
@ -652,18 +722,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
}
}
-if (radeon_encoder->pixel_clock > 165000) {
-args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
-args.ucLaneNum = 8;
-} else {
-if (dig_connector->linkb)
-args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
-else
-args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
-args.ucLaneNum = 4;
-}
-args.ucEncoderMode = atombios_get_encoder_mode(encoder);
args.ucEncoderMode = atombios_get_encoder_mode(encoder);

if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
if (dig_connector->dp_clock == 270000)
args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
args.ucLaneNum = dig_connector->dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.ucLaneNum = 8;
else
args.ucLaneNum = 4;

if (dig_connector->linkb)
args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;

atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
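The lane-count selection above distinguishes DP (lanes from the negotiated link, optional 2.7 GHz rate) from TMDS (8 lanes for dual link above 165 MHz, else 4). A standalone sketch of the same selection (the struct and constants are illustrative):

```c
#include <assert.h>
#include <stdbool.h>

struct link_cfg { int lanes; bool rate_270; };

static struct link_cfg pick_link(bool is_dp, int dp_clock_khz,
				 int dp_lanes, int pixel_clock_khz)
{
	struct link_cfg c = { 0, false };
	if (is_dp) {
		c.rate_270 = (dp_clock_khz == 270000);
		c.lanes = dp_lanes;        /* negotiated with the sink */
	} else if (pixel_clock_khz > 165000) {
		c.lanes = 8;               /* dual-link TMDS */
	} else {
		c.lanes = 4;               /* single-link TMDS */
	}
	return c;
}

int main(void)
{
	assert(pick_link(true, 270000, 4, 0).rate_270);
	assert(pick_link(false, 0, 0, 200000).lanes == 8);
	assert(pick_link(false, 0, 0, 100000).lanes == 4);
	return 0;
}
```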
@ -674,8 +747,8 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
};
-static void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@ -687,6 +760,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
bool is_dp = false;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@ -704,6 +778,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
dig_connector = radeon_connector->con_priv;
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
memset(&args, 0, sizeof(args));
if (ASIC_IS_DCE32(rdev))
@ -724,17 +801,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v1.usInitInfo = radeon_connector->connector_object_id;
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
} else {
-if (radeon_encoder->pixel_clock > 165000)
if (is_dp)
args.v1.usPixelClock =
cpu_to_le16(dig_connector->dp_clock / 10);
else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
if (ASIC_IS_DCE32(rdev)) {
-if (radeon_encoder->pixel_clock > 165000)
-args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
if (dig->dig_block)
args.v2.acConfig.ucEncoderSel = 1;
if (dig_connector->linkb)
args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@ -751,7 +834,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
break;
}
-if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (is_dp)
args.v2.acConfig.fCoherentMode = 1;
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
}
@ -760,17 +845,20 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
/* XXX doesn't really matter which dig encoder we pick as long as it's
* not already in use
*/
if (dig_connector->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
if (rdev->flags & RADEON_IS_IGP) {
if (radeon_encoder->pixel_clock > 165000) {
-args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
-ATOM_TRANSMITTER_CONFIG_LINKA_B);
if (dig_connector->igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
else if (dig_connector->igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
} else {
-args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
if (dig_connector->igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
else if (dig_connector->igp_lane_info & 0x2)
@ -780,35 +868,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
else if (dig_connector->igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
}
-} else {
-if (radeon_encoder->pixel_clock > 165000)
-args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
-ATOM_TRANSMITTER_CONFIG_LINKA_B |
-ATOM_TRANSMITTER_CONFIG_LANE_0_7);
-else {
-if (dig_connector->linkb)
-args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-else
-args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-}
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* Only dig2 encoder can drive LVTMA */
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
-if (radeon_encoder->pixel_clock > 165000)
-args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
-ATOM_TRANSMITTER_CONFIG_LINKA_B |
-ATOM_TRANSMITTER_CONFIG_LANE_0_7);
-else {
-if (dig_connector->linkb)
-args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-else
-args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-}
break;
}
-if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
if (dig_connector->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
if (is_dp)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
}
@ -918,12 +996,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
-atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
{
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
dp_link_train(encoder, connector);
}
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
-atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
break;
}
} else {
@ -1025,13 +1107,33 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
-} else
-args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
} else {
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
return;
radeon_connector = to_radeon_connector(connector);
if (!radeon_connector->con_priv)
return;
dig_connector = radeon_connector->con_priv;
/* XXX doesn't really matter which dig encoder we pick as long as it's
* not already in use
*/
if (dig_connector->linkb)
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* Only dig2 encoder can drive LVTMA */
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@ -1104,12 +1206,15 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
if (radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
if (radeon_encoder->enc_priv) {
struct radeon_encoder_atom_dig *dig;
dig = radeon_encoder->enc_priv;
dig->dig_block = radeon_crtc->crtc_id;
}
}
radeon_encoder->pixel_clock = adjusted_mode->clock;
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
@@ -1134,14 +1239,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* disable the encoder and transmitter */ /* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE); atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
/* setup and enable the encoder and transmitter */ /* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE); atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
break; break;
case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DDI:
atombios_ddia_setup(encoder, ATOM_ENABLE); atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1354,7 +1459,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
encoder->possible_crtcs = 0x1; encoder->possible_crtcs = 0x1;
else else
encoder->possible_crtcs = 0x3; encoder->possible_crtcs = 0x3;
encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL; radeon_encoder->enc_priv = NULL;


@@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return signaled; return signaled;
} }
int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
{
struct radeon_device *rdev;
int ret = 0;
rdev = fence->rdev;
__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
while (1) {
if (radeon_fence_signaled(fence))
break;
if (time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
break;
}
if (lazy)
schedule_timeout(1);
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
__set_current_state(TASK_RUNNING);
return ret;
}
int radeon_fence_wait(struct radeon_fence *fence, bool intr) int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{ {
struct radeon_device *rdev; struct radeon_device *rdev;
@@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
return 0; return 0;
} }
if (rdev->family >= CHIP_R600) {
r = r600_fence_wait(fence, intr, 0);
if (r == -ERESTARTSYS)
return -EBUSY;
return r;
}
retry: retry:
cur_jiffies = jiffies; cur_jiffies = jiffies;
timeout = HZ / 100; timeout = HZ / 100;
@@ -231,14 +193,17 @@ retry:
} }
if (intr) { if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue, r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout); radeon_fence_signaled(fence), timeout);
if (unlikely(r == -ERESTARTSYS)) { radeon_irq_kms_sw_irq_put(rdev);
return -EBUSY; if (unlikely(r < 0))
} return r;
} else { } else {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue, r = wait_event_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout); radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
} }
if (unlikely(!radeon_fence_signaled(fence))) { if (unlikely(!radeon_fence_signaled(fence))) {
if (unlikely(r == 0)) { if (unlikely(r == 0)) {

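The hunk above brackets both wait paths with radeon_irq_kms_sw_irq_get()/radeon_irq_kms_sw_irq_put(), so the software interrupt that wakes fence_drv.queue is unmasked only while at least one caller is actually waiting. A minimal sketch of that refcounting pattern, assuming a driver-side atomic counter and mask/unmask helpers (irq_refcount, unmask_sw_irq() and mask_sw_irq() are illustrative names, not from this commit):

/* Illustrative sketch only: refcounted IRQ unmasking as used above. */
void sw_irq_get_sketch(struct radeon_device *rdev)
{
	if (atomic_inc_return(&rdev->irq_refcount) == 1)
		unmask_sw_irq(rdev);	/* first waiter enables the IRQ */
}

void sw_irq_put_sketch(struct radeon_device *rdev)
{
	if (atomic_dec_and_test(&rdev->irq_refcount))
		mask_sw_irq(rdev);	/* last waiter disables it again */
}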

@@ -38,6 +38,23 @@ typedef union rfixed {
#define fixed_init_half(A) { .full = rfixed_const_half((A)) } #define fixed_init_half(A) { .full = rfixed_const_half((A)) }
#define rfixed_trunc(A) ((A).full >> 12) #define rfixed_trunc(A) ((A).full >> 12)
static inline u32 rfixed_floor(fixed20_12 A)
{
u32 non_frac = rfixed_trunc(A);
return rfixed_const(non_frac);
}
static inline u32 rfixed_ceil(fixed20_12 A)
{
u32 non_frac = rfixed_trunc(A);
if (A.full > rfixed_const(non_frac))
return rfixed_const(non_frac + 1);
else
return rfixed_const(non_frac);
}
static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
{ {
u64 tmp = ((u64)A.full << 13); u64 tmp = ((u64)A.full << 13);

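For reference, fixed20_12 carries 12 fractional bits, so the two new helpers round by rebuilding a whole-number fixed-point word. A worked sketch (editorial, assuming rfixed_const(x) expands to ((x) << 12), matching rfixed_trunc() above):

/* Example value, not part of the patch: 2.25 in 20.12 format. */
fixed20_12 a;
a.full = (2 << 12) + (1 << 10);	/* 2.25 -> 0x2400 */
/* rfixed_trunc(a) == 2        (integer part only)    */
/* rfixed_floor(a) == 0x2000   (2.0 as a 20.12 word)  */
/* rfixed_ceil(a)  == 0x3000   (3.0 as a 20.12 word)  */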

@@ -59,16 +59,17 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
} }
void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state) void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{ {
struct radeon_device *rdev = radeon_connector->base.dev->dev_private; struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp; uint32_t temp;
struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
/* RV410 appears to have a bug where the hw i2c in reset /* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before * holds the i2c port in a bad state - switch hw i2c away before
* doing DDC - do this for all r200s/r300s/r400s for safety's sake * doing DDC - do this for all r200s/r300s/r400s for safety's sake
*/ */
if (rec->hw_capable) {
if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
if (rec->a_clk_reg == RADEON_GPIO_MONID) { if (rec->a_clk_reg == RADEON_GPIO_MONID) {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
@@ -78,16 +79,23 @@ void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_stat
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
} }
} }
if (lock_state) {
temp = RREG32(rec->a_clk_reg);
temp &= ~(rec->a_clk_mask);
WREG32(rec->a_clk_reg, temp);
temp = RREG32(rec->a_data_reg);
temp &= ~(rec->a_data_mask);
WREG32(rec->a_data_reg, temp);
} }
/* clear the output pin values */
temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
WREG32(rec->a_clk_reg, temp);
temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
WREG32(rec->a_data_reg, temp);
/* set the pins to input */
temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
WREG32(rec->en_clk_reg, temp);
temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
WREG32(rec->en_data_reg, temp);
/* mask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg); temp = RREG32(rec->mask_clk_reg);
if (lock_state) if (lock_state)
temp |= rec->mask_clk_mask; temp |= rec->mask_clk_mask;
@@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec; struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val; uint32_t val;
val = RREG32(rec->get_clk_reg); /* read the value off the pin */
val &= rec->get_clk_mask; val = RREG32(rec->y_clk_reg);
val &= rec->y_clk_mask;
return (val != 0); return (val != 0);
} }
@@ -126,8 +135,10 @@ static int get_data(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec; struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val; uint32_t val;
val = RREG32(rec->get_data_reg); /* read the value off the pin */
val &= rec->get_data_mask; val = RREG32(rec->y_data_reg);
val &= rec->y_data_mask;
return (val != 0); return (val != 0);
} }
@@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock)
struct radeon_i2c_bus_rec *rec = &i2c->rec; struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val; uint32_t val;
val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask); /* set pin direction */
val |= clock ? 0 : rec->put_clk_mask; val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
WREG32(rec->put_clk_reg, val); val |= clock ? 0 : rec->en_clk_mask;
WREG32(rec->en_clk_reg, val);
} }
static void set_data(void *i2c_priv, int data) static void set_data(void *i2c_priv, int data)
@@ -150,9 +162,10 @@ static void set_data(void *i2c_priv, int data)
struct radeon_i2c_bus_rec *rec = &i2c->rec; struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val; uint32_t val;
val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask); /* set pin direction */
val |= data ? 0 : rec->put_data_mask; val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
WREG32(rec->put_data_reg, val); val |= data ? 0 : rec->en_data_mask;
WREG32(rec->en_data_reg, val);
} }
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
@@ -166,21 +179,18 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
if (i2c == NULL) if (i2c == NULL)
return NULL; return NULL;
// i2c->adapter.owner = THIS_MODULE;
i2c->adapter.algo_data = &i2c->algo;
i2c->dev = dev; i2c->dev = dev;
i2c->algo.setsda = set_data; i2c->adapter.algo_data = &i2c->algo.bit;
i2c->algo.setscl = set_clock; i2c->algo.bit.setsda = set_data;
i2c->algo.getsda = get_data; i2c->algo.bit.setscl = set_clock;
i2c->algo.getscl = get_clock; i2c->algo.bit.getsda = get_data;
i2c->algo.udelay = 20; i2c->algo.bit.getscl = get_clock;
i2c->algo.bit.udelay = 20;
/* VESA says 2.2 ms is enough, 1 jiffy doesn't seem to always /* VESA says 2.2 ms is enough, 1 jiffy doesn't seem to always
* make this, 2 jiffies is a lot more reliable */ * make this, 2 jiffies is a lot more reliable */
i2c->algo.timeout = 2; i2c->algo.bit.timeout = 2;
i2c->algo.data = i2c; i2c->algo.bit.data = i2c;
i2c->rec = *rec; i2c->rec = *rec;
// i2c_set_adapdata(&i2c->adapter, i2c);
ret = i2c_bit_add_bus(&i2c->adapter); ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) { if (ret) {
DRM_INFO("Failed to register i2c %s\n", name); DRM_INFO("Failed to register i2c %s\n", name);
@@ -194,12 +204,42 @@ out_free:
} }
struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
{
struct radeon_i2c_chan *i2c;
int ret;
i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
if (i2c == NULL)
return NULL;
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->dev = dev;
i2c->adapter.algo_data = &i2c->algo.dp;
i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
i2c->algo.dp.address = 0;
ret = i2c_dp_aux_add_bus(&i2c->adapter);
if (ret) {
DRM_INFO("Failed to register i2c %s\n", name);
goto out_free;
}
return i2c;
out_free:
kfree(i2c);
return NULL;
}
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{ {
if (!i2c) if (!i2c)
return; return;
// i2c_del_adapter(&i2c->adapter);
kfree(i2c); kfree(i2c);
} }
@@ -207,3 +247,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{ {
return NULL; return NULL;
} }
void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 *val)
{
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
*val = in_buf[0];
DRM_DEBUG("val = 0x%02x\n", *val);
} else {
DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
addr, *val);
}
}
void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 val)
{
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = val;
if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
addr, val);
}
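
Together, the two helpers give a register-style interface over any bus registered this way. A hedged usage sketch for a read-modify-write cycle, where tmds is assumed to be a struct radeon_encoder_ext_tmds * and the 0x70 slave address and 0x0c register offset are illustrative values only:

/* Illustrative only: read a register, set one bit, write it back. */
u8 tmp;
radeon_i2c_sw_get_byte(tmds->i2c_bus, 0x70, 0x0c, &tmp);
tmp |= 0x01;
radeon_i2c_sw_put_byte(tmds->i2c_bus, 0x70, 0x0c, tmp);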


@@ -30,6 +30,18 @@
#include "radeon.h" #include "radeon.h"
#include "atom.h" #include "atom.h"
static void radeon_overscan_setup(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
}
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) struct drm_display_mode *adjusted_mode)
@@ -292,8 +304,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
uint32_t mask; uint32_t mask;
if (radeon_crtc->crtc_id) if (radeon_crtc->crtc_id)
mask = (RADEON_CRTC2_EN | mask = (RADEON_CRTC2_DISP_DIS |
RADEON_CRTC2_DISP_DIS |
RADEON_CRTC2_VSYNC_DIS | RADEON_CRTC2_VSYNC_DIS |
RADEON_CRTC2_HSYNC_DIS | RADEON_CRTC2_HSYNC_DIS |
RADEON_CRTC2_DISP_REQ_EN_B); RADEON_CRTC2_DISP_REQ_EN_B);
@@ -305,7 +316,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) { switch (mode) {
case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_ON:
if (radeon_crtc->crtc_id) if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask); WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else { else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B)); RADEON_CRTC_DISP_REQ_EN_B));
@@ -319,7 +330,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF: case DRM_MODE_DPMS_OFF:
// drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); // drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id) if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else { else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B)); RADEON_CRTC_DISP_REQ_EN_B));
@@ -400,14 +411,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *radeon_fb; struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj; struct drm_gem_object *obj;
struct radeon_bo *rbo;
uint64_t base; uint64_t base;
uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
uint32_t crtc_pitch, pitch_pixels; uint32_t crtc_pitch, pitch_pixels;
uint32_t tiling_flags; uint32_t tiling_flags;
int format; int format;
uint32_t gen_cntl_reg, gen_cntl_val; uint32_t gen_cntl_reg, gen_cntl_val;
int r;
DRM_DEBUG("\n"); DRM_DEBUG("\n");
/* no fb bound */
if (!crtc->fb) {
DRM_DEBUG("No FB bound\n");
return 0;
}
radeon_fb = to_radeon_framebuffer(crtc->fb); radeon_fb = to_radeon_framebuffer(crtc->fb);
@@ -431,11 +449,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return false; return false;
} }
/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj; obj = radeon_fb->obj;
// if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { rbo = obj->driver_private;
// return -EINVAL; r = radeon_bo_reserve(rbo, false);
// } if (unlikely(r != 0))
base = rdev->mc.vram_location; return r;
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
return -EINVAL;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
if (tiling_flags & RADEON_TILING_MICRO)
DRM_ERROR("trying to scanout microtiled buffer\n");
/* if scanout was in GTT this really wouldn't work */ /* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */ /* crtc offset is from display base addr not FB location */
@@ -451,12 +479,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
(crtc->fb->bits_per_pixel * 8)); (crtc->fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16; crtc_pitch |= crtc_pitch << 16;
// radeon_object_get_tiling_flags(obj->driver_private,
// &tiling_flags, NULL);
tiling_flags = 0;
if (tiling_flags & RADEON_TILING_MICRO)
DRM_ERROR("trying to scanout microtiled buffer\n");
if (tiling_flags & RADEON_TILING_MACRO) { if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev)) if (ASIC_IS_R300(rdev))
@@ -532,10 +554,15 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset); WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
// if (old_fb && old_fb != crtc->fb) { if (old_fb && old_fb != crtc->fb) {
// radeon_fb = to_radeon_framebuffer(old_fb); radeon_fb = to_radeon_framebuffer(old_fb);
// radeon_gem_object_unpin(radeon_fb->obj); rbo = radeon_fb->obj->driver_private;
// } r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
/* Bytes per pixel may have changed */ /* Bytes per pixel may have changed */
radeon_bandwidth_update(rdev); radeon_bandwidth_update(rdev);
@@ -646,12 +673,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc2_gen_cntl; uint32_t crtc2_gen_cntl;
uint32_t disp2_merge_cntl; uint32_t disp2_merge_cntl;
/* check to see if TV DAC is enabled for another crtc and keep it enabled */ /* if TV DAC is enabled for another crtc, keep it enabled */
if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON) crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
else
crtc2_gen_cntl = 0;
crtc2_gen_cntl |= ((format << 8) crtc2_gen_cntl |= ((format << 8)
| RADEON_CRTC2_VSYNC_DIS | RADEON_CRTC2_VSYNC_DIS
| RADEON_CRTC2_HSYNC_DIS | RADEON_CRTC2_HSYNC_DIS
@@ -680,7 +703,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc_ext_cntl; uint32_t crtc_ext_cntl;
uint32_t disp_merge_cntl; uint32_t disp_merge_cntl;
crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
| (format << 8) | (format << 8)
| RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_DISP_REQ_EN_B
| ((mode->flags & DRM_MODE_FLAG_DBLSCAN) | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -783,6 +807,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
if (!rdev->is_atom_bios) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) { if (lvds) {
@@ -794,6 +819,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
use_bios_divs = true; use_bios_divs = true;
} }
} }
}
pll_flags |= RADEON_PLL_USE_REF_DIV; pll_flags |= RADEON_PLL_USE_REF_DIV;
} }
} }
@@ -1031,6 +1057,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
radeon_crtc_set_base(crtc, x, y, old_fb); radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_set_crtc_timing(crtc, adjusted_mode); radeon_set_crtc_timing(crtc, adjusted_mode);
radeon_set_pll(crtc, adjusted_mode); radeon_set_pll(crtc, adjusted_mode);
radeon_overscan_setup(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0) { if (radeon_crtc->crtc_id == 0) {
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
} else { } else {
@@ -1046,12 +1073,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
static void radeon_crtc_prepare(struct drm_crtc *crtc) static void radeon_crtc_prepare(struct drm_crtc *crtc)
{ {
radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
/*
* The hardware wedges sometimes if you reconfigure one CRTC
* whilst another is running (see fdo bug #24611).
*/
list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
} }
static void radeon_crtc_commit(struct drm_crtc *crtc) static void radeon_crtc_commit(struct drm_crtc *crtc)
{ {
radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON); struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
/*
* Reenable the CRTCs that should be running.
*/
list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
if (crtci->enabled)
radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
}
} }
static const struct drm_crtc_helper_funcs legacy_helper_funcs = { static const struct drm_crtc_helper_funcs legacy_helper_funcs = {


@@ -136,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
if ((!rdev->is_atom_bios)) { if (rdev->is_atom_bios) {
/* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl;
* we need to call that on resume to set up the reg properly.
*/
radeon_encoder->pixel_clock = adjusted_mode->clock;
atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
} else {
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) { if (lvds) {
DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
@@ -147,8 +154,7 @@
(lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
} else } else
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
} else }
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
lvds_gen_cntl &= ~(RADEON_LVDS_ON | lvds_gen_cntl &= ~(RADEON_LVDS_ON |
RADEON_LVDS_BLON | RADEON_LVDS_BLON |
@@ -184,7 +190,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
} }
static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) struct drm_display_mode *adjusted_mode)
{ {
@@ -194,15 +200,22 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
radeon_encoder_set_active_device(encoder); radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0); drm_mode_set_crtcinfo(adjusted_mode, 0);
if (radeon_encoder->rmx_type != RMX_OFF) /* get the native mode for LVDS */
radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
int mode_id = adjusted_mode->base.id;
*adjusted_mode = *native_mode;
adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay;
adjusted_mode->base.id = mode_id;
}
return true; return true;
} }
static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.dpms = radeon_legacy_lvds_dpms, .dpms = radeon_legacy_lvds_dpms,
.mode_fixup = radeon_legacy_lvds_mode_fixup, .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_lvds_prepare, .prepare = radeon_legacy_lvds_prepare,
.mode_set = radeon_legacy_lvds_mode_set, .mode_set = radeon_legacy_lvds_mode_set,
.commit = radeon_legacy_lvds_commit, .commit = radeon_legacy_lvds_commit,
@@ -214,17 +227,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
.destroy = radeon_enc_destroy, .destroy = radeon_enc_destroy,
}; };
static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
{ {
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
@@ -410,7 +412,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
.dpms = radeon_legacy_primary_dac_dpms, .dpms = radeon_legacy_primary_dac_dpms,
.mode_fixup = radeon_legacy_primary_dac_mode_fixup, .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_primary_dac_prepare, .prepare = radeon_legacy_primary_dac_prepare,
.mode_set = radeon_legacy_primary_dac_mode_set, .mode_set = radeon_legacy_primary_dac_mode_set,
.commit = radeon_legacy_primary_dac_commit, .commit = radeon_legacy_primary_dac_commit,
@@ -423,16 +425,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
.destroy = radeon_enc_destroy, .destroy = radeon_enc_destroy,
}; };
static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
{ {
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
@@ -584,7 +576,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
.dpms = radeon_legacy_tmds_int_dpms, .dpms = radeon_legacy_tmds_int_dpms,
.mode_fixup = radeon_legacy_tmds_int_mode_fixup, .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_int_prepare, .prepare = radeon_legacy_tmds_int_prepare,
.mode_set = radeon_legacy_tmds_int_mode_set, .mode_set = radeon_legacy_tmds_int_mode_set,
.commit = radeon_legacy_tmds_int_commit, .commit = radeon_legacy_tmds_int_commit,
@@ -596,17 +588,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
.destroy = radeon_enc_destroy, .destroy = radeon_enc_destroy,
}; };
static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
{ {
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
@@ -697,6 +678,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
/*if (mode->clock > 165000) /*if (mode->clock > 165000)
fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
} }
if (!radeon_combios_external_tmds_setup(encoder))
radeon_external_tmds_setup(encoder);
} }
if (radeon_crtc->crtc_id == 0) { if (radeon_crtc->crtc_id == 0) {
@@ -724,9 +707,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
} }
static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
if (tmds) {
if (tmds->i2c_bus)
radeon_i2c_destroy(tmds->i2c_bus);
}
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
}
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
.dpms = radeon_legacy_tmds_ext_dpms, .dpms = radeon_legacy_tmds_ext_dpms,
.mode_fixup = radeon_legacy_tmds_ext_mode_fixup, .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_ext_prepare, .prepare = radeon_legacy_tmds_ext_prepare,
.mode_set = radeon_legacy_tmds_ext_mode_set, .mode_set = radeon_legacy_tmds_ext_mode_set,
.commit = radeon_legacy_tmds_ext_commit, .commit = radeon_legacy_tmds_ext_commit,
@@ -735,20 +731,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
.destroy = radeon_enc_destroy, .destroy = radeon_ext_tmds_enc_destroy,
}; };
static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
{ {
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
@@ -1265,7 +1250,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
.dpms = radeon_legacy_tv_dac_dpms, .dpms = radeon_legacy_tv_dac_dpms,
.mode_fixup = radeon_legacy_tv_dac_mode_fixup, .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tv_dac_prepare, .prepare = radeon_legacy_tv_dac_prepare,
.mode_set = radeon_legacy_tv_dac_mode_set, .mode_set = radeon_legacy_tv_dac_mode_set,
.commit = radeon_legacy_tv_dac_commit, .commit = radeon_legacy_tv_dac_commit,
@@ -1302,6 +1287,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
return tmds; return tmds;
} }
static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder_ext_tmds *tmds = NULL;
bool ret;
if (rdev->is_atom_bios)
return NULL;
tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
if (!tmds)
return NULL;
ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
if (ret == false)
radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
return tmds;
}
void void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{ {
@@ -1329,7 +1337,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
encoder->possible_crtcs = 0x1; encoder->possible_crtcs = 0x1;
else else
encoder->possible_crtcs = 0x3; encoder->possible_crtcs = 0x3;
encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL; radeon_encoder->enc_priv = NULL;
@@ -1373,7 +1380,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
if (!rdev->is_atom_bios) if (!rdev->is_atom_bios)
radeon_combios_get_ext_tmds_info(radeon_encoder); radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
break; break;
} }
} }


@@ -33,6 +33,7 @@
#include <drm_crtc.h> #include <drm_crtc.h>
#include <drm_mode.h> #include <drm_mode.h>
#include <drm_edid.h> #include <drm_edid.h>
#include <drm_dp_helper.h>
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/i2c-id.h> #include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h> #include <linux/i2c-algo-bit.h>
@@ -89,24 +90,45 @@ enum radeon_tv_std {
TV_STD_PAL_CN, TV_STD_PAL_CN,
}; };
/* radeon gpio-based i2c
* 1. "mask" reg and bits
* grabs the gpio pins for software use
* 0=not held 1=held
* 2. "a" reg and bits
* output pin value
* 0=low 1=high
* 3. "en" reg and bits
* sets the pin direction
* 0=input 1=output
* 4. "y" reg and bits
* input pin value
* 0=low 1=high
*/
struct radeon_i2c_bus_rec { struct radeon_i2c_bus_rec {
bool valid; bool valid;
/* id used by atom */
uint8_t i2c_id;
/* can be used with hw i2c engine */
bool hw_capable;
/* uses multi-media i2c engine */
bool mm_i2c;
/* regs and bits */
uint32_t mask_clk_reg; uint32_t mask_clk_reg;
uint32_t mask_data_reg; uint32_t mask_data_reg;
uint32_t a_clk_reg; uint32_t a_clk_reg;
uint32_t a_data_reg; uint32_t a_data_reg;
uint32_t put_clk_reg; uint32_t en_clk_reg;
uint32_t put_data_reg; uint32_t en_data_reg;
uint32_t get_clk_reg; uint32_t y_clk_reg;
uint32_t get_data_reg; uint32_t y_data_reg;
uint32_t mask_clk_mask; uint32_t mask_clk_mask;
uint32_t mask_data_mask; uint32_t mask_data_mask;
uint32_t put_clk_mask;
uint32_t put_data_mask;
uint32_t get_clk_mask;
uint32_t get_data_mask;
uint32_t a_clk_mask; uint32_t a_clk_mask;
uint32_t a_data_mask; uint32_t a_data_mask;
uint32_t en_clk_mask;
uint32_t en_data_mask;
uint32_t y_clk_mask;
uint32_t y_data_mask;
}; };
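
In practice the four register groups drive an open-drain line: the "a" output bits are parked at 0 once, the line is then pulled low or released purely through the "en" direction bits, and "y" reads the pin back. A short sketch of that sequence for the clock line (editorial illustration, assuming rec is a struct radeon_i2c_bus_rec * and scl an int; only fields defined above are used):

/* park the output latch low once */
WREG32(rec->a_clk_reg, RREG32(rec->a_clk_reg) & ~rec->a_clk_mask);
/* release SCL: direction = input, the external pull-up takes it high */
WREG32(rec->en_clk_reg, RREG32(rec->en_clk_reg) & ~rec->en_clk_mask);
/* drive SCL low: direction = output, the latched 0 reaches the pin */
WREG32(rec->en_clk_reg, RREG32(rec->en_clk_reg) | rec->en_clk_mask);
/* sample SCL via "y" (covers clock stretching by the slave) */
scl = (RREG32(rec->y_clk_reg) & rec->y_clk_mask) != 0;
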
struct radeon_tmds_pll { struct radeon_tmds_pll {
@@ -150,9 +172,12 @@ struct radeon_pll {
}; };
struct radeon_i2c_chan { struct radeon_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev; struct drm_device *dev;
struct i2c_adapter adapter; union {
struct i2c_algo_bit_data algo; struct i2c_algo_dp_aux_data dp;
struct i2c_algo_bit_data bit;
} algo;
struct radeon_i2c_bus_rec rec; struct radeon_i2c_bus_rec rec;
}; };
@@ -170,6 +195,11 @@ enum radeon_connector_table {
CT_EMAC, CT_EMAC,
}; };
enum radeon_dvo_chip {
DVO_SIL164,
DVO_SIL1178,
};
struct radeon_mode_info { struct radeon_mode_info {
struct atom_context *atom_context; struct atom_context *atom_context;
struct card_info *atom_card_info; struct card_info *atom_card_info;
@@ -209,7 +239,7 @@ struct radeon_crtc {
bool enabled; bool enabled;
bool can_tile; bool can_tile;
uint32_t crtc_offset; uint32_t crtc_offset;
// struct drm_gem_object *cursor_bo; struct drm_gem_object *cursor_bo;
uint64_t cursor_addr; uint64_t cursor_addr;
int cursor_width; int cursor_width;
int cursor_height; int cursor_height;
@@ -261,6 +291,13 @@ struct radeon_encoder_int_tmds {
struct radeon_tmds_pll tmds_pll[4]; struct radeon_tmds_pll tmds_pll[4];
}; };
struct radeon_encoder_ext_tmds {
/* tmds over dvo */
struct radeon_i2c_chan *i2c_bus;
uint8_t slave_addr;
enum radeon_dvo_chip dvo_chip;
};
/* spread spectrum */ /* spread spectrum */
struct radeon_atom_ss { struct radeon_atom_ss {
uint16_t percentage; uint16_t percentage;
@@ -302,6 +339,35 @@ struct radeon_encoder {
struct radeon_connector_atom_dig { struct radeon_connector_atom_dig {
uint32_t igp_lane_info; uint32_t igp_lane_info;
bool linkb; bool linkb;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
u8 dpcd[8];
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
};
struct radeon_gpio_rec {
bool valid;
u8 id;
u32 reg;
u32 mask;
};
enum radeon_hpd_id {
RADEON_HPD_NONE = 0,
RADEON_HPD_1,
RADEON_HPD_2,
RADEON_HPD_3,
RADEON_HPD_4,
RADEON_HPD_5,
RADEON_HPD_6,
};
struct radeon_hpd {
enum radeon_hpd_id hpd;
u8 plugged_state;
struct radeon_gpio_rec gpio;
}; };
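
The gpio record gives later code everything needed to sense a hotplug pin. A hypothetical polling helper (illustration only, not part of this commit) might look like:

/* Hypothetical helper for illustration: sense a hotplug pin. */
static bool radeon_hpd_sense_sketch(struct radeon_device *rdev,
				    struct radeon_gpio_rec *gpio)
{
	if (!gpio->valid)
		return false;
	return (RREG32(gpio->reg) & gpio->mask) != 0; /* high = connected */
}
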
struct radeon_connector { struct radeon_connector {
@@ -318,6 +384,7 @@ struct radeon_connector {
void *con_priv; void *con_priv;
bool dac_load_detect; bool dac_load_detect;
uint16_t connector_object_id; uint16_t connector_object_id;
struct radeon_hpd hpd;
}; };
struct radeon_framebuffer { struct radeon_framebuffer {
@@ -325,10 +392,37 @@ struct radeon_framebuffer {
struct drm_gem_object *obj; struct drm_gem_object *obj;
}; };
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
struct drm_display_mode *mode);
extern void radeon_dp_set_link_config(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte);
extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec, struct radeon_i2c_bus_rec *rec,
const char *name); const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 *val);
extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
u8 slave_addr,
u8 addr,
u8 val);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -343,12 +437,24 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *post_div_p, uint32_t *post_div_p,
int flags); int flags);
extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p,
int flags);
extern void radeon_setup_encoder_clones(struct drm_device *dev);
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder); extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
@@ -378,12 +484,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev); extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig * extern struct radeon_encoder_atom_dig *
radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
struct radeon_encoder_int_tmds *tmds); struct radeon_encoder_int_tmds *tmds);
bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
struct radeon_encoder_int_tmds *tmds); struct radeon_encoder_int_tmds *tmds);
bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
struct radeon_encoder_int_tmds *tmds); struct radeon_encoder_int_tmds *tmds);
extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
struct radeon_encoder_ext_tmds *tmds);
extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
struct radeon_encoder_ext_tmds *tmds);
extern struct radeon_encoder_primary_dac * extern struct radeon_encoder_primary_dac *
radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_tv_dac * extern struct radeon_encoder_tv_dac *
@@ -395,6 +505,8 @@ extern struct radeon_encoder_tv_dac *
radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_primary_dac * extern struct radeon_encoder_primary_dac *
radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder); radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
@@ -426,16 +538,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc); struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev, void radeon_legacy_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc); struct radeon_crtc *radeon_crtc);
void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state); extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
void radeon_get_clock_info(struct drm_device *dev); void radeon_get_clock_info(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev); extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev); extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void radeon_enc_destroy(struct drm_encoder *encoder); void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev); void radeon_combios_asic_init(struct drm_device *dev);


@@ -28,328 +28,152 @@
#ifndef __RADEON_OBJECT_H__ #ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__ #define __RADEON_OBJECT_H__
//#include <ttm/ttm_bo_api.h> #include <drm/radeon_drm.h>
//#include <ttm/ttm_bo_driver.h> #include "radeon.h"
//#include <ttm/ttm_placement.h>
//#include <ttm/ttm_module.h>
/* /**
* TTM. * radeon_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
*
* Returns corresponding domain of the ttm mem_type
*/ */
//struct radeon_mman { static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
// struct ttm_global_reference mem_global_ref;
// bool mem_global_referenced;
// struct ttm_bo_device bdev;
//};
#define TTM_PL_SYSTEM 0
#define TTM_PL_TT 1
#define TTM_PL_VRAM 2
#define TTM_PL_PRIV0 3
#define TTM_PL_PRIV1 4
#define TTM_PL_PRIV2 5
#define TTM_PL_PRIV3 6
#define TTM_PL_PRIV4 7
#define TTM_PL_PRIV5 8
#define TTM_PL_SWAPPED 15
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM 0x0000FFFF
struct ttm_mem_type_manager {
/*
* No protection. Constant from start.
*/
bool has_type;
bool use_type;
uint32_t flags;
unsigned long gpu_offset;
unsigned long io_offset;
unsigned long io_size;
void *io_addr;
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
/*
* Protected by the bdev->lru_lock.
* TODO: Consider one lru_lock per ttm_mem_type_manager.
* Plays ill with list removal, though.
*/
struct drm_mm manager;
struct list_head lru;
};
struct ttm_bo_driver {
const uint32_t *mem_type_prio;
const uint32_t *mem_busy_prio;
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
/**
* struct ttm_bo_driver member create_ttm_backend_entry
*
* @bdev: The buffer object device.
*
* Create a driver specific struct ttm_backend.
*/
// struct ttm_backend *(*create_ttm_backend_entry)(struct ttm_bo_device *bdev);
/**
* struct ttm_bo_driver member invalidate_caches
*
* @bdev: the buffer object device.
* @flags: new placement of the rebound buffer object.
*
* A previously evicted buffer has been rebound in a
* potentially new location. Tell the driver that it might
* consider invalidating read (texture) caches on the next command
* submission as a consequence.
*/
// int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
// int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
// struct ttm_mem_type_manager *man);
/**
* struct ttm_bo_driver member evict_flags:
*
* @bo: the buffer object to be evicted
*
* Return the bo flags for a buffer which is not mapped to the hardware.
* These will be placed in proposed_flags so that when the move is
* finished, they'll end up in bo->mem.flags
*/
// uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
/**
* struct ttm_bo_driver member move:
*
* @bo: the buffer to move
* @evict: whether this motion is evicting the buffer from
* the graphics address space
* @interruptible: Use interruptible sleeps if possible when sleeping.
* @no_wait: whether this should give up and return -EBUSY
* if this move would require sleeping
* @new_mem: the new memory region receiving the buffer
*
* Move a buffer between two memory regions.
*/
// int (*move) (struct ttm_buffer_object *bo,
// bool evict, bool interruptible,
// bool no_wait, struct ttm_mem_reg *new_mem);
/**
* struct ttm_bo_driver_member verify_access
*
* @bo: Pointer to a buffer object.
* @filp: Pointer to a struct file trying to access the object.
*
* Called from the map / write / read methods to verify that the
* caller is permitted to access the buffer object.
* This member may be set to NULL, which will refuse this kind of
* access for all buffer objects.
* This function should return 0 if access is granted, -EPERM otherwise.
*/
// int (*verify_access) (struct ttm_buffer_object *bo,
// struct file *filp);
/**
* In case a driver writer dislikes the TTM fence objects,
* the driver writer can replace those with sync objects of
* his / her own. If it turns out that no driver writer is
* using these, I suggest we remove these hooks and plug in
* fences directly. The bo driver needs the following functionality:
* See the corresponding functions in the fence object API
* documentation.
*/
// bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
// int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
// bool lazy, bool interruptible);
// int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
// void (*sync_obj_unref) (void **sync_obj);
// void *(*sync_obj_ref) (void *sync_obj);
};
#define TTM_NUM_MEM_TYPES 8
struct ttm_bo_device {
/*
* Constant after bo device init / atomic.
*/
// struct ttm_mem_global *mem_glob;
struct ttm_bo_driver *driver;
// struct page *dummy_read_page;
// struct ttm_mem_shrink shrink;
size_t ttm_bo_extra_size;
size_t ttm_bo_size;
// rwlock_t vm_lock;
/*
* Protected by the vm lock.
*/
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
// struct rb_root addr_space_rb;
struct drm_mm addr_space_mm;
/*
* Might want to change this to one lock per manager.
*/
// spinlock_t lru_lock;
/*
* Protected by the lru lock.
*/
struct list_head ddestroy;
struct list_head swap_lru;
/*
* Protected by load / firstopen / lastclose / unload sync.
*/
bool nice_mode;
// struct address_space *dev_mapping;
/*
* Internal protection.
*/
// struct delayed_work wq;
};
struct ttm_mem_reg {
struct drm_mm_node *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
};
enum ttm_bo_type {
ttm_bo_type_device,
ttm_bo_type_user,
ttm_bo_type_kernel
};
struct ttm_buffer_object {
/**
* Members constant at init.
*/
struct ttm_bo_device *bdev;
unsigned long buffer_start;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
unsigned long num_pages;
uint64_t addr_space_offset;
size_t acc_size;
/**
* Members not needing protection.
*/
// struct kref kref;
// struct kref list_kref;
// wait_queue_head_t event_queue;
// spinlock_t lock;
/**
* Members protected by the bo::reserved lock.
*/
uint32_t proposed_placement;
struct ttm_mem_reg mem;
// struct file *persistant_swap_storage;
// struct ttm_tt *ttm;
bool evicted;
/**
* Members protected by the bo::reserved lock only when written to.
*/
// atomic_t cpu_writers;
/**
* Members protected by the bdev::lru_lock.
*/
struct list_head lru;
struct list_head ddestroy;
struct list_head swap;
uint32_t val_seq;
bool seq_valid;
/**
* Members protected by the bdev::lru_lock
* only when written to.
*/
// atomic_t reserved;
/**
* Members protected by the bo::lock
*/
void *sync_obj_arg;
void *sync_obj;
unsigned long priv_flags;
/**
* Members protected by the bdev::vm_lock
*/
// struct rb_node vm_rb;
struct drm_mm_node *vm_node;
/**
* Special members that are protected by the reserve lock
* and the bo::lock when written to. Can be read with
* either of these locks held.
*/
unsigned long offset;
uint32_t cur_placement;
};
struct radeon_object
{ {
struct ttm_buffer_object tobj; switch (mem_type) {
struct list_head list; case TTM_PL_VRAM:
struct radeon_device *rdev; return RADEON_GEM_DOMAIN_VRAM;
struct drm_gem_object *gobj; case TTM_PL_TT:
// struct ttm_bo_kmap_obj kmap; return RADEON_GEM_DOMAIN_GTT;
case TTM_PL_SYSTEM:
return RADEON_GEM_DOMAIN_CPU;
default:
break;
}
return 0;
}
unsigned pin_count; /**
uint64_t gpu_addr; * radeon_bo_reserve - reserve bo
void *kptr; * @bo: bo structure
bool is_iomem; * @no_wait: don't sleep while trying to reserve (return -EBUSY)
*
* Returns:
* -EBUSY: buffer is busy and @no_wait is true
* -ERESTART: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
int r;
struct drm_mm_node *mm_node; retry:
u32_t vm_addr; r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
u32_t cpu_addr; if (unlikely(r != 0)) {
u32_t flags; if (r == -ERESTART)
}; goto retry;
dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
}
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
ttm_bo_unreserve(&bo->tbo);
}
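
These two inlines establish the reserve/operate/unreserve discipline that the radeon_crtc_set_base hunk earlier in this commit follows. A condensed sketch of that pattern, using only APIs declared in this header (the wrapper name pin_scanout_bo is illustrative):

/* Editorial sketch of the locking pattern around pinning. */
static int pin_scanout_bo(struct radeon_bo *rbo, u64 *gpu_addr)
{
	int r = radeon_bo_reserve(rbo, false);	/* may sleep */
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
	radeon_bo_unreserve(rbo);	/* drop the reservation either way */
	return r;
}
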
/**
* radeon_bo_gpu_offset - return GPU offset of bo
* @bo: radeon object for which we query the offset
*
* Returns current GPU offset of the object.
*
* Note: object should either be pinned or reserved when calling this
* function; it might be useful to add a check for this for debugging.
*/
static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
{
return bo->tbo.offset;
}
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
{
return bo->tbo.num_pages << PAGE_SHIFT;
}
static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
{
return !!atomic_read(&bo->tbo.reserved);
}
/**
* radeon_bo_mmap_offset - return mmap offset of bo
* @bo: radeon object for which we query the offset
*
* Returns mmap offset of the object.
*
* Note: addr_space_offset is constant after ttm bo init, thus isn't protected
* by any lock.
*/
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
return bo->tbo.addr_space_offset;
}
static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
bool no_wait)
{
int r;
retry:
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
if (r == -ERESTART)
goto retry;
dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
return r;
}
spin_lock(&bo->tbo.lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
if (bo->tbo.sync_obj)
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
spin_unlock(&bo->tbo.lock);
ttm_bo_unreserve(&bo->tbo);
if (unlikely(r == -ERESTART))
goto retry;
return r;
}
extern int radeon_bo_create(struct radeon_device *rdev,
struct drm_gem_object *gobj, unsigned long size,
bool kernel, u32 domain,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_unpin(struct radeon_bo *bo);
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
extern int radeon_bo_list_reserve(struct list_head *head);
extern void radeon_bo_list_unreserve(struct list_head *head);
extern int radeon_bo_list_validate(struct list_head *head, void *fence);
extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
u32 tiling_flags, u32 pitch);
extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
u32 *tiling_flags, u32 *pitch);
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif #endif

View File

@@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev);
int radeon_pm_init(struct radeon_device *rdev) int radeon_pm_init(struct radeon_device *rdev)
{ {
if (radeon_debugfs_pm_init(rdev)) { if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n"); DRM_ERROR("Failed to register debugfs file for PM!\n");
} }
return 0; return 0;
@@ -44,8 +44,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev)); seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev)); seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
return 0; return 0;
} }
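/* Editorial note: the trailing '0' in the "%u0 kHz" format strings above
 * is deliberate; the radeon_get_*_clock() getters are assumed to report
 * clocks in 10 kHz units, so printing the raw value followed by a
 * literal zero yields kHz. */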

View File

@@ -887,6 +887,7 @@
# define RADEON_FP_PANEL_FORMAT (1 << 3) # define RADEON_FP_PANEL_FORMAT (1 << 3)
# define RADEON_FP_EN_TMDS (1 << 7) # define RADEON_FP_EN_TMDS (1 << 7)
# define RADEON_FP_DETECT_SENSE (1 << 8) # define RADEON_FP_DETECT_SENSE (1 << 8)
# define RADEON_FP_DETECT_INT_POL (1 << 9)
# define R200_FP_SOURCE_SEL_MASK (3 << 10) # define R200_FP_SOURCE_SEL_MASK (3 << 10)
# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10) # define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10) # define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
@@ -894,6 +895,7 @@
# define R200_FP_SOURCE_SEL_TRANS (3 << 10) # define R200_FP_SOURCE_SEL_TRANS (3 << 10)
# define RADEON_FP_SEL_CRTC1 (0 << 13) # define RADEON_FP_SEL_CRTC1 (0 << 13)
# define RADEON_FP_SEL_CRTC2 (1 << 13) # define RADEON_FP_SEL_CRTC2 (1 << 13)
# define R300_HPD_SEL(x) ((x) << 13)
# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15) # define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16) # define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17) # define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
@@ -909,6 +911,7 @@
# define RADEON_FP2_ON (1 << 2) # define RADEON_FP2_ON (1 << 2)
# define RADEON_FP2_PANEL_FORMAT (1 << 3) # define RADEON_FP2_PANEL_FORMAT (1 << 3)
# define RADEON_FP2_DETECT_SENSE (1 << 8) # define RADEON_FP2_DETECT_SENSE (1 << 8)
# define RADEON_FP2_DETECT_INT_POL (1 << 9)
# define R200_FP2_SOURCE_SEL_MASK (3 << 10) # define R200_FP2_SOURCE_SEL_MASK (3 << 10)
# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10) # define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10) # define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
@@ -988,14 +991,20 @@
#define RADEON_GEN_INT_CNTL 0x0040 #define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0) # define RADEON_CRTC_VBLANK_MASK (1 << 0)
# define RADEON_FP_DETECT_MASK (1 << 4)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9) # define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_FP2_DETECT_MASK (1 << 10)
# define RADEON_SW_INT_ENABLE (1 << 25) # define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044 #define RADEON_GEN_INT_STATUS 0x0044
# define AVIVO_DISPLAY_INT_STATUS (1 << 0) # define AVIVO_DISPLAY_INT_STATUS (1 << 0)
# define RADEON_CRTC_VBLANK_STAT (1 << 0) # define RADEON_CRTC_VBLANK_STAT (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) # define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
# define RADEON_FP_DETECT_STAT (1 << 4)
# define RADEON_FP_DETECT_STAT_ACK (1 << 4)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9) # define RADEON_CRTC2_VBLANK_STAT (1 << 9)
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_FP2_DETECT_STAT (1 << 10)
# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
# define RADEON_SW_INT_FIRE (1 << 26) # define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25) # define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25) # define RADEON_SW_INT_TEST_ACK (1 << 25)
@@ -1051,20 +1060,25 @@
/* Multimedia I2C bus */ /* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090 #define RADEON_I2C_CNTL_0 0x0090
#define RADEON_I2C_DONE (1<<0) #define RADEON_I2C_DONE (1 << 0)
#define RADEON_I2C_NACK (1<<1) #define RADEON_I2C_NACK (1 << 1)
#define RADEON_I2C_HALT (1<<2) #define RADEON_I2C_HALT (1 << 2)
#define RADEON_I2C_SOFT_RST (1<<5) #define RADEON_I2C_SOFT_RST (1 << 5)
#define RADEON_I2C_DRIVE_EN (1<<6) #define RADEON_I2C_DRIVE_EN (1 << 6)
#define RADEON_I2C_DRIVE_SEL (1<<7) #define RADEON_I2C_DRIVE_SEL (1 << 7)
#define RADEON_I2C_START (1<<8) #define RADEON_I2C_START (1 << 8)
#define RADEON_I2C_STOP (1<<9) #define RADEON_I2C_STOP (1 << 9)
#define RADEON_I2C_RECEIVE (1<<10) #define RADEON_I2C_RECEIVE (1 << 10)
#define RADEON_I2C_ABORT (1<<11) #define RADEON_I2C_ABORT (1 << 11)
#define RADEON_I2C_GO (1<<12) #define RADEON_I2C_GO (1 << 12)
#define RADEON_I2C_PRESCALE_SHIFT 16
#define RADEON_I2C_CNTL_1 0x0094 #define RADEON_I2C_CNTL_1 0x0094
#define RADEON_I2C_SEL (1<<16) #define RADEON_I2C_DATA_COUNT_SHIFT 0
#define RADEON_I2C_EN (1<<17) #define RADEON_I2C_ADDR_COUNT_SHIFT 4
#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
#define RADEON_I2C_SEL (1 << 16)
#define RADEON_I2C_EN (1 << 17)
#define RADEON_I2C_TIME_LIMIT_SHIFT 24
#define RADEON_I2C_DATA 0x0098 #define RADEON_I2C_DATA 0x0098
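/* Editorial note: the *_SHIFT constants above position multi-bit fields
 * inside the I2C controller registers. A hypothetical CNTL_1 value for a
 * one-byte address phase and one data byte on the enabled controller:
 *
 *	cntl_1 = (1 << RADEON_I2C_DATA_COUNT_SHIFT) |
 *		 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
 *		 RADEON_I2C_EN;
 */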
#define RADEON_DVI_I2C_CNTL_0 0x02e0 #define RADEON_DVI_I2C_CNTL_0 0x02e0
@@ -1072,7 +1086,7 @@
# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ # define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ # define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ # define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */ #define RADEON_DVI_I2C_CNTL_1 0x02e4
#define RADEON_DVI_I2C_DATA 0x02e8 #define RADEON_DVI_I2C_DATA 0x02e8
#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ #define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
@@ -1143,15 +1157,16 @@
# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) # define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) # define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) # define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
#define RADEON_LCD_GPIO_MASK 0x01a0
#define RADEON_GPIOPAD_EN 0x01a0
#define RADEON_LCD_GPIO_Y_REG 0x01a4
#define RADEON_MDGPIO_A_REG 0x01ac
#define RADEON_MDGPIO_EN_REG 0x01b0
#define RADEON_MDGPIO_MASK 0x0198
#define RADEON_GPIOPAD_MASK 0x0198 #define RADEON_GPIOPAD_MASK 0x0198
#define RADEON_GPIOPAD_A 0x019c #define RADEON_GPIOPAD_A 0x019c
#define RADEON_MDGPIO_Y_REG 0x01b4 #define RADEON_GPIOPAD_EN 0x01a0
#define RADEON_GPIOPAD_Y 0x01a4
#define RADEON_MDGPIO_MASK 0x01a8
#define RADEON_MDGPIO_A 0x01ac
#define RADEON_MDGPIO_EN 0x01b0
#define RADEON_MDGPIO_Y 0x01b4
#define RADEON_MEM_ADDR_CONFIG 0x0148 #define RADEON_MEM_ADDR_CONFIG 0x0148
#define RADEON_MEM_BASE 0x0f10 /* PCI */ #define RADEON_MEM_BASE 0x0f10 /* PCI */
#define RADEON_MEM_CNTL 0x0140 #define RADEON_MEM_CNTL 0x0140
@@ -1360,6 +1375,9 @@
#define RADEON_OVR_CLR 0x0230 #define RADEON_OVR_CLR 0x0230
#define RADEON_OVR_WID_LEFT_RIGHT 0x0234 #define RADEON_OVR_WID_LEFT_RIGHT 0x0234
#define RADEON_OVR_WID_TOP_BOTTOM 0x0238 #define RADEON_OVR_WID_TOP_BOTTOM 0x0238
#define RADEON_OVR2_CLR 0x0330
#define RADEON_OVR2_WID_LEFT_RIGHT 0x0334
#define RADEON_OVR2_WID_TOP_BOTTOM 0x0338
/* first capture unit */ /* first capture unit */

View File

@@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
u32 tmp; u32 tmp;
/* Setup GPU memory space */ /* Setup GPU memory space */
tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); tmp = RREG32(R_00015C_NB_TOM);
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL; rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev); r = radeon_mc_setup(rdev);
@@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev)
r300_clock_startup(rdev); r300_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */ /* Initialize GPU configuration (# pipes, ...) */
rs400_gpu_init(rdev); rs400_gpu_init(rdev);
r100_enable_bm(rdev);
/* Initialize GART (initialize after TTM so we can allocate /* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */ * memory through TTM but finalize after TTM) */
r = rs400_gart_enable(rdev); r = rs400_gart_enable(rdev);
if (r) if (r)
return r; return r;
/* Enable IRQ */ /* Enable IRQ */
// rdev->irq.sw_int = true;
// r100_irq_set(rdev); // r100_irq_set(rdev);
/* 1M ring buffer */ /* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024); // r = r100_cp_init(rdev, 1024 * 1024);
@@ -447,10 +447,9 @@ int rs400_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT)); RREG32(R_0007C0_CP_STAT));
} }
/* check if cards are posted or not */ /* check if cards are posted or not */
if (!radeon_card_posted(rdev) && rdev->bios) { if (radeon_boot_test_post_card(rdev) == false)
DRM_INFO("GPU not posted. posting now...\n"); return -EINVAL;
radeon_combios_asic_init(rdev->ddev);
}
/* Initialize clocks */ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev); radeon_get_clock_info(rdev->ddev);
/* Get vram information */ /* Get vram information */
@@ -467,7 +466,7 @@ int rs400_init(struct radeon_device *rdev)
// if (r) // if (r)
// return r; // return r;
/* Memory manager */ /* Memory manager */
r = radeon_object_init(rdev); r = radeon_bo_init(rdev);
if (r) if (r)
return r; return r;
r = rs400_gart_init(rdev); r = rs400_gart_init(rdev);

View File

@@ -45,6 +45,122 @@
void rs600_gpu_init(struct radeon_device *rdev); void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev);
int rs600_mc_init(struct radeon_device *rdev)
{
/* read back the MC value from the hw */
int r;
u32 tmp;
/* Setup GPU memory space */
tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xffffffffUL;
r = radeon_mc_setup(rdev);
if (r)
return r;
return 0;
}
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
u32 tmp;
bool connected = false;
switch (hpd) {
case RADEON_HPD_1:
tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
connected = true;
break;
case RADEON_HPD_2:
tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
connected = true;
break;
default:
break;
}
return connected;
}
void rs600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
u32 tmp;
bool connected = rs600_hpd_sense(rdev, hpd);
switch (hpd) {
case RADEON_HPD_1:
tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
if (connected)
tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
else
tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
break;
case RADEON_HPD_2:
tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
if (connected)
tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
else
tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
break;
default:
break;
}
}
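/* Editorial note: the polarity flip above arms the interrupt for the
 * opposite of the current state, so a connected pad fires on unplug and
 * a disconnected pad fires on plug; re-running rs600_hpd_set_polarity()
 * after each event keeps detection armed. */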
void rs600_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
rdev->irq.hpd[1] = true;
break;
default:
break;
}
}
rs600_irq_set(rdev);
}
void rs600_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
rdev->irq.hpd[1] = false;
break;
default:
break;
}
}
}
/* /*
* GART. * GART.
*/ */
@@ -102,38 +218,38 @@ int rs600_gart_enable(struct radeon_device *rdev)
WREG32_MC(R_000100_MC_PT0_CNTL, WREG32_MC(R_000100_MC_PT0_CNTL,
(S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
for (i = 0; i < 19; i++) { for (i = 0; i < 19; i++) {
WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
S_00016C_SYSTEM_ACCESS_MODE_MASK( S_00016C_SYSTEM_ACCESS_MODE_MASK(
V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
} }
/* System context map to GART space */
WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
/* enable first context */ /* enable first context */
WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
S_000102_ENABLE_PAGE_TABLE(1) | S_000102_ENABLE_PAGE_TABLE(1) |
S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
/* disable all other contexts */ /* disable all other contexts */
for (i = 1; i < 8; i++) { for (i = 1; i < 8; i++)
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
}
/* setup the page table */ /* setup the page table */
WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
rdev->gart.table_addr); rdev->gart.table_addr);
WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
/* System context maps to VRAM space */
WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
/* enable page tables */ /* enable page tables */
tmp = RREG32_MC(R_000100_MC_PT0_CNTL); tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
@@ -146,7 +262,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
void rs600_gart_disable(struct radeon_device *rdev) void rs600_gart_disable(struct radeon_device *rdev)
{ {
uint32_t tmp; u32 tmp;
int r;
/* FIXME: disable out of gart access */ /* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0); WREG32_MC(R_000100_MC_PT0_CNTL, 0);
@@ -185,12 +302,42 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return 0; return 0;
} }
int rs600_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
uint32_t mode_int = 0;
u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.crtc_vblank_int[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.crtc_vblank_int[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
}
if (rdev->irq.hpd[1]) {
hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
}
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
return 0;
}
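/* Editorial usage sketch: callers flag the wanted sources in rdev->irq
 * and then latch them into hardware, e.g. (mirroring the call sites
 * commented out elsewhere in this patch):
 *
 *	rdev->irq.sw_int = true;
 *	rs600_irq_set(rdev);
 */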
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{ {
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = ~C_000044_SW_INT; uint32_t irq_mask = ~C_000044_SW_INT;
u32 tmp;
if (G_000044_DISPLAY_INT_STAT(irqs)) { if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
@@ -202,6 +349,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
WREG32(R_006D34_D2MODE_VBLANK_STATUS, WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1)); S_006D34_D2MODE_VBLANK_ACK(1));
} }
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
} else { } else {
*r500_disp_int = 0; *r500_disp_int = 0;
} }
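/* Editorial sketch of the consumer side, modeled on how other radeon
 * irq_process handlers pair the ack with the hpd hooks (hotplug_work is
 * an assumption about struct radeon_device here):
 *
 *	if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
 *		rs600_hpd_set_polarity(rdev, RADEON_HPD_1);
 *		queue_hotplug = true;
 *	}
 *	if (queue_hotplug)
 *		schedule_work(&rdev->hotplug_work);
 */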
@@ -246,9 +403,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev) void rs600_gpu_init(struct radeon_device *rdev)
{ {
/* FIXME: HDP same place on rs600 ? */
r100_hdp_reset(rdev); r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev); r420_pipes_init(rdev);
/* Wait for mc idle */ /* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev)) if (rs600_mc_wait_for_idle(rdev))
@@ -257,9 +412,20 @@ void rs600_gpu_init(struct radeon_device *rdev)
void rs600_vram_info(struct radeon_device *rdev) void rs600_vram_info(struct radeon_device *rdev)
{ {
/* FIXME: to do or is these values sane ? */
rdev->mc.vram_is_ddr = true; rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128; rdev->mc.vram_width = 128;
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
rdev->mc.mc_vram_size = rdev->mc.aper_size;
if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
} }
void rs600_bandwidth_update(struct radeon_device *rdev) void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -333,7 +499,6 @@ static int rs600_startup(struct radeon_device *rdev)
if (r) if (r)
return r; return r;
/* Enable IRQ */ /* Enable IRQ */
// rdev->irq.sw_int = true;
// rs600_irq_set(rdev); // rs600_irq_set(rdev);
/* 1M ring buffer */ /* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024); // r = r100_cp_init(rdev, 1024 * 1024);
@@ -385,10 +550,9 @@ int rs600_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT)); RREG32(R_0007C0_CP_STAT));
} }
/* check if cards are posted or not */ /* check if cards are posted or not */
if (!radeon_card_posted(rdev) && rdev->bios) { if (radeon_boot_test_post_card(rdev) == false)
DRM_INFO("GPU not posted. posting now...\n"); return -EINVAL;
atom_asic_init(rdev->mode_info.atom_context);
}
/* Initialize clocks */ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev); radeon_get_clock_info(rdev->ddev);
/* Initialize power management */ /* Initialize power management */
@@ -396,7 +560,7 @@ int rs600_init(struct radeon_device *rdev)
/* Get vram information */ /* Get vram information */
rs600_vram_info(rdev); rs600_vram_info(rdev);
/* Initialize memory controller (also test AGP) */ /* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev); r = rs600_mc_init(rdev);
if (r) if (r)
return r; return r;
rs600_debugfs(rdev); rs600_debugfs(rdev);
@@ -408,7 +572,7 @@ int rs600_init(struct radeon_device *rdev)
// if (r) // if (r)
// return r; // return r;
/* Memory manager */ /* Memory manager */
r = radeon_object_init(rdev); r = radeon_bo_init(rdev);
if (r) if (r)
return r; return r;
r = rs600_gart_init(rdev); r = rs600_gart_init(rdev);

View File

@@ -30,27 +30,12 @@
/* Registers */ /* Registers */
#define R_000040_GEN_INT_CNTL 0x000040 #define R_000040_GEN_INT_CNTL 0x000040
#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0) #define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18)
#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1) #define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1)
#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE #define C_000040_SCRATCH_INT_MASK 0xFFFBFFFF
#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) #define S_000040_GUI_IDLE_MASK(x) (((x) & 0x1) << 19)
#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) #define G_000040_GUI_IDLE_MASK(x) (((x) >> 19) & 0x1)
#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF #define C_000040_GUI_IDLE_MASK 0xFFF7FFFF
#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
#define C_000040_SNAPSHOT2 0xFFFFFF7F
#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
#define C_000040_FP2_DETECT 0xFFFFFBFF
#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) #define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) #define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF #define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
@@ -370,7 +355,90 @@
#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) #define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) #define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF #define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
#define S_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 16)
#define G_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) >> 16) & 0x1)
#define C_007EDC_DACA_AUTODETECT_INTERRUPT 0xFFFEFFFF
#define S_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 17)
#define G_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) >> 17) & 0x1)
#define C_007EDC_DACB_AUTODETECT_INTERRUPT 0xFFFDFFFF
#define S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) & 0x1) << 18)
#define G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) >> 18) & 0x1)
#define C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT 0xFFFBFFFF
#define S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) & 0x1) << 19)
#define G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) >> 19) & 0x1)
#define C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT 0xFFF7FFFF
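/* Editorial note: as elsewhere in this header, each field comes as an
 * S_ (shift a value into the field), G_ (extract the field) and C_
 * (AND-mask that clears the field) triplet, intended for
 * read-modify-write sequences such as:
 *
 *	tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
 *	tmp &= C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK;
 *	tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
 *	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 */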
#define R_007828_DACA_AUTODETECT_CONTROL 0x007828
#define S_007828_DACA_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
#define G_007828_DACA_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
#define C_007828_DACA_AUTODETECT_MODE 0xFFFFFFFC
#define S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
#define G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
#define C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
#define S_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
#define G_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
#define C_007828_DACA_AUTODETECT_CHECK_MASK 0xFFFCFFFF
#define R_007838_DACA_AUTODETECT_INT_CONTROL 0x007838
#define S_007838_DACA_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
#define C_007838_DACA_DACA_AUTODETECT_ACK 0xFFFFFFFE
#define S_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
#define G_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
#define C_007838_DACA_AUTODETECT_INT_ENABLE 0xFFFCFFFF
#define R_007A28_DACB_AUTODETECT_CONTROL 0x007A28
#define S_007A28_DACB_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
#define G_007A28_DACB_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
#define C_007A28_DACB_AUTODETECT_MODE 0xFFFFFFFC
#define S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
#define G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
#define C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
#define S_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
#define G_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
#define C_007A28_DACB_AUTODETECT_CHECK_MASK 0xFFFCFFFF
#define R_007A38_DACB_AUTODETECT_INT_CONTROL 0x007A38
#define S_007A38_DACB_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
#define C_007A38_DACB_DACA_AUTODETECT_ACK 0xFFFFFFFE
#define S_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
#define G_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
#define C_007A38_DACB_AUTODETECT_INT_ENABLE 0xFFFCFFFF
#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL 0x007D00
#define S_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) & 0x1) << 0)
#define G_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) >> 0) & 0x1)
#define C_007D00_DC_HOT_PLUG_DETECT1_EN 0xFFFFFFFE
#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0x007D04
#define S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) & 0x1) << 0)
#define G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) >> 0) & 0x1)
#define C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0xFFFFFFFE
#define S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) & 0x1) << 1)
#define G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) >> 1) & 0x1)
#define C_007D04_DC_HOT_PLUG_DETECT1_SENSE 0xFFFFFFFD
#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL 0x007D08
#define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x) (((x) & 0x1) << 0)
#define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK 0xFFFFFFFE
#define S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
#define G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
#define C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY 0xFFFFFEFF
#define S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) & 0x1) << 16)
#define G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) >> 16) & 0x1)
#define C_007D08_DC_HOT_PLUG_DETECT1_INT_EN 0xFFFEFFFF
#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL 0x007D10
#define S_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) & 0x1) << 0)
#define G_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) >> 0) & 0x1)
#define C_007D10_DC_HOT_PLUG_DETECT2_EN 0xFFFFFFFE
#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0x007D14
#define S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) & 0x1) << 0)
#define G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) >> 0) & 0x1)
#define C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0xFFFFFFFE
#define S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) & 0x1) << 1)
#define G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) >> 1) & 0x1)
#define C_007D14_DC_HOT_PLUG_DETECT2_SENSE 0xFFFFFFFD
#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL 0x007D18
#define S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x) (((x) & 0x1) << 0)
#define C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK 0xFFFFFFFE
#define S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
#define G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
#define C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY 0xFFFFFEFF
#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
/* MC registers */ /* MC registers */
#define R_000000_MC_STATUS 0x000000 #define R_000000_MC_STATUS 0x000000