forked from KolibriOS/kolibrios
VMware SVGA II: some absolutely useless code
git-svn-id: svn://kolibrios.org@4075 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
parent 6c91093930
commit 1f2646cb02
@@ -29,6 +29,7 @@
 * Dave Airlie <airlied@linux.ie>
 * Jesse Barnes <jesse.barnes@intel.com>
 */
+#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -87,7 +88,7 @@ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);

/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
-char *fnname(int val) \
+const char *fnname(int val) \
{ \
    int i; \
    for (i = 0; i < ARRAY_SIZE(list); i++) { \
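For orientation, a sketch of what the macro expands to for the DPMS table below; the match-and-return body is assumed from the macro's usual definition and is not itself part of this hunk:

/* Assumed expansion of DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list):
 * a linear scan mapping an enum value to its human-readable name. */
const char *drm_get_dpms_name(int val)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++) {
        if (drm_dpms_enum_list[i].type == val)
            return drm_dpms_enum_list[i].name;
    }
    return "(unknown)";
}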
@@ -100,7 +101,7 @@ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
/*
 * Global properties
 */
-static struct drm_prop_enum_list drm_dpms_enum_list[] =
+static const struct drm_prop_enum_list drm_dpms_enum_list[] =
{   { DRM_MODE_DPMS_ON, "On" },
    { DRM_MODE_DPMS_STANDBY, "Standby" },
    { DRM_MODE_DPMS_SUSPEND, "Suspend" },
@@ -112,7 +113,7 @@ DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
/*
 * Optional properties
 */
-static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{
    { DRM_MODE_SCALE_NONE, "None" },
    { DRM_MODE_SCALE_FULLSCREEN, "Full" },
@@ -120,7 +121,7 @@ static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
    { DRM_MODE_SCALE_ASPECT, "Full aspect" },
};

-static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
{
    { DRM_MODE_DITHERING_OFF, "Off" },
    { DRM_MODE_DITHERING_ON, "On" },
@@ -130,7 +131,7 @@ static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
/*
 * Non-global properties, but "required" for certain connectors.
 */
-static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
{
    { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
    { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
@@ -139,7 +140,7 @@ static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =

DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)

-static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
{
    { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
    { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
@@ -149,7 +150,7 @@ static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
         drm_dvi_i_subconnector_enum_list)

-static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
{
    { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
    { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -160,7 +161,7 @@ static struct drm_prop_enum_list drm_tv_select_enum_list[] =

DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)

-static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
{
    { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
    { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -172,18 +173,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
         drm_tv_subconnector_enum_list)

-static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
    { DRM_MODE_DIRTY_OFF, "Off" },
    { DRM_MODE_DIRTY_ON, "On" },
    { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
};

DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
         drm_dirty_info_enum_list)

struct drm_conn_prop_enum_list {
    int type;
-   char *name;
+   const char *name;
    int count;
};

@@ -209,7 +207,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
    { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
};

-static struct drm_prop_enum_list drm_encoder_enum_list[] =
+static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{   { DRM_MODE_ENCODER_NONE, "None" },
    { DRM_MODE_ENCODER_DAC, "DAC" },
    { DRM_MODE_ENCODER_TMDS, "TMDS" },
@@ -218,7 +216,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
    { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};

-char *drm_get_encoder_name(struct drm_encoder *encoder)
+const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
    static char buf[32];

@@ -229,7 +227,7 @@ char *drm_get_encoder_name(struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_get_encoder_name);

-char *drm_get_connector_name(struct drm_connector *connector)
+const char *drm_get_connector_name(const struct drm_connector *connector)
{
    static char buf[32];

@@ -240,7 +238,7 @@ char *drm_get_connector_name(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_get_connector_name);

-char *drm_get_connector_status_name(enum drm_connector_status status)
+const char *drm_get_connector_status_name(enum drm_connector_status status)
{
    if (status == connector_status_connected)
        return "connected";
@@ -251,6 +249,28 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
}
EXPORT_SYMBOL(drm_get_connector_status_name);

+static char printable_char(int c)
+{
+   return isascii(c) && isprint(c) ? c : '?';
+}
+
+const char *drm_get_format_name(uint32_t format)
+{
+   static char buf[32];
+
+   snprintf(buf, sizeof(buf),
+        "%c%c%c%c %s-endian (0x%08x)",
+        printable_char(format & 0xff),
+        printable_char((format >> 8) & 0xff),
+        printable_char((format >> 16) & 0xff),
+        printable_char((format >> 24) & 0x7f),
+        format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+        format);
+
+   return buf;
+}
+EXPORT_SYMBOL(drm_get_format_name);
+
/**
 * drm_mode_object_get - allocate a new modeset identifier
 * @dev: DRM device
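The new helper decodes a DRM fourcc into readable text: four packed ASCII bytes plus the endianness bit. A standalone sketch of the same decoding, using DRM_FORMAT_XRGB8888 ('XR24') purely as an illustration; fourcc_code() mirrors the kernel's little-endian packing:

#include <stdio.h>

#define fourcc_code(a, b, c, d) \
    ((unsigned)(a) | ((unsigned)(b) << 8) | ((unsigned)(c) << 16) | ((unsigned)(d) << 24))

int main(void)
{
    unsigned fmt = fourcc_code('X', 'R', '2', '4'); /* DRM_FORMAT_XRGB8888 */
    printf("%c%c%c%c (0x%08x)\n",
           fmt & 0xff, (fmt >> 8) & 0xff,
           (fmt >> 16) & 0xff, (fmt >> 24) & 0x7f,
           fmt); /* prints: XR24 (0x34325258) */
    return 0;
}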
@@ -413,7 +433,7 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
    mutex_lock(&dev->mode_config.fb_lock);
    fb = __drm_framebuffer_lookup(dev, id);
    if (fb)
-       kref_get(&fb->refcount);
+       drm_framebuffer_reference(fb);
    mutex_unlock(&dev->mode_config.fb_lock);

    return fb;
@@ -568,16 +588,8 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
    }

    list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-       if (plane->fb == fb) {
-           /* should turn off the crtc */
-           ret = plane->funcs->disable_plane(plane);
-           if (ret)
-               DRM_ERROR("failed to disable plane with busy fb\n");
-           /* disconnect the plane from the fb and crtc: */
-           __drm_framebuffer_unreference(plane->fb);
-           plane->fb = NULL;
-           plane->crtc = NULL;
-       }
+       if (plane->fb == fb)
+           drm_plane_force_disable(plane);
    }
    drm_modeset_unlock_all(dev);
}
@@ -592,7 +604,7 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
 * @crtc: CRTC object to init
 * @funcs: callbacks for the new CRTC
 *
- * Inits a new object created as base part of an driver crtc object.
+ * Inits a new object created as base part of a driver crtc object.
 *
 * RETURNS:
 * Zero on success, error code on failure.
@@ -627,11 +639,12 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_crtc_init);

/**
- * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * drm_crtc_cleanup - Clean up the core crtc usage
 * @crtc: CRTC to cleanup
 *
- * Cleanup @crtc. Removes from drm modesetting space
- * does NOT free object, caller does that.
+ * This function cleans up @crtc and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the crtc structure itself,
+ * this is the responsibility of the caller.
 */
void drm_crtc_cleanup(struct drm_crtc *crtc)
{
@@ -656,7 +669,7 @@ EXPORT_SYMBOL(drm_crtc_cleanup);
void drm_mode_probed_add(struct drm_connector *connector,
             struct drm_display_mode *mode)
{
-   list_add(&mode->head, &connector->probed_modes);
+   list_add_tail(&mode->head, &connector->probed_modes);
}
EXPORT_SYMBOL(drm_mode_probed_add);

@@ -802,6 +815,21 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_encoder_cleanup);

+/**
+ * drm_plane_init - Initialise a new plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @priv: plane is private (hidden from userspace)?
+ *
+ * Inits a new object created as base part of a driver plane object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
           unsigned long possible_crtcs,
           const struct drm_plane_funcs *funcs,
@@ -850,6 +878,14 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
EXPORT_SYMBOL(drm_plane_init);

+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
void drm_plane_cleanup(struct drm_plane *plane)
{
    struct drm_device *dev = plane->dev;
@@ -866,6 +902,32 @@ void drm_plane_cleanup(struct drm_plane *plane)
}
EXPORT_SYMBOL(drm_plane_cleanup);

+/**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+   int ret;
+
+   if (!plane->fb)
+       return;
+
+   ret = plane->funcs->disable_plane(plane);
+   if (ret)
+       DRM_ERROR("failed to disable plane with busy fb\n");
+   /* disconnect the plane from the fb and crtc: */
+   __drm_framebuffer_unreference(plane->fb);
+   plane->fb = NULL;
+   plane->crtc = NULL;
+}
+EXPORT_SYMBOL(drm_plane_force_disable);
+
/**
 * drm_mode_create - create a new display mode
 * @dev: DRM device
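The helper centralizes the disable-and-unlink dance that drm_framebuffer_remove() used to open-code. Per its kerneldoc it is also meant for fbdev restore; a hypothetical caller (illustration only, not from this commit) would look like:

/* Hypothetical fbdev-restore path: kick every plane off the display
 * before re-committing the console mode. */
static void example_restore_fbdev(struct drm_device *dev)
{
    struct drm_plane *plane;

    list_for_each_entry(plane, &dev->mode_config.plane_list, head)
        drm_plane_force_disable(plane); /* no-op when plane->fb is NULL */
}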
@@ -1741,7 +1803,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,

    plane_resp->plane_id = plane->base.id;
    plane_resp->possible_crtcs = plane->possible_crtcs;
-   plane_resp->gamma_size = plane->gamma_size;
+   plane_resp->gamma_size = 0;

    /*
     * This ioctl is called twice, once to determine how much space is
@@ -1835,7 +1897,8 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
        if (fb->pixel_format == plane->format_types[i])
            break;
    if (i == plane->format_count) {
-       DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+       DRM_DEBUG_KMS("Invalid pixel format %s\n",
+                 drm_get_format_name(fb->pixel_format));
        ret = -EINVAL;
        goto out;
    }
@@ -1908,18 +1971,31 @@ out:
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
    struct drm_crtc *crtc = set->crtc;
-   struct drm_framebuffer *fb, *old_fb;
+   struct drm_framebuffer *fb;
+   struct drm_crtc *tmp;
    int ret;

-   old_fb = crtc->fb;
+   /*
+    * NOTE: ->set_config can also disable other crtcs (if we steal all
+    * connectors from it), hence we need to refcount the fbs across all
+    * crtcs. Atomic modeset will have saner semantics ...
+    */
+   list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+       tmp->old_fb = tmp->fb;
+
    fb = set->fb;

    ret = crtc->funcs->set_config(set);
    if (ret == 0) {
-       if (old_fb)
-           drm_framebuffer_unreference(old_fb);
-       if (fb)
-           drm_framebuffer_reference(fb);
+       /* crtc->fb must be updated by ->set_config, enforces this. */
+       WARN_ON(fb != crtc->fb);
    }

+   list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+       if (tmp->fb)
+           drm_framebuffer_reference(tmp->fb);
+//     if (tmp->old_fb)
+//         drm_framebuffer_unreference(tmp->old_fb);
+   }
+
    return ret;
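The rework pins each CRTC's framebuffer across the ->set_config call instead of tracking a single old_fb: a set_config that steals all connectors from another crtc implicitly unbinds that crtc's fb, and the old single-fb bookkeeping leaked or dropped references in that case. A sketch of the intended pairing, assuming the upstream form where the old reference is actually dropped (this port comments that unreference out):

/* Sketch of the refcount dance: note every crtc's fb before the modeset,
 * drop the old one after, so an fb unbound from a "stolen" crtc cannot
 * disappear mid-call. */
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
    tmp->old_fb = tmp->fb;                        /* remember pre-modeset fb */

ret = crtc->funcs->set_config(set);               /* may rebind several crtcs */

list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
    if (tmp->fb)
        drm_framebuffer_reference(tmp->fb);       /* pin the new binding */
    if (tmp->old_fb)
        drm_framebuffer_unreference(tmp->old_fb); /* release the old one */
}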
@@ -2102,10 +2178,10 @@ out:
    return ret;
}

-int drm_mode_cursor_ioctl(struct drm_device *dev,
-             void *data, struct drm_file *file_priv)
+static int drm_mode_cursor_common(struct drm_device *dev,
+                 struct drm_mode_cursor2 *req,
+                 struct drm_file *file_priv)
{
-   struct drm_mode_cursor *req = data;
    struct drm_mode_object *obj;
    struct drm_crtc *crtc;
    int ret = 0;
@@ -2125,11 +2201,15 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,

    mutex_lock(&crtc->mutex);
    if (req->flags & DRM_MODE_CURSOR_BO) {
-       if (!crtc->funcs->cursor_set) {
+       if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
            ret = -ENXIO;
            goto out;
        }
        /* Turns off the cursor if handle is 0 */
+       if (crtc->funcs->cursor_set2)
+           ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+                 req->width, req->height, req->hot_x, req->hot_y);
+       else
        ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
                      req->width, req->height);
    }
@@ -2146,6 +2226,25 @@ out:
    mutex_unlock(&crtc->mutex);

    return ret;

}
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+             void *data, struct drm_file *file_priv)
+{
+   struct drm_mode_cursor *req = data;
+   struct drm_mode_cursor2 new_req;
+
+   memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
+   new_req.hot_x = new_req.hot_y = 0;
+
+   return drm_mode_cursor_common(dev, &new_req, file_priv);
+}
+
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+              void *data, struct drm_file *file_priv)
+{
+   struct drm_mode_cursor2 *req = data;
+   return drm_mode_cursor_common(dev, req, file_priv);
+}
#endif

/* Original addfb only supported RGB formats, so figure out which one */
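Both ioctls now funnel into one helper: legacy callers get a zero hotspot, while cursor2 callers pass hot_x/hot_y through, and the helper prefers a driver's cursor_set2 hook when present. A hypothetical driver-side funcs table showing where the new hook plugs in (names are illustrative):

/* Hypothetical driver funcs table: a driver that understands hotspots
 * implements cursor_set2 and may leave cursor_set unset; the common
 * helper above prefers cursor_set2 when it exists. */
static const struct drm_crtc_funcs example_crtc_funcs = {
    .cursor_set2 = example_cursor_set2,   /* handle, w, h, hot_x, hot_y */
    .cursor_move = example_cursor_move,
    .set_config  = drm_crtc_helper_set_config,
};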
@@ -2315,7 +2414,8 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)

    ret = format_check(r);
    if (ret) {
-       DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+       DRM_DEBUG_KMS("bad framebuffer format %s\n",
+                 drm_get_format_name(r->pixel_format));
        return ret;
    }

@@ -189,13 +189,14 @@ prune:
    if (list_empty(&connector->modes))
        return 0;

+   list_for_each_entry(mode, &connector->modes, head)
+       mode->vrefresh = drm_mode_vrefresh(mode);
+
    drm_mode_sort(&connector->modes);

    DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
            drm_get_connector_name(connector));
    list_for_each_entry(mode, &connector->modes, head) {
-       mode->vrefresh = drm_mode_vrefresh(mode);
-
        drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
        drm_mode_debug_printmodeline(mode);
    }
@@ -564,14 +565,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)

    DRM_DEBUG_KMS("\n");

-   if (!set)
-       return -EINVAL;
+   BUG_ON(!set);
+   BUG_ON(!set->crtc);
+   BUG_ON(!set->crtc->helper_private);

-   if (!set->crtc)
-       return -EINVAL;
-
-   if (!set->crtc->helper_private)
-       return -EINVAL;
+   /* Enforce sane interface api - has been abused by the fb helper. */
+   BUG_ON(!set->mode && set->fb);
+   BUG_ON(set->fb && set->num_connectors == 0);

    crtc_funcs = set->crtc->helper_private;

@@ -645,11 +645,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
            mode_changed = true;
        } else if (set->fb == NULL) {
            mode_changed = true;
-       } else if (set->fb->depth != set->crtc->fb->depth) {
-           mode_changed = true;
-       } else if (set->fb->bits_per_pixel !=
-              set->crtc->fb->bits_per_pixel) {
-           mode_changed = true;
        } else if (set->fb->pixel_format !=
               set->crtc->fb->pixel_format) {
            mode_changed = true;
@@ -682,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                /* don't break so fail path works correct */
                fail = 1;
                break;
+
+               if (connector->dpms != DRM_MODE_DPMS_ON) {
+                   DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+                   mode_changed = true;
+               }
            }
        }
    }
@@ -968,6 +968,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
    u8 csum = 0;
    struct edid *edid = (struct edid *)raw_edid;

+   if (WARN_ON(!raw_edid))
+       return false;
+
    if (edid_fixup > 8 || edid_fixup < 0)
        edid_fixup = 6;

@@ -1010,15 +1013,15 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
        break;
    }

-   return 1;
+   return true;

bad:
-   if (raw_edid && print_bad_edid) {
+   if (print_bad_edid) {
        printk(KERN_ERR "Raw EDID:\n");
        print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
                   raw_edid, EDID_LENGTH, false);
    }
-   return 0;
+   return false;
}
EXPORT_SYMBOL(drm_edid_block_valid);

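The validity check this function builds on is the EDID rule that every 128-byte block, including its trailing checksum byte, sums to zero modulo 256. A self-contained sketch of that core rule (hypothetical helper, not the kernel code):

#include <stdbool.h>
#include <stddef.h>

#define EDID_LENGTH 128

/* EDID blocks are self-checking: all bytes of a block, checksum byte
 * included, must sum to 0 modulo 256. */
static bool edid_checksum_ok(const unsigned char *block)
{
    unsigned char csum = 0;
    size_t i;

    for (i = 0; i < EDID_LENGTH; i++)
        csum += block[i];
    return csum == 0;
}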
@@ -1706,11 +1709,11 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
        return NULL;

    if (pt->misc & DRM_EDID_PT_STEREO) {
-       printk(KERN_WARNING "stereo mode not supported\n");
+       DRM_DEBUG_KMS("stereo mode not supported\n");
        return NULL;
    }
    if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
-       printk(KERN_WARNING "composite sync not supported\n");
+       DRM_DEBUG_KMS("composite sync not supported\n");
    }

    /* it is incorrect if hsync/vsync width is zero */
@@ -2321,6 +2324,31 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);

+/*
+ * Calculate the alternate clock for the CEA mode
+ * (60Hz vs. 59.94Hz etc.)
+ */
+static unsigned int
+cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
+{
+   unsigned int clock = cea_mode->clock;
+
+   if (cea_mode->vrefresh % 6 != 0)
+       return clock;
+
+   /*
+    * edid_cea_modes contains the 59.94Hz
+    * variant for 240 and 480 line modes,
+    * and the 60Hz variant otherwise.
+    */
+   if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
+       clock = clock * 1001 / 1000;
+   else
+       clock = DIV_ROUND_UP(clock * 1000, 1001);
+
+   return clock;
+}
+
/**
 * drm_match_cea_mode - look for a CEA mode matching given mode
 * @to_match: display mode
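To make the 1000/1001 arithmetic concrete, a worked example using well-known CEA timings (values are illustrative, not taken from this diff):

/* 1080p60 is stored as its 60 Hz variant (148500 kHz, vdisplay 1080):
 *   alternate = DIV_ROUND_UP(148500 * 1000, 1001) = 148352 kHz  -> 59.94 Hz
 * A 480-line entry is stored as its 59.94 Hz variant (e.g. 27000 kHz):
 *   alternate = 27000 * 1001 / 1000 = 27027 kHz                 -> 60 Hz
 * Modes whose vrefresh is not a multiple of 6 (e.g. 50 Hz) are returned
 * unchanged, since only the 60 Hz family has NTSC-style twins. */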
@@ -2339,21 +2367,9 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
        const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
        unsigned int clock1, clock2;

-       clock1 = clock2 = cea_mode->clock;
-
        /* Check both 60Hz and 59.94Hz */
-       if (cea_mode->vrefresh % 6 == 0) {
-           /*
-            * edid_cea_modes contains the 59.94Hz
-            * variant for 240 and 480 line modes,
-            * and the 60Hz variant otherwise.
-            */
-           if (cea_mode->vdisplay == 240 ||
-               cea_mode->vdisplay == 480)
-               clock1 = clock1 * 1001 / 1000;
-           else
-               clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
-       }
+       clock1 = cea_mode->clock;
+       clock2 = cea_mode_alternate_clock(cea_mode);

        if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
             KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
@@ -2364,6 +2380,66 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
}
EXPORT_SYMBOL(drm_match_cea_mode);

+static int
+add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+   struct drm_device *dev = connector->dev;
+   struct drm_display_mode *mode, *tmp;
+   LIST_HEAD(list);
+   int modes = 0;
+
+   /* Don't add CEA modes if the CEA extension block is missing */
+   if (!drm_find_cea_extension(edid))
+       return 0;
+
+   /*
+    * Go through all probed modes and create a new mode
+    * with the alternate clock for certain CEA modes.
+    */
+   list_for_each_entry(mode, &connector->probed_modes, head) {
+       const struct drm_display_mode *cea_mode;
+       struct drm_display_mode *newmode;
+       u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
+       unsigned int clock1, clock2;
+
+       if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
+           continue;
+
+       cea_mode = &edid_cea_modes[cea_mode_idx];
+
+       clock1 = cea_mode->clock;
+       clock2 = cea_mode_alternate_clock(cea_mode);
+
+       if (clock1 == clock2)
+           continue;
+
+       if (mode->clock != clock1 && mode->clock != clock2)
+           continue;
+
+       newmode = drm_mode_duplicate(dev, cea_mode);
+       if (!newmode)
+           continue;
+
+       /*
+        * The current mode could be either variant. Make
+        * sure to pick the "other" clock for the new mode.
+        */
+       if (mode->clock != clock1)
+           newmode->clock = clock1;
+       else
+           newmode->clock = clock2;
+
+       list_add_tail(&newmode->head, &list);
+   }
+
+   list_for_each_entry_safe(mode, tmp, &list, head) {
+       list_del(&mode->head);
+       drm_mode_probed_add(connector, mode);
+       modes++;
+   }
+
+   return modes;
+}
+
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
@@ -2946,6 +3022,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
    if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
        num_modes += add_inferred_modes(connector, edid);
    num_modes += add_cea_modes(connector, edid);
+   num_modes += add_alternate_cea_modes(connector, edid);

    if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
        edid_fixup_preferred(connector, quirks);
@@ -104,12 +104,8 @@ drm_gem_init(struct drm_device *dev)
        return -ENOMEM;
    }

-   if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-           DRM_FILE_PAGE_OFFSET_SIZE)) {
-       drm_ht_remove(&mm->offset_hash);
-       kfree(mm);
-       return -ENOMEM;
-   }
+   drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+           DRM_FILE_PAGE_OFFSET_SIZE);

    return 0;
}
@@ -447,25 +443,21 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
    spin_lock(&dev->object_name_lock);
    if (!obj->name) {
        ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
-       obj->name = ret;
-       args->name = (uint64_t) obj->name;
-       spin_unlock(&dev->object_name_lock);
-       idr_preload_end();
-
        if (ret < 0)
            goto err;
-       ret = 0;
+
+       obj->name = ret;

        /* Allocate a reference for the name table.  */
        drm_gem_object_reference(obj);
-   } else {
-       args->name = (uint64_t) obj->name;
-       spin_unlock(&dev->object_name_lock);
-       idr_preload_end();
-       ret = 0;
    }

+   args->name = (uint64_t) obj->name;
+   ret = 0;
+
err:
+   spin_unlock(&dev->object_name_lock);
+   idr_preload_end();
    drm_gem_object_unreference_unlocked(obj);
    return ret;
}
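For context, flink publishes a process-local GEM handle under a global name that another process can open. A hypothetical userspace round-trip, for orientation only (error handling omitted):

/* Process A names a buffer; process B opens it by that name. */
struct drm_gem_flink flink = { .handle = handle_a };
ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);    /* kernel fills flink.name */

struct drm_gem_open open_req = { .name = flink.name };
ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_req);  /* open_req.handle valid in B */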
212  drivers/video/drm/drm_hashtab.c  Normal file
@@ -0,0 +1,212 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Simple open hash tab implementation.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_hashtab.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/rculist.h>

#define hlist_for_each_entry_rcu(pos, head, member) \
    for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
        typeof(*(pos)), member); \
        pos; \
        pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
        &(pos)->member)), typeof(*(pos)), member))


int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
    unsigned int size = 1 << order;

    ht->order = order;
    ht->table = NULL;
    ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
    if (!ht->table) {
        DRM_ERROR("Out of memory for hash table\n");
        return -ENOMEM;
    }
    return 0;
}
EXPORT_SYMBOL(drm_ht_create);

void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
    struct drm_hash_item *entry;
    struct hlist_head *h_list;
    unsigned int hashed_key;
    int count = 0;

    hashed_key = hash_long(key, ht->order);
    DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
    h_list = &ht->table[hashed_key];
    hlist_for_each_entry(entry, h_list, head)
        DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}

static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
                      unsigned long key)
{
    struct drm_hash_item *entry;
    struct hlist_head *h_list;
    unsigned int hashed_key;

    hashed_key = hash_long(key, ht->order);
    h_list = &ht->table[hashed_key];
    hlist_for_each_entry(entry, h_list, head) {
        if (entry->key == key)
            return &entry->head;
        if (entry->key > key)
            break;
    }
    return NULL;
}


static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
                          unsigned long key)
{
    struct drm_hash_item *entry;
    struct hlist_head *h_list;
    unsigned int hashed_key;

    hashed_key = hash_long(key, ht->order);
    h_list = &ht->table[hashed_key];
    hlist_for_each_entry_rcu(entry, h_list, head) {
        if (entry->key == key)
            return &entry->head;
        if (entry->key > key)
            break;
    }
    return NULL;
}

int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
    struct drm_hash_item *entry;
    struct hlist_head *h_list;
    struct hlist_node *parent;
    unsigned int hashed_key;
    unsigned long key = item->key;

    hashed_key = hash_long(key, ht->order);
    h_list = &ht->table[hashed_key];
    parent = NULL;
    hlist_for_each_entry(entry, h_list, head) {
        if (entry->key == key)
            return -EINVAL;
        if (entry->key > key)
            break;
        parent = &entry->head;
    }
    if (parent) {
        hlist_add_after_rcu(parent, &item->head);
    } else {
        hlist_add_head_rcu(&item->head, h_list);
    }
    return 0;
}
EXPORT_SYMBOL(drm_ht_insert_item);

/*
 * Just insert an item and return any "bits" bit key that hasn't been
 * used before.
 */
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
                  unsigned long seed, int bits, int shift,
                  unsigned long add)
{
    int ret;
    unsigned long mask = (1 << bits) - 1;
    unsigned long first, unshifted_key;

    unshifted_key = hash_long(seed, bits);
    first = unshifted_key;
    do {
        item->key = (unshifted_key << shift) + add;
        ret = drm_ht_insert_item(ht, item);
        if (ret)
            unshifted_key = (unshifted_key + 1) & mask;
    } while(ret && (unshifted_key != first));

    if (ret) {
        DRM_ERROR("Available key bit space exhausted\n");
        return -EINVAL;
    }
    return 0;
}
EXPORT_SYMBOL(drm_ht_just_insert_please);

int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
             struct drm_hash_item **item)
{
    struct hlist_node *list;

    list = drm_ht_find_key_rcu(ht, key);
    if (!list)
        return -EINVAL;

    *item = hlist_entry(list, struct drm_hash_item, head);
    return 0;
}
EXPORT_SYMBOL(drm_ht_find_item);

int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
    struct hlist_node *list;

    list = drm_ht_find_key(ht, key);
    if (list) {
        hlist_del_init_rcu(list);
        return 0;
    }
    return -EINVAL;
}

int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
    hlist_del_init_rcu(&item->head);
    return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);

void drm_ht_remove(struct drm_open_hash *ht)
{
    if (ht->table) {
        kfree(ht->table);
        ht->table = NULL;
    }
}
EXPORT_SYMBOL(drm_ht_remove);
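A minimal sketch of how a caller drives this API after drm_ht_create(ht, order): embed a drm_hash_item in your own struct, insert it under a key, and recover the container on lookup. The struct and values are illustrative only:

struct my_obj {
    struct drm_hash_item hash;  /* .key doubles as the lookup handle */
    int payload;
};

static int example(struct drm_open_hash *ht, struct my_obj *obj)
{
    struct drm_hash_item *hit;

    obj->hash.key = 42;
    if (drm_ht_insert_item(ht, &obj->hash))  /* -EINVAL on duplicate key */
        return -1;

    if (drm_ht_find_item(ht, 42, &hit) == 0) {
        struct my_obj *found = container_of(hit, struct my_obj, hash);
        return found->payload;
    }
    return -1;
}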
@@ -59,6 +59,75 @@
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000


+irqreturn_t device_irq_handler(struct drm_device *dev)
+{
+
+   printf("video irq\n");
+
+// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
+
+   return dev->driver->irq_handler(0, dev);
+}
+
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * \c irq_preinstall() and \c irq_postinstall() functions
+ * before and after the installation.
+ */
+int drm_irq_install(struct drm_device *dev)
+{
+   int ret;
+   unsigned long sh_flags = 0;
+   char *irqname;
+
+
+   if (drm_dev_to_irq(dev) == 0)
+       return -EINVAL;
+
+   mutex_lock(&dev->struct_mutex);
+
+   /* Driver must have been initialized */
+   if (!dev->dev_private) {
+       mutex_unlock(&dev->struct_mutex);
+       return -EINVAL;
+   }
+
+   if (dev->irq_enabled) {
+       mutex_unlock(&dev->struct_mutex);
+       return -EBUSY;
+   }
+   dev->irq_enabled = 1;
+   mutex_unlock(&dev->struct_mutex);
+
+   DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
+
+   /* Before installing handler */
+   if (dev->driver->irq_preinstall)
+       dev->driver->irq_preinstall(dev);
+
+   ret = !AttachIntHandler(drm_dev_to_irq(dev), device_irq_handler, (u32)dev);
+
+   /* After installing handler */
+   if (dev->driver->irq_postinstall)
+       ret = dev->driver->irq_postinstall(dev);
+
+   if (ret < 0) {
+       DRM_ERROR(__FUNCTION__);
+   }
+
+   u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
+   cmd &= ~(1<<10);
+   PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);
+
+   return ret;
+}
+EXPORT_SYMBOL(drm_irq_install);
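The PCI fiddling at the end clears bit 10 (Interrupt Disable) of the PCI command register at config offset 4, re-enabling legacy INTx delivery. The same step with self-documenting names; the constants come from the PCI specification, while PciRead16/PciWrite16 are the KolibriOS helpers used above:

#define PCI_COMMAND              0x04
#define PCI_COMMAND_INTX_DISABLE (1 << 10)

u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, PCI_COMMAND);
cmd &= ~PCI_COMMAND_INTX_DISABLE;   /* allow legacy INTx interrupts */
PciWrite16(dev->pdev->busnr, dev->pdev->devfn, PCI_COMMAND, cmd);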


static inline u64 div_u64(u64 dividend, u32 divisor)
{
    u32 remainder;
@@ -82,7 +151,6 @@ u64 div64_u64(u64 dividend, u64 divisor)
    return div_u64(dividend, d);
}

-
/**
 * drm_calc_timestamping_constants - Calculate and
 * store various constants which are later needed by
@@ -175,6 +243,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
#if 0
    unsigned long irqflags;

+   /* vblank is not initialized (IRQ not installed ?), or has been freed */
+   if (!dev->num_crtcs)
+       return;
+
    if (dev->vblank_inmodeset[crtc]) {
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        dev->vblank_disable_allowed = 1;
@@ -188,3 +260,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
#endif
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
+
+
+
+
@@ -669,7 +669,7 @@ int drm_mm_clean(struct drm_mm * mm)
}
EXPORT_SYMBOL(drm_mm_clean);

-int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
    INIT_LIST_HEAD(&mm->hole_stack);
    INIT_LIST_HEAD(&mm->unused_nodes);
@@ -690,8 +690,6 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
    list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

    mm->color_adjust = NULL;
-
-   return 0;
}
EXPORT_SYMBOL(drm_mm_init);

@@ -699,8 +697,8 @@ void drm_mm_takedown(struct drm_mm * mm)
{
    struct drm_mm_node *entry, *next;

-   if (!list_empty(&mm->head_node.node_list)) {
-       DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+   if (WARN(!list_empty(&mm->head_node.node_list),
+        "Memory manager not clean. Delaying takedown\n")) {
        return;
    }

@@ -716,27 +714,11 @@ void drm_mm_takedown(struct drm_mm * mm)
}
EXPORT_SYMBOL(drm_mm_takedown);

-void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
+                      const char *prefix)
{
-   struct drm_mm_node *entry;
-   unsigned long total_used = 0, total_free = 0, total = 0;
    unsigned long hole_start, hole_end, hole_size;

-   hole_start = drm_mm_hole_node_start(&mm->head_node);
-   hole_end = drm_mm_hole_node_end(&mm->head_node);
-   hole_size = hole_end - hole_start;
-   if (hole_size)
-       printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
-           prefix, hole_start, hole_end,
-           hole_size);
-   total_free += hole_size;
-
-   drm_mm_for_each_node(entry, mm) {
-       printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
-           prefix, entry->start, entry->start + entry->size,
-           entry->size);
-       total_used += entry->size;
-
    if (entry->hole_follows) {
        hole_start = drm_mm_hole_node_start(entry);
        hole_end = drm_mm_hole_node_end(entry);
@@ -744,8 +726,25 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
        printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
            prefix, hole_start, hole_end,
            hole_size);
-       total_free += hole_size;
+       return hole_size;
    }

+   return 0;
+}
+
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+   struct drm_mm_node *entry;
+   unsigned long total_used = 0, total_free = 0, total = 0;
+
+   total_free += drm_mm_debug_hole(&mm->head_node, prefix);
+
+   drm_mm_for_each_node(entry, mm) {
+       printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
+           prefix, entry->start, entry->start + entry->size,
+           entry->size);
+       total_used += entry->size;
+       total_free += drm_mm_debug_hole(entry, prefix);
+   }
    total = total_free + total_used;

@@ -533,6 +533,8 @@ int drm_display_mode_from_videomode(const struct videomode *vm,
        dmode->flags |= DRM_MODE_FLAG_INTERLACE;
    if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
        dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+   if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
+       dmode->flags |= DRM_MODE_FLAG_DBLCLK;
    drm_mode_set_name(dmode);

    return 0;
@@ -785,16 +787,17 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
 * LOCKING:
 * None.
 *
- * Copy an existing mode into another mode, preserving the object id
- * of the destination mode.
+ * Copy an existing mode into another mode, preserving the object id and
+ * list head of the destination mode.
 */
void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
{
    int id = dst->base.id;
+   struct list_head head = dst->head;

    *dst = *src;
    dst->base.id = id;
-   INIT_LIST_HEAD(&dst->head);
+   dst->head = head;
}
EXPORT_SYMBOL(drm_mode_copy);

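Why the list head must be preserved: the struct assignment copies src's list pointers into dst, silently linking dst into whatever list src is on, and the old INIT_LIST_HEAD() detached dst from its own list while its former neighbours still pointed at it. An annotated restatement of the fix (illustration, not additional kernel code):

struct list_head head = dst->head;  /* remember dst's list linkage   */
*dst = *src;                        /* wholesale copy clobbers .head */
dst->base.id = id;                  /* keep dst's identity           */
dst->head = head;                   /* re-attach original linkage    */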
@@ -1015,6 +1018,11 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head
    diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
    if (diff)
        return diff;
+
+   diff = b->vrefresh - a->vrefresh;
+   if (diff)
+       return diff;
+
    diff = b->clock - a->clock;
    return diff;
}
File diff suppressed because it is too large
151  drivers/video/drm/ttm/ttm_bo_manager.c  Normal file
@@ -0,0 +1,151 @@
/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>

/**
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

struct ttm_range_manager {
    struct drm_mm mm;
    spinlock_t lock;
};

static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                   struct ttm_buffer_object *bo,
                   struct ttm_placement *placement,
                   struct ttm_mem_reg *mem)
{
    struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
    struct drm_mm *mm = &rman->mm;
    struct drm_mm_node *node = NULL;
    unsigned long lpfn;
    int ret;

    lpfn = placement->lpfn;
    if (!lpfn)
        lpfn = man->size;
    do {
        ret = drm_mm_pre_get(mm);
        if (unlikely(ret))
            return ret;

        spin_lock(&rman->lock);
        node = drm_mm_search_free_in_range(mm,
                    mem->num_pages, mem->page_alignment,
                    placement->fpfn, lpfn, 1);
        if (unlikely(node == NULL)) {
            spin_unlock(&rman->lock);
            return 0;
        }
        node = drm_mm_get_block_atomic_range(node, mem->num_pages,
                             mem->page_alignment,
                             placement->fpfn,
                             lpfn);
        spin_unlock(&rman->lock);
    } while (node == NULL);

    mem->mm_node = node;
    mem->start = node->start;
    return 0;
}

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
                struct ttm_mem_reg *mem)
{
    struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

    if (mem->mm_node) {
        spin_lock(&rman->lock);
        drm_mm_put_block(mem->mm_node);
        spin_unlock(&rman->lock);
        mem->mm_node = NULL;
    }
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
               unsigned long p_size)
{
    struct ttm_range_manager *rman;

    rman = kzalloc(sizeof(*rman), GFP_KERNEL);
    if (!rman)
        return -ENOMEM;

    drm_mm_init(&rman->mm, 0, p_size);
    spin_lock_init(&rman->lock);
    man->priv = rman;
    return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
    struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
    struct drm_mm *mm = &rman->mm;

    spin_lock(&rman->lock);
    if (drm_mm_clean(mm)) {
        drm_mm_takedown(mm);
        spin_unlock(&rman->lock);
        kfree(rman);
        man->priv = NULL;
        return 0;
    }
    spin_unlock(&rman->lock);
    return -EBUSY;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
                 const char *prefix)
{
    struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

    spin_lock(&rman->lock);
    drm_mm_debug_table(&rman->mm, prefix);
    spin_unlock(&rman->lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
    ttm_bo_man_init,
    ttm_bo_man_takedown,
    ttm_bo_man_get_node,
    ttm_bo_man_put_node,
    ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
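A hedged sketch of how a driver would hook this range manager up, following the pattern TTM drivers of this era use: the init_mem_type callback points man->func at the exported table for a managed aperture such as VRAM. Function and flag choices below are illustrative:

static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 struct ttm_mem_type_manager *man)
{
    switch (type) {
    case TTM_PL_VRAM:
        man->func = &ttm_bo_manager_func; /* init/takedown/get/put/debug */
        man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
        break;
    default:
        return -EINVAL;
    }
    return 0;
}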
706  drivers/video/drm/ttm/ttm_bo_util.c  Normal file
@@ -0,0 +1,706 @@
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
|
||||
{
|
||||
ttm_bo_mem_put(bo, &bo->mem);
|
||||
}
|
||||
|
||||
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
|
||||
bool evict,
|
||||
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct ttm_tt *ttm = bo->ttm;
|
||||
struct ttm_mem_reg *old_mem = &bo->mem;
|
||||
int ret;
|
||||
|
||||
if (old_mem->mem_type != TTM_PL_SYSTEM) {
|
||||
ttm_tt_unbind(ttm);
|
||||
ttm_bo_free_old_node(bo);
|
||||
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
|
||||
TTM_PL_MASK_MEM);
|
||||
old_mem->mem_type = TTM_PL_SYSTEM;
|
||||
}
|
||||
|
||||
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
if (new_mem->mem_type != TTM_PL_SYSTEM) {
|
||||
ret = ttm_tt_bind(ttm, new_mem);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
}
|
||||
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_move_ttm);
|
||||
|
||||
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
|
||||
{
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return 0;
|
||||
|
||||
if (interruptible)
|
||||
return mutex_lock_interruptible(&man->io_reserve_mutex);
|
||||
|
||||
mutex_lock(&man->io_reserve_mutex);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_io_lock);
|
||||
|
||||
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return;
|
||||
|
||||
mutex_unlock(&man->io_reserve_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_io_unlock);
|
||||
|
||||
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
struct ttm_buffer_object *bo;
|
||||
|
||||
if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
|
||||
return -EAGAIN;
|
||||
|
||||
bo = list_first_entry(&man->io_reserve_lru,
|
||||
struct ttm_buffer_object,
|
||||
io_reserve_lru);
|
||||
list_del_init(&bo->io_reserve_lru);
|
||||
ttm_bo_unmap_virtual_locked(bo);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
int ret = 0;
|
||||
|
||||
if (!bdev->driver->io_mem_reserve)
|
||||
return 0;
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return bdev->driver->io_mem_reserve(bdev, mem);
|
||||
|
||||
if (bdev->driver->io_mem_reserve &&
|
||||
mem->bus.io_reserved_count++ == 0) {
|
||||
retry:
|
||||
ret = bdev->driver->io_mem_reserve(bdev, mem);
|
||||
if (ret == -EAGAIN) {
|
||||
ret = ttm_mem_io_evict(man);
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_io_reserve);
|
||||
|
||||
void ttm_mem_io_free(struct ttm_bo_device *bdev,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return;
|
||||
|
||||
if (bdev->driver->io_mem_reserve &&
|
||||
--mem->bus.io_reserved_count == 0 &&
|
||||
bdev->driver->io_mem_free)
|
||||
bdev->driver->io_mem_free(bdev, mem);
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_io_free);
|
||||
|
||||
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem;
|
||||
int ret;
|
||||
|
||||
if (!mem->bus.io_reserved_vm) {
|
||||
struct ttm_mem_type_manager *man =
|
||||
&bo->bdev->man[mem->mem_type];
|
||||
|
||||
ret = ttm_mem_io_reserve(bo->bdev, mem);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
mem->bus.io_reserved_vm = true;
|
||||
if (man->use_io_reserve_lru)
|
||||
list_add_tail(&bo->io_reserve_lru,
|
||||
&man->io_reserve_lru);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem;
|
||||
|
||||
if (mem->bus.io_reserved_vm) {
|
||||
mem->bus.io_reserved_vm = false;
|
||||
list_del_init(&bo->io_reserve_lru);
|
||||
ttm_mem_io_free(bo->bdev, mem);
|
||||
}
|
||||
}
|
||||
|
||||
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
|
||||
void **virtual)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
int ret;
|
||||
void *addr;
|
||||
|
||||
*virtual = NULL;
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ret = ttm_mem_io_reserve(bdev, mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
if (ret || !mem->bus.is_iomem)
|
||||
return ret;
|
||||
|
||||
if (mem->bus.addr) {
|
||||
addr = mem->bus.addr;
|
||||
} else {
|
||||
if (mem->placement & TTM_PL_FLAG_WC)
|
||||
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
|
||||
else
|
||||
addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
|
||||
if (!addr) {
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ttm_mem_io_free(bdev, mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
*virtual = addr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
|
||||
void *virtual)
|
||||
{
|
||||
struct ttm_mem_type_manager *man;
|
||||
|
||||
man = &bdev->man[mem->mem_type];
|
||||
|
||||
if (virtual && mem->bus.addr == NULL)
|
||||
iounmap(virtual);
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ttm_mem_io_free(bdev, mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
}
|
||||
|
||||
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
|
||||
{
|
||||
uint32_t *dstP =
|
||||
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
|
||||
uint32_t *srcP =
|
||||
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
|
||||
|
||||
int i;
|
||||
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
|
||||
iowrite32(ioread32(srcP++), dstP++);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
|
||||
unsigned long page,
|
||||
pgprot_t prot)
|
||||
{
|
||||
struct page *d = ttm->pages[page];
|
||||
void *dst;
|
||||
|
||||
if (!d)
|
||||
return -ENOMEM;
|
||||
|
||||
    src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
    dst = kmap_atomic_prot(d, prot);
#else
    if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
        dst = vmap(&d, 1, 0, prot);
    else
        dst = kmap(d);
#endif
    if (!dst)
        return -ENOMEM;

    memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
    kunmap_atomic(dst);
#else
    if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
        vunmap(dst);
    else
        kunmap(d);
#endif

    return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                unsigned long page,
                pgprot_t prot)
{
    struct page *s = ttm->pages[page];
    void *src;

    if (!s)
        return -ENOMEM;

    dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
    src = kmap_atomic_prot(s, prot);
#else
    if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
        src = vmap(&s, 1, 0, prot);
    else
        src = kmap(s);
#endif
    if (!src)
        return -ENOMEM;

    memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
    kunmap_atomic(src);
#else
    if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
        vunmap(src);
    else
        kunmap(s);
#endif

    return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
               bool evict, bool no_wait_gpu,
               struct ttm_mem_reg *new_mem)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
    struct ttm_tt *ttm = bo->ttm;
    struct ttm_mem_reg *old_mem = &bo->mem;
    struct ttm_mem_reg old_copy = *old_mem;
    void *old_iomap;
    void *new_iomap;
    int ret;
    unsigned long i;
    unsigned long page;
    unsigned long add = 0;
    int dir;

    ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
    if (ret)
        return ret;
    ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
    if (ret)
        goto out;

    if (old_iomap == NULL && new_iomap == NULL)
        goto out2;
    if (old_iomap == NULL && ttm == NULL)
        goto out2;

    /* TTM might be NULL for moves within the same fixed memory region. */
    if (ttm && ttm->state == tt_unpopulated) {
        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret) {
            /* if we fail here don't nuke the mm node
             * as the bo still owns it */
            old_copy.mm_node = NULL;
            goto out1;
        }
    }

    add = 0;
    dir = 1;

    if ((old_mem->mem_type == new_mem->mem_type) &&
        (new_mem->start < old_mem->start + old_mem->size)) {
        dir = -1;
        add = new_mem->num_pages - 1;
    }

    for (i = 0; i < new_mem->num_pages; ++i) {
        page = i * dir + add;
        if (old_iomap == NULL) {
            pgprot_t prot = ttm_io_prot(old_mem->placement,
                            PAGE_KERNEL);
            ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                           prot);
        } else if (new_iomap == NULL) {
            pgprot_t prot = ttm_io_prot(new_mem->placement,
                            PAGE_KERNEL);
            ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                           prot);
        } else
            ret = ttm_copy_io_page(new_iomap, old_iomap, page);
        if (ret) {
            /* failing here, means keep old copy as-is */
            old_copy.mm_node = NULL;
            goto out1;
        }
    }
    mb();
out2:
    old_copy = *old_mem;
    *old_mem = *new_mem;
    new_mem->mm_node = NULL;

    if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
        ttm_tt_unbind(ttm);
        ttm_tt_destroy(ttm);
        bo->ttm = NULL;
    }

out1:
    ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
    ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
    ttm_bo_mem_put(bo, &old_copy);
    return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
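
/*
 * Worked example of the copy-direction logic above (illustrative only):
 * for a 4-page move within the same memory type whose destination range
 * overlaps the source from above, dir becomes -1 and add becomes 3, so
 * pages are copied in the order 3, 2, 1, 0. This is the same backwards
 * copy memmove() chooses when dst > src, ensuring no source page is
 * overwritten before it has been read.
 */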

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
    kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                      struct ttm_buffer_object **new_obj)
{
    struct ttm_buffer_object *fbo;
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_bo_driver *driver = bdev->driver;
    int ret;

    fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
    if (!fbo)
        return -ENOMEM;

    *fbo = *bo;

    /**
     * Fix up members that we shouldn't copy directly:
     * TODO: Explicit member copy would probably be better here.
     */

    INIT_LIST_HEAD(&fbo->ddestroy);
    INIT_LIST_HEAD(&fbo->lru);
    INIT_LIST_HEAD(&fbo->swap);
    INIT_LIST_HEAD(&fbo->io_reserve_lru);
    fbo->vm_node = NULL;
    atomic_set(&fbo->cpu_writers, 0);

    spin_lock(&bdev->fence_lock);
    if (bo->sync_obj)
        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
    else
        fbo->sync_obj = NULL;
    spin_unlock(&bdev->fence_lock);
    kref_init(&fbo->list_kref);
    kref_init(&fbo->kref);
    fbo->destroy = &ttm_transfered_destroy;
    fbo->acc_size = 0;
    fbo->resv = &fbo->ttm_resv;
    reservation_object_init(fbo->resv);
    ret = ww_mutex_trylock(&fbo->resv->lock);
    WARN_ON(!ret);

    *new_obj = fbo;
    return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
    if (caching_flags & TTM_PL_FLAG_WC)
        tmp = pgprot_writecombine(tmp);
    else if (boot_cpu_data.x86 > 3)
        tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
    if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (caching_flags & TTM_PL_FLAG_UNCACHED)
            pgprot_val(tmp) |= _PAGE_GUARDED;
    }
#endif
#if defined(__ia64__)
    if (caching_flags & TTM_PL_FLAG_WC)
        tmp = pgprot_writecombine(tmp);
    else
        tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
    if (!(caching_flags & TTM_PL_FLAG_CACHED))
        tmp = pgprot_noncached(tmp);
#endif
    return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
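
/*
 * Illustrative use (not from this file): a caller that needs a CPU
 * mapping of a write-combined placement would derive the protection
 * from the placement flags, e.g.
 *
 *      pgprot_t prot = ttm_io_prot(TTM_PL_FLAG_WC, PAGE_KERNEL);
 *
 * and pass it to vmap()/kmap_atomic_prot(), just as the mapping
 * helpers in this file do.
 */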

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
              unsigned long offset,
              unsigned long size,
              struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_reg *mem = &bo->mem;

    if (bo->mem.bus.addr) {
        map->bo_kmap_type = ttm_bo_map_premapped;
        map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
    } else {
        map->bo_kmap_type = ttm_bo_map_iomap;
        if (mem->placement & TTM_PL_FLAG_WC)
            map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                          size);
        else
            map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                           size);
    }
    return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
               unsigned long start_page,
               unsigned long num_pages,
               struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_reg *mem = &bo->mem;
    pgprot_t prot;
    struct ttm_tt *ttm = bo->ttm;
    int ret;

    BUG_ON(!ttm);

    if (ttm->state == tt_unpopulated) {
        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
            return ret;
    }

    if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
        /*
         * We're mapping a single page, and the desired
         * page protection is consistent with the bo.
         */

        map->bo_kmap_type = ttm_bo_map_kmap;
        map->page = ttm->pages[start_page];
        map->virtual = kmap(map->page);
    } else {
        /*
         * We need to use vmap to get the desired page protection
         * or to make the buffer object look contiguous.
         */
        prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
            PAGE_KERNEL :
            ttm_io_prot(mem->placement, PAGE_KERNEL);
        map->bo_kmap_type = ttm_bo_map_vmap;
        map->virtual = vmap(ttm->pages + start_page, num_pages,
                    0, prot);
    }
    return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
        unsigned long start_page, unsigned long num_pages,
        struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_type_manager *man =
        &bo->bdev->man[bo->mem.mem_type];
    unsigned long offset, size;
    int ret;

    BUG_ON(!list_empty(&bo->swap));
    map->virtual = NULL;
    map->bo = bo;
    if (num_pages > bo->num_pages)
        return -EINVAL;
    if (start_page > bo->num_pages)
        return -EINVAL;
#if 0
    if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
        return -EPERM;
#endif
    (void) ttm_mem_io_lock(man, false);
    ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
    ttm_mem_io_unlock(man);
    if (ret)
        return ret;
    if (!bo->mem.bus.is_iomem) {
        return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
    } else {
        offset = start_page << PAGE_SHIFT;
        size = num_pages << PAGE_SHIFT;
        return ttm_bo_ioremap(bo, offset, size, map);
    }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
    struct ttm_buffer_object *bo = map->bo;
    struct ttm_mem_type_manager *man =
        &bo->bdev->man[bo->mem.mem_type];

    if (!map->virtual)
        return;
    switch (map->bo_kmap_type) {
    case ttm_bo_map_iomap:
        iounmap(map->virtual);
        break;
    case ttm_bo_map_vmap:
        vunmap(map->virtual);
        break;
    case ttm_bo_map_kmap:
        kunmap(map->page);
        break;
    case ttm_bo_map_premapped:
        break;
    default:
        BUG();
    }
    (void) ttm_mem_io_lock(man, false);
    ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
    ttm_mem_io_unlock(man);
    map->virtual = NULL;
    map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
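
/*
 * Minimal usage sketch (illustrative, not part of this file): map a
 * whole buffer object, clear it through the CPU, and unmap it again.
 * "example_clear_bo" is a hypothetical helper; a cached system-memory
 * placement is assumed (for I/O memory one would use memset_io()).
 */
#if 0
static int example_clear_bo(struct ttm_buffer_object *bo)
{
    struct ttm_bo_kmap_obj map;
    bool is_iomem;
    int ret;

    ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
    if (ret)
        return ret;

    /* ttm_kmap_obj_virtual() also reports whether this is I/O memory. */
    memset(ttm_kmap_obj_virtual(&map, &is_iomem), 0,
           bo->num_pages << PAGE_SHIFT);
    ttm_bo_kunmap(&map);
    return 0;
}
#endif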

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                  void *sync_obj,
                  bool evict,
                  bool no_wait_gpu,
                  struct ttm_mem_reg *new_mem)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_bo_driver *driver = bdev->driver;
    struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
    struct ttm_mem_reg *old_mem = &bo->mem;
    int ret;
    struct ttm_buffer_object *ghost_obj;
    void *tmp_obj = NULL;

    spin_lock(&bdev->fence_lock);
    if (bo->sync_obj) {
        tmp_obj = bo->sync_obj;
        bo->sync_obj = NULL;
    }
    bo->sync_obj = driver->sync_obj_ref(sync_obj);
    if (evict) {
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
        if (tmp_obj)
            driver->sync_obj_unref(&tmp_obj);
        if (ret)
            return ret;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            (bo->ttm != NULL)) {
            ttm_tt_unbind(bo->ttm);
            ttm_tt_destroy(bo->ttm);
            bo->ttm = NULL;
        }
        ttm_bo_free_old_node(bo);
    } else {
        /**
         * This should help pipeline ordinary buffer moves.
         *
         * Hang old buffer memory on a new buffer object,
         * and leave it to be released when the GPU
         * operation has completed.
         */

        set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
        spin_unlock(&bdev->fence_lock);
        if (tmp_obj)
            driver->sync_obj_unref(&tmp_obj);

        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
        if (ret)
            return ret;

        /**
         * If we're not moving to fixed memory, the TTM object
         * needs to stay alive. Otherwise hang it on the ghost
         * bo to be unbound and destroyed.
         */

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
            ghost_obj->ttm = NULL;
        else
            bo->ttm = NULL;

        ttm_bo_unreserve(ghost_obj);
        ttm_bo_unref(&ghost_obj);
    }

    *old_mem = *new_mem;
    new_mem->mm_node = NULL;

    return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
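
/*
 * Illustrative call sequence (hypothetical driver code, not from this
 * file): after queueing a GPU blit for the move, a driver's move()
 * hook would typically create a fence for the copy and hand it to the
 * helper above, e.g.
 *
 *      fence = my_driver_emit_copy(bo, old_mem, new_mem);
 *      return ttm_bo_move_accel_cleanup(bo, fence, evict,
 *                                       no_wait_gpu, new_mem);
 *
 * so the old placement is released only once the copy has finished.
 */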

drivers/video/drm/ttm/ttm_execbuf_util.c (new file)
@@ -0,0 +1,235 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Local stub for this port: ww_mutex acquire contexts are not used here,
 * but the TTM headers below take pointers to this type. */
struct ww_acquire_ctx{};

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

static void ttm_eu_backoff_reservation_locked(struct list_head *list,
                          struct ww_acquire_ctx *ticket)
{
    struct ttm_validate_buffer *entry;

    list_for_each_entry(entry, list, head) {
        struct ttm_buffer_object *bo = entry->bo;
        if (!entry->reserved)
            continue;

        entry->reserved = false;
        if (entry->removed) {
            ttm_bo_add_to_lru(bo);
            entry->removed = false;
        }
//      ww_mutex_unlock(&bo->resv->lock);
    }
}

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
    struct ttm_validate_buffer *entry;

    list_for_each_entry(entry, list, head) {
        struct ttm_buffer_object *bo = entry->bo;
        if (!entry->reserved)
            continue;

        if (!entry->removed) {
            entry->put_count = ttm_bo_del_from_lru(bo);
            entry->removed = true;
        }
    }
}

static void ttm_eu_list_ref_sub(struct list_head *list)
{
    struct ttm_validate_buffer *entry;

    list_for_each_entry(entry, list, head) {
        struct ttm_buffer_object *bo = entry->bo;

        if (entry->put_count) {
            ttm_bo_list_ref_sub(bo, entry->put_count, true);
            entry->put_count = 0;
        }
    }
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                struct list_head *list)
{
    struct ttm_validate_buffer *entry;
    struct ttm_bo_global *glob;

    if (list_empty(list))
        return;

    entry = list_first_entry(list, struct ttm_validate_buffer, head);
    glob = entry->bo->glob;
    spin_lock(&glob->lru_lock);
    ttm_eu_backoff_reservation_locked(list, ticket);
//  ww_acquire_fini(ticket);
    spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
               struct list_head *list)
{
    struct ttm_bo_global *glob;
    struct ttm_validate_buffer *entry;
    int ret;

    if (list_empty(list))
        return 0;

    list_for_each_entry(entry, list, head) {
        entry->reserved = false;
        entry->put_count = 0;
        entry->removed = false;
    }

    entry = list_first_entry(list, struct ttm_validate_buffer, head);
    glob = entry->bo->glob;

//  ww_acquire_init(ticket, &reservation_ww_class);
retry:
    list_for_each_entry(entry, list, head) {
        struct ttm_buffer_object *bo = entry->bo;

        /* already slowpath reserved? */
        if (entry->reserved)
            continue;

        ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);

        if (ret == -EDEADLK) {
            /* uh oh, we lost out, drop every reservation and try
             * to only reserve this buffer, then start over if
             * this succeeds.
             */
            spin_lock(&glob->lru_lock);
            ttm_eu_backoff_reservation_locked(list, ticket);
            spin_unlock(&glob->lru_lock);
            ttm_eu_list_ref_sub(list);
//          ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
//                                 ticket);
            /* With the slow-path lock commented out in this port,
             * ret is still -EDEADLK here, so the deadlock is
             * reported to the caller rather than resolved. */
            if (unlikely(ret != 0)) {
                if (ret == -EINTR)
                    ret = -ERESTARTSYS;
                goto err_fini;
            }

            entry->reserved = true;
            if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                ret = -EBUSY;
                goto err;
            }
            goto retry;
        } else if (ret)
            goto err;

        entry->reserved = true;
        if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
            ret = -EBUSY;
            goto err;
        }
    }

//  ww_acquire_done(ticket);
    spin_lock(&glob->lru_lock);
    ttm_eu_del_from_lru_locked(list);
    spin_unlock(&glob->lru_lock);
    ttm_eu_list_ref_sub(list);
    return 0;

err:
    spin_lock(&glob->lru_lock);
    ttm_eu_backoff_reservation_locked(list, ticket);
    spin_unlock(&glob->lru_lock);
    ttm_eu_list_ref_sub(list);
err_fini:
//  ww_acquire_done(ticket);
//  ww_acquire_fini(ticket);
    return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
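
/*
 * Illustrative execbuf flow (hypothetical helper, not part of this
 * file): collect the buffers a submission touches, reserve them all,
 * validate and emit commands, then fence them in one go. "sync_obj"
 * would come from the driver's command submission.
 */
#if 0
static int example_submit_one(struct ttm_buffer_object *bo, void *sync_obj)
{
    struct ww_acquire_ctx ticket;
    struct ttm_validate_buffer val_buf;
    struct list_head list;
    int ret;

    INIT_LIST_HEAD(&list);
    memset(&val_buf, 0, sizeof(val_buf));
    val_buf.bo = bo;            /* one buffer for brevity */
    list_add(&val_buf.head, &list);

    ret = ttm_eu_reserve_buffers(&ticket, &list);
    if (ret)
        return ret;

    /* ... validate placements and emit GPU commands here ... */

    ttm_eu_fence_buffer_objects(&ticket, &list, sync_obj);
    return 0;
}
#endif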

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                 struct list_head *list, void *sync_obj)
{
    struct ttm_validate_buffer *entry;
    struct ttm_buffer_object *bo;
    struct ttm_bo_global *glob;
    struct ttm_bo_device *bdev;
    struct ttm_bo_driver *driver;

    if (list_empty(list))
        return;

    bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
    bdev = bo->bdev;
    driver = bdev->driver;
    glob = bo->glob;

    spin_lock(&glob->lru_lock);
    spin_lock(&bdev->fence_lock);

    list_for_each_entry(entry, list, head) {
        bo = entry->bo;
        entry->old_sync_obj = bo->sync_obj;
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        ttm_bo_add_to_lru(bo);
//      ww_mutex_unlock(&bo->resv->lock);
        entry->reserved = false;
    }
    spin_unlock(&bdev->fence_lock);
    spin_unlock(&glob->lru_lock);
//  ww_acquire_fini(ticket);

    list_for_each_entry(entry, list, head) {
        if (entry->old_sync_obj)
            driver->sync_obj_unref(&entry->old_sync_obj);
    }
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);

drivers/video/drm/ttm/ttm_memory.c (new file)
@@ -0,0 +1,605 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Local stand-in for Linux's struct sysinfo, used by the zone setup below. */
struct sysinfo {
    u32_t totalram;     /* Total usable main memory size */
    u32_t freeram;      /* Available memory size */
    u32_t sharedram;    /* Amount of shared memory */
    u32_t bufferram;    /* Memory used by buffers */
    u32_t totalswap;    /* Total swap space size */
    u32_t freeswap;     /* swap space still available */
    u32_t totalhigh;    /* Total high memory size */
    u32_t freehigh;     /* Available high memory size */
    u32_t mem_unit;     /* Memory unit size in bytes */
};

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_zone {
    struct kobject kobj;
    struct ttm_mem_global *glob;
    const char *name;
    uint64_t zone_mem;
    uint64_t emer_mem;
    uint64_t max_mem;
    uint64_t swap_limit;
    uint64_t used_mem;
};

#if 0

static struct attribute ttm_mem_sys = {
    .name = "zone_memory",
    .mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
    .name = "emergency_memory",
    .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
    .name = "available_memory",
    .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
    .name = "swap_limit",
    .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
    .name = "used_memory",
    .mode = S_IRUGO
};
#endif

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
    struct ttm_mem_zone *zone =
        container_of(kobj, struct ttm_mem_zone, kobj);

    pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
        zone->name, (unsigned long long)zone->used_mem >> 10);
    kfree(zone);
}

#if 0
static ssize_t ttm_mem_zone_show(struct kobject *kobj,
                 struct attribute *attr,
                 char *buffer)
{
    struct ttm_mem_zone *zone =
        container_of(kobj, struct ttm_mem_zone, kobj);
    uint64_t val = 0;

    spin_lock(&zone->glob->lock);
    if (attr == &ttm_mem_sys)
        val = zone->zone_mem;
    else if (attr == &ttm_mem_emer)
        val = zone->emer_mem;
    else if (attr == &ttm_mem_max)
        val = zone->max_mem;
    else if (attr == &ttm_mem_swap)
        val = zone->swap_limit;
    else if (attr == &ttm_mem_used)
        val = zone->used_mem;
    spin_unlock(&zone->glob->lock);

    return snprintf(buffer, PAGE_SIZE, "%llu\n",
            (unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
                  struct attribute *attr,
                  const char *buffer,
                  size_t size)
{
    struct ttm_mem_zone *zone =
        container_of(kobj, struct ttm_mem_zone, kobj);
    int chars;
    unsigned long val;
    uint64_t val64;

    chars = sscanf(buffer, "%lu", &val);
    if (chars == 0)
        return size;

    val64 = val;
    val64 <<= 10;

    spin_lock(&zone->glob->lock);
    if (val64 > zone->zone_mem)
        val64 = zone->zone_mem;
    if (attr == &ttm_mem_emer) {
        zone->emer_mem = val64;
        if (zone->max_mem > val64)
            zone->max_mem = val64;
    } else if (attr == &ttm_mem_max) {
        zone->max_mem = val64;
        if (zone->emer_mem < val64)
            zone->emer_mem = val64;
    } else if (attr == &ttm_mem_swap)
        zone->swap_limit = val64;
    spin_unlock(&zone->glob->lock);

    ttm_check_swapping(zone->glob);

    return size;
}
#endif

//static struct attribute *ttm_mem_zone_attrs[] = {
//  &ttm_mem_sys,
//  &ttm_mem_emer,
//  &ttm_mem_max,
//  &ttm_mem_swap,
//  &ttm_mem_used,
//  NULL
//};

//static const struct sysfs_ops ttm_mem_zone_ops = {
//  .show = &ttm_mem_zone_show,
//  .store = &ttm_mem_zone_store
//};

static struct kobj_type ttm_mem_zone_kobj_type = {
    .release = &ttm_mem_zone_kobj_release,
//  .sysfs_ops = &ttm_mem_zone_ops,
//  .default_attrs = ttm_mem_zone_attrs,
};

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
    struct ttm_mem_global *glob =
        container_of(kobj, struct ttm_mem_global, kobj);

    kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
    .release = &ttm_mem_global_kobj_release,
};

#if 0
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
                    bool from_wq, uint64_t extra)
{
    unsigned int i;
    struct ttm_mem_zone *zone;
    uint64_t target;

    for (i = 0; i < glob->num_zones; ++i) {
        zone = glob->zones[i];

        if (from_wq)
            target = zone->swap_limit;
        else if (capable(CAP_SYS_ADMIN))
            target = zone->emer_mem;
        else
            target = zone->max_mem;

        target = (extra > target) ? 0ULL : target;

        if (zone->used_mem > target)
            return true;
    }
    return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
               uint64_t extra)
{
    int ret;
    struct ttm_mem_shrink *shrink;

    spin_lock(&glob->lock);
    if (glob->shrink == NULL)
        goto out;

    while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
        shrink = glob->shrink;
        spin_unlock(&glob->lock);
        ret = shrink->do_shrink(shrink);
        spin_lock(&glob->lock);
        if (unlikely(ret != 0))
            goto out;
    }
out:
    spin_unlock(&glob->lock);
}



static void ttm_shrink_work(struct work_struct *work)
{
    struct ttm_mem_global *glob =
        container_of(work, struct ttm_mem_global, work);

    ttm_shrink(glob, true, 0ULL);
}
#endif

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
                    const struct sysinfo *si)
{
    struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
    uint64_t mem;
    int ret;

    if (unlikely(!zone))
        return -ENOMEM;

    /* Size the zone from the supplied sysinfo (the upstream
     * si_meminfo() based sizing) so that "mem" is initialized. */
    mem = si->totalram - si->totalhigh;
    mem *= si->mem_unit;

    zone->name = "kernel";
    zone->zone_mem = mem;
    zone->max_mem = mem >> 1;
    zone->emer_mem = (mem >> 1) + (mem >> 2);
    zone->swap_limit = zone->max_mem - (mem >> 3);
    zone->used_mem = 0;
    zone->glob = glob;
    glob->zone_kernel = zone;
    ret = kobject_init_and_add(
        &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
    if (unlikely(ret != 0)) {
        kobject_put(&zone->kobj);
        return ret;
    }
    glob->zones[glob->num_zones++] = zone;
    return 0;
}

#if 0
#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
                     const struct sysinfo *si)
{
    struct ttm_mem_zone *zone;
    uint64_t mem;
    int ret;

    if (si->totalhigh == 0)
        return 0;

    zone = kzalloc(sizeof(*zone), GFP_KERNEL);
    if (unlikely(!zone))
        return -ENOMEM;

    mem = si->totalram;
    mem *= si->mem_unit;

    zone->name = "highmem";
    zone->zone_mem = mem;
    zone->max_mem = mem >> 1;
    zone->emer_mem = (mem >> 1) + (mem >> 2);
    zone->swap_limit = zone->max_mem - (mem >> 3);
    zone->used_mem = 0;
    zone->glob = glob;
    glob->zone_highmem = zone;
    ret = kobject_init_and_add(
        &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
    if (unlikely(ret != 0)) {
        kobject_put(&zone->kobj);
        return ret;
    }
    glob->zones[glob->num_zones++] = zone;
    return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
                   const struct sysinfo *si)
{
    struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
    uint64_t mem;
    int ret;

    if (unlikely(!zone))
        return -ENOMEM;

    mem = si->totalram;
    mem *= si->mem_unit;

    /**
     * No special dma32 zone needed.
     */

    if (mem <= ((uint64_t) 1ULL << 32)) {
        kfree(zone);
        return 0;
    }

    /*
     * Limit max dma32 memory to 4GB for now
     * until we can figure out how big this
     * zone really is.
     */

    mem = ((uint64_t) 1ULL << 32);
    zone->name = "dma32";
    zone->zone_mem = mem;
    zone->max_mem = mem >> 1;
    zone->emer_mem = (mem >> 1) + (mem >> 2);
    zone->swap_limit = zone->max_mem - (mem >> 3);
    zone->used_mem = 0;
    zone->glob = glob;
    glob->zone_dma32 = zone;
    ret = kobject_init_and_add(
        &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
    if (unlikely(ret != 0)) {
        kobject_put(&zone->kobj);
        return ret;
    }
    glob->zones[glob->num_zones++] = zone;
    return 0;
}
#endif



void ttm_mem_global_release(struct ttm_mem_global *glob)
{
    unsigned int i;
    struct ttm_mem_zone *zone;

    /* let the page allocator first stop the shrink work. */
    ttm_page_alloc_fini();
    ttm_dma_page_alloc_fini();

    flush_workqueue(glob->swap_queue);
    destroy_workqueue(glob->swap_queue);
    glob->swap_queue = NULL;
    for (i = 0; i < glob->num_zones; ++i) {
        zone = glob->zones[i];
        kobject_del(&zone->kobj);
        kobject_put(&zone->kobj);
    }
    kobject_del(&glob->kobj);
    kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
    bool needs_swapping = false;
    unsigned int i;
    struct ttm_mem_zone *zone;

    spin_lock(&glob->lock);
    for (i = 0; i < glob->num_zones; ++i) {
        zone = glob->zones[i];
        if (zone->used_mem > zone->swap_limit) {
            needs_swapping = true;
            break;
        }
    }

    spin_unlock(&glob->lock);

    if (unlikely(needs_swapping))
        (void)queue_work(glob->swap_queue, &glob->work);

}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
                     struct ttm_mem_zone *single_zone,
                     uint64_t amount)
{
    unsigned int i;
    struct ttm_mem_zone *zone;

    spin_lock(&glob->lock);
    for (i = 0; i < glob->num_zones; ++i) {
        zone = glob->zones[i];
        if (single_zone && zone != single_zone)
            continue;
        zone->used_mem -= amount;
    }
    spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
             uint64_t amount)
{
    return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
                  struct ttm_mem_zone *single_zone,
                  uint64_t amount, bool reserve)
{
    uint64_t limit;
    int ret = -ENOMEM;
    unsigned int i;
    struct ttm_mem_zone *zone;

    spin_lock(&glob->lock);
    for (i = 0; i < glob->num_zones; ++i) {
        zone = glob->zones[i];
        if (single_zone && zone != single_zone)
            continue;

        limit = zone->emer_mem;

        if (zone->used_mem > limit)
            goto out_unlock;
    }

    if (reserve) {
        for (i = 0; i < glob->num_zones; ++i) {
            zone = glob->zones[i];
            if (single_zone && zone != single_zone)
                continue;
            zone->used_mem += amount;
        }
    }

    ret = 0;
out_unlock:
    spin_unlock(&glob->lock);
    ttm_check_swapping(glob);

    return ret;
}


static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
                     struct ttm_mem_zone *single_zone,
                     uint64_t memory,
                     bool no_wait, bool interruptible)
{
    int count = TTM_MEMORY_ALLOC_RETRIES;

    while (unlikely(ttm_mem_global_reserve(glob,
                           single_zone,
                           memory, true)
            != 0)) {
        if (no_wait)
            return -ENOMEM;
        if (unlikely(count-- == 0))
            return -ENOMEM;
        ttm_shrink(glob, false, memory + (memory >> 2) + 16);
    }

    return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
             bool no_wait, bool interruptible)
{
    /**
     * Normal allocations of kernel memory are registered in
     * all zones.
     */

    return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
                     interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
                  struct page *page,
                  bool no_wait, bool interruptible)
{

    struct ttm_mem_zone *zone = NULL;

    /**
     * Page allocations may be registered in a single zone
     * only if highmem or !dma32.
     */

#ifdef CONFIG_HIGHMEM
    if (PageHighMem(page) && glob->zone_highmem != NULL)
        zone = glob->zone_highmem;
#else
    if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
        zone = glob->zone_kernel;
#endif
    return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
                     interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
    struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
    if (PageHighMem(page) && glob->zone_highmem != NULL)
        zone = glob->zone_highmem;
#else
    if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
        zone = glob->zone_kernel;
#endif
    ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

#endif

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
    /* Stub in this port: per-page accounting is disabled. */
}

size_t ttm_round_pot(size_t size)
{
    if ((size & (size - 1)) == 0)
        return size;
    else if (size > PAGE_SIZE)
        return PAGE_ALIGN(size);
    else {
        size_t tmp_size = 4;

        while (tmp_size < size)
            tmp_size <<= 1;

        return tmp_size;
    }
    return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
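
/*
 * Worked examples (with 4 KiB pages): ttm_round_pot(96) == 128 (next
 * power of two), ttm_round_pot(4096) == 4096 (already a power of two),
 * and ttm_round_pot(5000) == 8192 (page-aligned once above PAGE_SIZE).
 */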

void ttm_mem_global_free(struct ttm_mem_global *glob,
             uint64_t amount)
{
    /* Stub in this port: global memory accounting is disabled. */
}


int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
             bool no_wait, bool interruptible)
{
    return 0;
}

EXPORT_SYMBOL(ttm_mem_global_alloc);

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
    return 0;
}
EXPORT_SYMBOL(ttm_mem_global_init);

drivers/video/drm/ttm/ttm_object.c (new file)
@@ -0,0 +1,462 @@
/**************************************************************************
 *
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
//#include <linux/atomic.h>

static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
    return atomic_add_unless(&kref->refcount, 1, 0);
}

#define pr_err(fmt, ...) \
    printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

struct ttm_object_file {
    struct ttm_object_device *tdev;
    rwlock_t lock;
    struct list_head ref_list;
    struct drm_open_hash ref_hash[TTM_REF_NUM];
    struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
    spinlock_t object_lock;
    struct drm_open_hash object_hash;
    atomic_t object_count;
    struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
    struct drm_hash_item hash;
    struct list_head head;
    struct kref kref;
    enum ttm_ref_type ref_type;
    struct ttm_base_object *obj;
    struct ttm_object_file *tfile;
};

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
    kref_get(&tfile->refcount);
    return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
    struct ttm_object_file *tfile =
        container_of(kref, struct ttm_object_file, refcount);

    kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
    struct ttm_object_file *tfile = *p_tfile;

    *p_tfile = NULL;
    kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
             struct ttm_base_object *base,
             bool shareable,
             enum ttm_object_type object_type,
             void (*refcount_release) (struct ttm_base_object **),
             void (*ref_obj_release) (struct ttm_base_object *,
                          enum ttm_ref_type ref_type))
{
    struct ttm_object_device *tdev = tfile->tdev;
    int ret;

    base->shareable = shareable;
    base->tfile = ttm_object_file_ref(tfile);
    base->refcount_release = refcount_release;
    base->ref_obj_release = ref_obj_release;
    base->object_type = object_type;
    kref_init(&base->refcount);
    spin_lock(&tdev->object_lock);
    ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
                        &base->hash,
                        (unsigned long)base, 31, 0, 0);
    spin_unlock(&tdev->object_lock);
    if (unlikely(ret != 0))
        goto out_err0;

    ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
    if (unlikely(ret != 0))
        goto out_err1;

    ttm_base_object_unref(&base);

    return 0;
out_err1:
    spin_lock(&tdev->object_lock);
    (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
    spin_unlock(&tdev->object_lock);
out_err0:
    return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);

static void ttm_release_base(struct kref *kref)
{
    struct ttm_base_object *base =
        container_of(kref, struct ttm_base_object, refcount);
    struct ttm_object_device *tdev = base->tfile->tdev;

    spin_lock(&tdev->object_lock);
    (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
    spin_unlock(&tdev->object_lock);

    /*
     * Note: We don't use synchronize_rcu() here because it's far
     * too slow. It's up to the user to free the object using
     * call_rcu() or ttm_base_object_kfree().
     */

    if (base->refcount_release) {
        ttm_object_file_unref(&base->tfile);
        base->refcount_release(&base);
    }
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
    struct ttm_base_object *base = *p_base;

    *p_base = NULL;

    kref_put(&base->refcount, ttm_release_base);
}
EXPORT_SYMBOL(ttm_base_object_unref);

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                           uint32_t key)
{
    struct ttm_object_device *tdev = tfile->tdev;
    struct ttm_base_object *base;
    struct drm_hash_item *hash;
    int ret;

//  rcu_read_lock();
    ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);

    if (likely(ret == 0)) {
        base = drm_hash_entry(hash, struct ttm_base_object, hash);
        ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
    }
//  rcu_read_unlock();

    if (unlikely(ret != 0))
        return NULL;

    if (tfile != base->tfile && !base->shareable) {
        pr_err("Attempted access of non-shareable object\n");
        ttm_base_object_unref(&base);
        return NULL;
    }

    return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);

int ttm_ref_object_add(struct ttm_object_file *tfile,
               struct ttm_base_object *base,
               enum ttm_ref_type ref_type, bool *existed)
{
    struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
    struct ttm_ref_object *ref;
    struct drm_hash_item *hash;
    struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
    int ret = -EINVAL;

    if (existed != NULL)
        *existed = true;

    while (ret == -EINVAL) {
        read_lock(&tfile->lock);
        ret = drm_ht_find_item(ht, base->hash.key, &hash);

        if (ret == 0) {
            ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
            kref_get(&ref->kref);
            read_unlock(&tfile->lock);
            break;
        }

        read_unlock(&tfile->lock);
        ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                       false, false);
        if (unlikely(ret != 0))
            return ret;
        ref = kmalloc(sizeof(*ref), GFP_KERNEL);
        if (unlikely(ref == NULL)) {
            ttm_mem_global_free(mem_glob, sizeof(*ref));
            return -ENOMEM;
        }

        ref->hash.key = base->hash.key;
        ref->obj = base;
        ref->tfile = tfile;
        ref->ref_type = ref_type;
        kref_init(&ref->kref);

        write_lock(&tfile->lock);
        ret = drm_ht_insert_item(ht, &ref->hash);

        if (likely(ret == 0)) {
            list_add_tail(&ref->head, &tfile->ref_list);
            kref_get(&base->refcount);
            write_unlock(&tfile->lock);
            if (existed != NULL)
                *existed = false;
            break;
        }

        write_unlock(&tfile->lock);
        BUG_ON(ret != -EINVAL);

        ttm_mem_global_free(mem_glob, sizeof(*ref));
        kfree(ref);
    }

    return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
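
/*
 * Illustrative pairing (not from this file): a caller that publishes a
 * base object to a client would typically add a usage reference and
 * later drop it by key:
 *
 *      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
 *      ...
 *      ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
 */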

static void ttm_ref_object_release(struct kref *kref)
{
    struct ttm_ref_object *ref =
        container_of(kref, struct ttm_ref_object, kref);
    struct ttm_base_object *base = ref->obj;
    struct ttm_object_file *tfile = ref->tfile;
    struct drm_open_hash *ht;
    struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

    ht = &tfile->ref_hash[ref->ref_type];
    (void)drm_ht_remove_item(ht, &ref->hash);
    list_del(&ref->head);
    write_unlock(&tfile->lock);

    if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
        base->ref_obj_release(base, ref->ref_type);

    ttm_base_object_unref(&ref->obj);
    ttm_mem_global_free(mem_glob, sizeof(*ref));
    kfree(ref);
    write_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                  unsigned long key, enum ttm_ref_type ref_type)
{
    struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
    struct ttm_ref_object *ref;
    struct drm_hash_item *hash;
    int ret;

    write_lock(&tfile->lock);
    ret = drm_ht_find_item(ht, key, &hash);
    if (unlikely(ret != 0)) {
        write_unlock(&tfile->lock);
        return -EINVAL;
    }
    ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
    kref_put(&ref->kref, ttm_ref_object_release);
    write_unlock(&tfile->lock);
    return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
    struct ttm_ref_object *ref;
    struct list_head *list;
    unsigned int i;
    struct ttm_object_file *tfile = *p_tfile;

    *p_tfile = NULL;
    write_lock(&tfile->lock);

    /*
     * Since we release the lock within the loop, we have to
     * restart it from the beginning each time.
     */

    while (!list_empty(&tfile->ref_list)) {
        list = tfile->ref_list.next;
        ref = list_entry(list, struct ttm_ref_object, head);
        ttm_ref_object_release(&ref->kref);
    }

    for (i = 0; i < TTM_REF_NUM; ++i)
        drm_ht_remove(&tfile->ref_hash[i]);

    write_unlock(&tfile->lock);
    ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
                         unsigned int hash_order)
{
    struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
    unsigned int i;
    unsigned int j = 0;
    int ret;

    if (unlikely(tfile == NULL))
        return NULL;

    rwlock_init(&tfile->lock);
    tfile->tdev = tdev;
    kref_init(&tfile->refcount);
    INIT_LIST_HEAD(&tfile->ref_list);

    for (i = 0; i < TTM_REF_NUM; ++i) {
        ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
        if (ret) {
            j = i;
            goto out_err;
        }
    }

    return tfile;
out_err:
    for (i = 0; i < j; ++i)
        drm_ht_remove(&tfile->ref_hash[i]);

    kfree(tfile);

    return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);

struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
                         *mem_glob,
                         unsigned int hash_order)
{
    struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
    int ret;

    if (unlikely(tdev == NULL))
        return NULL;

    tdev->mem_glob = mem_glob;
    spin_lock_init(&tdev->object_lock);
    atomic_set(&tdev->object_count, 0);
    ret = drm_ht_create(&tdev->object_hash, hash_order);

    if (likely(ret == 0))
        return tdev;

    kfree(tdev);
    return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
    struct ttm_object_device *tdev = *p_tdev;

    *p_tdev = NULL;

    spin_lock(&tdev->object_lock);
    drm_ht_remove(&tdev->object_hash);
    spin_unlock(&tdev->object_lock);

    kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
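
/*
 * Illustrative lifecycle (hypothetical driver code; the hash orders
 * shown are arbitrary): one object device per driver instance, one
 * object file per client.
 *
 *      tdev = ttm_object_device_init(mem_glob, 12);
 *      tfile = ttm_object_file_init(tdev, 10);
 *      ...
 *      ttm_object_file_release(&tfile);
 *      ttm_object_device_release(&tdev);
 */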

drivers/video/drm/ttm/ttm_page_alloc.c (new file)
@@ -0,0 +1,923 @@
/*
|
||||
* Copyright (c) Red Hat Inc.
|
||||
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sub license,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie <airlied@redhat.com>
|
||||
* Jerome Glisse <jglisse@redhat.com>
|
||||
* Pauli Nieminen <suokkos@gmail.com>
|
||||
*/
|
||||
|
||||
/* simple list based uncached page pool
|
||||
* - Pool collects resently freed pages for reuse
|
||||
* - Use page->lru to keep a free list
|
||||
* - doesn't track currently in use pages
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[TTM] " fmt
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
//#include <linux/highmem.h>
|
||||
//#include <linux/mm_types.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/seq_file.h> /* for seq_printf */
|
||||
#include <linux/slab.h>
|
||||
//#include <linux/dma-mapping.h>
|
||||
|
||||
//#include <linux/atomic.h>
|
||||
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_page_alloc.h>
|
||||
|
||||
#ifdef TTM_HAS_AGP
|
||||
#include <asm/agp.h>
|
||||
#endif
|
||||
|
||||
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
|
||||
#define SMALL_ALLOCATION 16
|
||||
#define FREE_ALL_PAGES (~0U)
|
||||
/* times are in msecs */
|
||||
#define PAGE_FREE_INTERVAL 1000
|
||||
|
||||
#define pr_err(fmt, ...) \
|
||||
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
|
||||
|
||||
|
||||
|
||||
#if 0
|
||||
/**
|
||||
* struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
|
||||
*
|
||||
* @lock: Protects the shared pool from concurrnet access. Must be used with
|
||||
* irqsave/irqrestore variants because pool allocator maybe called from
|
||||
* delayed work.
|
||||
* @fill_lock: Prevent concurrent calls to fill.
|
||||
* @list: Pool of free uc/wc pages for fast reuse.
|
||||
* @gfp_flags: Flags to pass for alloc_page.
|
||||
* @npages: Number of pages in pool.
|
||||
*/
|
||||
struct ttm_page_pool {
|
||||
spinlock_t lock;
|
||||
bool fill_lock;
|
||||
struct list_head list;
|
||||
gfp_t gfp_flags;
|
||||
unsigned npages;
|
||||
char *name;
|
||||
unsigned long nfrees;
|
||||
unsigned long nrefills;
|
||||
};
|
||||
|
||||
/**
|
||||
* Limits for the pool. They are handled without locks because only place where
|
||||
* they may change is in sysfs store. They won't have immediate effect anyway
|
||||
* so forcing serialization to access them is pointless.
|
||||
*/
|
||||
|
||||
struct ttm_pool_opts {
|
||||
unsigned alloc_size;
|
||||
unsigned max_size;
|
||||
unsigned small;
|
||||
};
|
||||
|
||||
#define NUM_POOLS 4
|
||||
|
||||
/**
|
||||
* struct ttm_pool_manager - Holds memory pools for fst allocation
|
||||
*
|
||||
* Manager is read only object for pool code so it doesn't need locking.
|
||||
*
|
||||
* @free_interval: minimum number of jiffies between freeing pages from pool.
|
||||
* @page_alloc_inited: reference counting for pool allocation.
|
||||
* @work: Work that is used to shrink the pool. Work is only run when there is
|
||||
* some pages to free.
|
||||
* @small_allocation: Limit in number of pages what is small allocation.
|
||||
*
|
||||
* @pools: All pool objects in use.
|
||||
**/
|
||||
struct ttm_pool_manager {
|
||||
struct kobject kobj;
|
||||
struct shrinker mm_shrink;
|
||||
struct ttm_pool_opts options;
|
||||
|
||||
union {
|
||||
struct ttm_page_pool pools[NUM_POOLS];
|
||||
struct {
|
||||
struct ttm_page_pool wc_pool;
|
||||
struct ttm_page_pool uc_pool;
|
||||
struct ttm_page_pool wc_pool_dma32;
|
||||
struct ttm_page_pool uc_pool_dma32;
|
||||
} ;
|
||||
};
|
||||
};
|
||||
|
||||
static struct attribute ttm_page_pool_max = {
|
||||
.name = "pool_max_size",
|
||||
.mode = S_IRUGO | S_IWUSR
|
||||
};
|
||||
static struct attribute ttm_page_pool_small = {
|
||||
.name = "pool_small_allocation",
|
||||
.mode = S_IRUGO | S_IWUSR
|
||||
};
|
||||
static struct attribute ttm_page_pool_alloc_size = {
|
||||
.name = "pool_allocation_size",
|
||||
.mode = S_IRUGO | S_IWUSR
|
||||
};
|
||||
|
||||
static struct attribute *ttm_pool_attrs[] = {
|
||||
&ttm_page_pool_max,
|
||||
&ttm_page_pool_small,
|
||||
&ttm_page_pool_alloc_size,
|
||||
NULL
|
||||
};
|
||||
|
||||
static void ttm_pool_kobj_release(struct kobject *kobj)
|
||||
{
|
||||
struct ttm_pool_manager *m =
|
||||
container_of(kobj, struct ttm_pool_manager, kobj);
|
||||
kfree(m);
|
||||
}
|
||||
|
||||
static ssize_t ttm_pool_store(struct kobject *kobj,
|
||||
struct attribute *attr, const char *buffer, size_t size)
|
||||
{
|
||||
struct ttm_pool_manager *m =
|
||||
container_of(kobj, struct ttm_pool_manager, kobj);
|
||||
int chars;
|
||||
unsigned val;
|
||||
chars = sscanf(buffer, "%u", &val);
|
||||
if (chars == 0)
|
||||
return size;
|
||||
|
||||
/* Convert kb to number of pages */
|
||||
val = val / (PAGE_SIZE >> 10);
|
||||
|
||||
if (attr == &ttm_page_pool_max)
|
||||
m->options.max_size = val;
|
||||
else if (attr == &ttm_page_pool_small)
|
||||
m->options.small = val;
|
||||
else if (attr == &ttm_page_pool_alloc_size) {
|
||||
if (val > NUM_PAGES_TO_ALLOC*8) {
|
||||
pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
|
||||
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
|
||||
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
|
||||
return size;
|
||||
} else if (val > NUM_PAGES_TO_ALLOC) {
|
||||
pr_warn("Setting allocation size to larger than %lu is not recommended\n",
|
||||
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
|
||||
}
|
||||
m->options.alloc_size = val;
|
||||
}
|
||||
|
||||
return size;
|
||||
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
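
/*
 * Example of the resulting mapping: a request with cstate == tt_uncached
 * and TTM_PAGE_FLAG_DMA32 set yields pool_index 0x1 | 0x2 == 3, i.e.
 * &_manager->uc_pool_dma32 through the union above. Cached requests
 * bypass the pools entirely and get NULL.
 */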

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free, or FREE_ALL_PAGES to empty the pool
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(struct shrinker *shrink,
			      struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. If there are
 * any pages that have already changed their caching state, put them back
 * to the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates 'count' based on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk from whichever end of the list is closer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
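
/*
 * Example of the bidirectional cut above: with npages == 10 and
 * count == 3 the loop walks forward three nodes; with count == 8 it
 * walks backward starting from i == 11 until i == 8. Either way 'p'
 * ends up on the last page to hand out, and list_cut_position()
 * detaches pool->list up to and including that node in O(1).
 */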
#endif

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
//	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	for (i = 0; i < npages; i++) {
		if (pages[i]) {
//			if (page_count(pages[i]) != 1)
//				pr_err("Erroneous page count. Leaking pages.\n");
			FreePage(pages[i]);
			pages[i] = NULL;
		}
	}
	return;

#if 0
	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages);
#endif

}

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
//	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
//	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	for (r = 0; r < npages; ++r) {
		p = AllocPage();
		if (!p) {
			pr_err("Unable to allocate page\n");
			return -ENOMEM;
		}

		pages[r] = p;
	}
	return 0;

#if 0
	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			if (PageHighMem(p))
				clear_highpage(p);
			else
				clear_page(page_address(p));
		}
	}

	/* If pool didn't have enough pages allocate new one. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list put them back to
			 * the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}
#endif

	return 0;
}

#if 0
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

#endif

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
387
drivers/video/drm/ttm/ttm_tt.c
Normal file
@ -0,0 +1,387 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <syscall.h>

#include <linux/sched.h>
//#include <linux/highmem.h>
//#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
//#include <linux/file.h>
//#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
//#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
					    sizeof(*ttm->dma_address));
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

//	if (ttm->caching_state == tt_cached)
//		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (likely(ttm->pages != NULL)) {
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	}

//	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
//	    ttm->swap_storage)
//		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printf("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		printf("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm_dma->dma_address);
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

#if 0

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
#endif

/*
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = file_inode(swap_storage)->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = file_inode(swap_storage)->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}

*/
100
drivers/video/drm/vmwgfx/Makefile
Normal file
@ -0,0 +1,100 @@

CC = gcc
LD = ld
AS = as
FASM = fasm.exe

DEFINES = -D__KERNEL__ -DCONFIG_X86_32

DRV_TOPDIR = $(CURDIR)/../../..
DRM_TOPDIR = $(CURDIR)/..

DRV_INCLUDES = $(DRV_TOPDIR)/include

INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
	-I$(DRV_INCLUDES)/linux

CFLAGS = -c -O2 $(INCLUDES) -march=i686 -fomit-frame-pointer -fno-builtin-printf

LIBPATH:= $(DRV_TOPDIR)/ddk

LIBS:= -lddk -lcore -lgcc

LDFLAGS = -nostdlib -shared -s -Map vmwgfx.map --image-base 0 \
	--file-alignment 512 --section-alignment 4096


NAME:= vmwgfx

HFILES:= $(DRV_INCLUDES)/linux/types.h \
	$(DRV_INCLUDES)/linux/list.h \
	$(DRV_INCLUDES)/linux/pci.h \
	$(DRV_INCLUDES)/drm/drm.h \
	$(DRV_INCLUDES)/drm/drmP.h \
	$(DRV_INCLUDES)/drm/drm_edid.h \
	$(DRV_INCLUDES)/drm/drm_crtc.h \
	$(DRV_INCLUDES)/drm/drm_mode.h \
	$(DRV_INCLUDES)/drm/drm_mm.h

NAME_SRC= \
	main.c \
	pci.c \
	vmwgfx_buffer.c \
	vmwgfx_context.c \
	vmwgfx_dmabuf.c \
	vmwgfx_drv.c \
	vmwgfx_execbuf.c \
	vmwgfx_fence.c \
	vmwgfx_fifo.c \
	vmwgfx_gmr.c \
	vmwgfx_gmrid_manager.c \
	vmwgfx_irq.c \
	vmwgfx_kms.c \
	vmwgfx_marker.c \
	vmwgfx_resource.c \
	vmwgfx_scrn.c \
	vmwgfx_surface.c \
	vmwgfx_ttm_glue.c \
	../i2c/i2c-core.c \
	../ttm/ttm_bo.c \
	../ttm/ttm_bo_manager.c \
	../ttm/ttm_execbuf_util.c \
	../ttm/ttm_memory.c \
	../ttm/ttm_object.c \
	../ttm/ttm_page_alloc.c \
	../ttm/ttm_tt.c \
	$(DRM_TOPDIR)/drm_crtc.c \
	$(DRM_TOPDIR)/drm_crtc_helper.c \
	$(DRM_TOPDIR)/drm_edid.c \
	$(DRM_TOPDIR)/drm_global.c \
	$(DRM_TOPDIR)/drm_hashtab.c \
	$(DRM_TOPDIR)/drm_irq.c \
	$(DRM_TOPDIR)/drm_mm.c \
	$(DRM_TOPDIR)/drm_modes.c \
	$(DRM_TOPDIR)/drm_stub.c


NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
	$(patsubst %.c, %.o, $(NAME_SRC))))


all: $(NAME).dll

$(NAME).dll: $(NAME_OBJS) $(LIBPATH)/libcore.a $(LIBPATH)/libddk.a vmw.lds Makefile
	$(LD) -L$(LIBPATH) $(LDFLAGS) -T vmw.lds -o $@ $(NAME_OBJS) $(LIBS)
	kpack $@

%.o : %.c $(HFILES) Makefile
	$(CC) $(CFLAGS) $(DEFINES) -o $@ $<

%.o : %.S $(HFILES) Makefile
	$(AS) -o $@ $<

clean:
	-rm -f */*.o
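
# Typical invocation (a sketch; assumes the KolibriOS cross tools, fasm
# and kpack are already on PATH):
#
#	make clean && make all
#
# which links vmwgfx.dll against libddk/libcore and compresses it in
# place with kpack.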
812
drivers/video/drm/vmwgfx/main.c
Normal file
@ -0,0 +1,812 @@
#include <drm/drmP.h>
#include <drm.h>

#include <linux/kernel.h>
#include <linux/module.h>

#include "vmwgfx_drv.h"

#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <syscall.h>

#include "bitmap.h"

struct pci_device {
	uint16_t	domain;
	uint8_t		bus;
	uint8_t		dev;
	uint8_t		func;
	uint16_t	vendor_id;
	uint16_t	device_id;
	uint16_t	subvendor_id;
	uint16_t	subdevice_id;
	uint32_t	device_class;
	uint8_t		revision;
};

extern struct drm_device *main_device;
extern struct drm_file *drm_file_handlers[256];

int vmw_init(void);
void cpu_detect(void);

void parse_cmdline(char *cmdline, char *log);
int _stdcall display_handler(ioctl_t *io);

int srv_blit_bitmap(u32 hbitmap, int dst_x, int dst_y,
		    int src_x, int src_y, u32 w, u32 h);

int blit_textured(u32 hbitmap, int dst_x, int dst_y,
		  int src_x, int src_y, u32 w, u32 h);

int blit_tex(u32 hbitmap, int dst_x, int dst_y,
	     int src_x, int src_y, u32 w, u32 h);

void get_pci_info(struct pci_device *dev);
int gem_getparam(struct drm_device *dev, void *data);

int i915_mask_update(struct drm_device *dev, void *data,
		     struct drm_file *file);


static char log[256];

struct workqueue_struct *system_wq;
int driver_wq_state;

int x86_clflush_size;
unsigned int tsc_khz;

int i915_modeset = 1;

u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
	int err = 0;

	if (action != 1)
	{
		driver_wq_state = 0;
		return 0;
	};

	if (GetService("DISPLAY") != 0)
		return 0;

	if (cmdline && *cmdline)
		parse_cmdline(cmdline, log);

	if (!dbg_open(log))
	{
//		strcpy(log, "/tmp1/1/vmw.log");
//		strcpy(log, "/RD/1/DRIVERS/VMW.log");
		strcpy(log, "/HD0/1/vmw.log");

		if (!dbg_open(log))
		{
			printf("Can't open %s\nExit\n", log);
			return 0;
		};
	}
	dbgprintf(" vmw v3.10\n cmdline: %s\n", cmdline);

	cpu_detect();
	dbgprintf("\ncache line size %d\n", x86_clflush_size);

	enum_pci_devices();

	err = vmw_init();

	if (err)
	{
		dbgprintf("Epic Fail :(\n");
		return 0;
	};

	err = RegService("DISPLAY", display_handler);

	if (err != 0)
		dbgprintf("Set DISPLAY handler\n");

//	struct drm_i915_private *dev_priv = main_device->dev_private;
//	driver_wq_state = 1;
//	run_workqueue(dev_priv->wq);

	return err;
};


#define CURRENT_API	0x0200	/*	2.00	*/
#define COMPATIBLE_API	0x0100	/*	1.00	*/

#define API_VERSION	((COMPATIBLE_API << 16) | CURRENT_API)
#define DISPLAY_VERSION	API_VERSION


#define SRV_GETVERSION		0
#define SRV_ENUM_MODES		1
#define SRV_SET_MODE		2
#define SRV_GET_CAPS		3

#define SRV_CREATE_SURFACE	10
#define SRV_DESTROY_SURFACE	11
#define SRV_LOCK_SURFACE	12
#define SRV_UNLOCK_SURFACE	13
#define SRV_RESIZE_SURFACE	14
#define SRV_BLIT_BITMAP		15
#define SRV_BLIT_TEXTURE	16
#define SRV_BLIT_VIDEO		17


#define SRV_GET_PCI_INFO		20
#define SRV_GET_PARAM			21
#define SRV_I915_GEM_CREATE		22
#define SRV_DRM_GEM_CLOSE		23
#define SRV_I915_GEM_PIN		24
#define SRV_I915_GEM_SET_CACHEING	25
#define SRV_I915_GEM_GET_APERTURE	26
#define SRV_I915_GEM_PWRITE		27
#define SRV_I915_GEM_BUSY		28
#define SRV_I915_GEM_SET_DOMAIN		29
#define SRV_I915_GEM_MMAP		30
#define SRV_I915_GEM_MMAP_GTT		31
#define SRV_I915_GEM_THROTTLE		32
#define SRV_FBINFO			33
#define SRV_I915_GEM_EXECBUFFER2	34
#define SRV_MASK_UPDATE			35


#define check_input(size) \
	if (unlikely((inp == NULL) || (io->inp_size != (size)))) \
		break;

#define check_output(size) \
	if (unlikely((outp == NULL) || (io->out_size != (size)))) \
		break;

int _stdcall display_handler(ioctl_t *io)
{
	struct drm_file *file;

	int retval = -1;
	u32_t *inp;
	u32_t *outp;

	inp = io->input;
	outp = io->output;

	file = drm_file_handlers[0];

	switch (io->io_code)
	{
	case SRV_GETVERSION:
		check_output(4);
		*outp = DISPLAY_VERSION;
		retval = 0;
		break;
#if 0
	case SRV_ENUM_MODES:
//		dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
//			  inp, io->inp_size, io->out_size);
		check_output(4);
//		check_input(*outp * sizeof(videomode_t));
		if (i915_modeset)
			retval = get_videomodes((videomode_t*)inp, outp);
		break;

	case SRV_SET_MODE:
//		dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
//			  inp, io->inp_size);
		check_input(sizeof(videomode_t));
		if (i915_modeset)
			retval = set_user_mode((videomode_t*)inp);
		break;

	case SRV_GET_CAPS:
		retval = get_driver_caps((hwcaps_t*)inp);
		break;

	case SRV_CREATE_SURFACE:
//		check_input(8);
//		retval = create_surface(main_device, (struct io_call_10*)inp);
		break;

	case SRV_LOCK_SURFACE:
//		retval = lock_surface((struct io_call_12*)inp);
		break;

	case SRV_RESIZE_SURFACE:
//		retval = resize_surface((struct io_call_14*)inp);
		break;

	case SRV_BLIT_BITMAP:
//		srv_blit_bitmap(inp[0], inp[1], inp[2],
//				inp[3], inp[4], inp[5], inp[6]);

//		blit_tex(inp[0], inp[1], inp[2],
//			 inp[3], inp[4], inp[5], inp[6]);

		break;

	case SRV_GET_PCI_INFO:
		get_pci_info((struct pci_device *)inp);
		retval = 0;
		break;

	case SRV_GET_PARAM:
		retval = gem_getparam(main_device, inp);
		break;

	case SRV_I915_GEM_CREATE:
		retval = i915_gem_create_ioctl(main_device, inp, file);
		break;

	case SRV_DRM_GEM_CLOSE:
		retval = drm_gem_close_ioctl(main_device, inp, file);
		break;

	case SRV_I915_GEM_PIN:
		retval = i915_gem_pin_ioctl(main_device, inp, file);
		break;

	case SRV_I915_GEM_SET_CACHEING:
		retval = i915_gem_set_caching_ioctl(main_device, inp, file);
		break;

	case SRV_I915_GEM_GET_APERTURE:
		retval = i915_gem_get_aperture_ioctl(main_device, inp, file);
		break;

	case SRV_I915_GEM_PWRITE:
		retval = i915_gem_pwrite_ioctl(main_device, inp, file);
		break;

	case SRV_I915_GEM_BUSY:
		retval = i915_gem_busy_ioctl(main_device, inp, file);
		break;

	case SRV_I915_GEM_SET_DOMAIN:
		retval = i915_gem_set_domain_ioctl(main_device, inp, file);
		break;

	case SRV_I915_GEM_THROTTLE:
		retval = i915_gem_throttle_ioctl(main_device, inp, file);
		break;

	case SRV_I915_GEM_MMAP:
		retval = i915_gem_mmap_ioctl(main_device, inp, file);
		break;

	case SRV_I915_GEM_MMAP_GTT:
		retval = i915_gem_mmap_gtt_ioctl(main_device, inp, file);
		break;


	case SRV_FBINFO:
		retval = i915_fbinfo(inp);
		break;

	case SRV_I915_GEM_EXECBUFFER2:
		retval = i915_gem_execbuffer2(main_device, inp, file);
		break;

	case SRV_MASK_UPDATE:
		retval = i915_mask_update(main_device, inp, file);
		break;
#endif

	};

	return retval;
}


#define PCI_CLASS_REVISION	0x08
#define PCI_CLASS_DISPLAY_VGA	0x0300
#define PCI_CLASS_BRIDGE_HOST	0x0600
#define PCI_CLASS_BRIDGE_ISA	0x0601

int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
{
	u16_t vendor, device;
	u32_t class;
	int ret = 0;

	vendor = id & 0xffff;
	device = (id >> 16) & 0xffff;

	if (vendor == 0x15AD)
	{
		class = PciRead32(busnr, devfn, PCI_CLASS_REVISION);
		class >>= 16;

		if (class == PCI_CLASS_DISPLAY_VGA)
			ret = 1;
	}
	return ret;
};


static char* parse_path(char *p, char *log)
{
	char c;

	while ((c = *p++) == ' ');
	p--;
	while ((c = *log++ = *p++) && (c != ' '));
	*log = 0;

	return p;
};

void parse_cmdline(char *cmdline, char *log)
{
	char *p = cmdline;

	char c = *p++;

	while (c)
	{
		if (c == '-')
		{
			switch (*p++)
			{
			case 'l':
				p = parse_path(p, log);
				break;
			};
		};
		c = *p++;
	};
};
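
/*
 * Example: booting with the driver parameter string
 *
 *	"-l /HD0/1/vmw.log"
 *
 * makes parse_cmdline() copy "/HD0/1/vmw.log" into 'log', which
 * drvEntry() then passes to dbg_open(). Unknown switches are ignored.
 */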

static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

void cpu_detect(void)
{
	u32 junk, tfms, cap0, misc;

	cpuid(0x00000001, &tfms, &misc, &junk, &cap0);

	if (cap0 & (1<<19))
	{
		x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	}

	tsc_khz = (unsigned int)(GetCpuFreq()/1000);
}
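
/*
 * Worked example: CPUID leaf 1 returns the CLFLUSH line size in EBX
 * bits 15:8, in units of 8 bytes, and bit 19 of EDX advertises CLFLUSH
 * itself. A typical value of 0x08 in those bits therefore yields
 * x86_clflush_size = 8 * 8 = 64 bytes.
 */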

/*
int get_driver_caps(hwcaps_t *caps)
{
	int ret = 0;

	switch (caps->idx)
	{
	case 0:
		caps->opt[0] = 0;
		caps->opt[1] = 0;
		break;

	case 1:
		caps->cap1.max_tex_width  = 4096;
		caps->cap1.max_tex_height = 4096;
		break;
	default:
		ret = 1;
	};
	caps->idx = 1;
	return ret;
}


void get_pci_info(struct pci_device *dev)
{
	struct pci_dev *pdev = main_device->pdev;

	memset(dev, 0, sizeof(*dev));

	dev->domain     = 0;
	dev->bus        = pdev->busnr;
	dev->dev        = pdev->devfn >> 3;
	dev->func       = pdev->devfn & 7;
	dev->vendor_id  = pdev->vendor;
	dev->device_id  = pdev->device;
	dev->revision   = pdev->revision;
};

*/

#include <ddk.h>
#include <linux/mm.h>
#include <drm/drmP.h>
#include <linux/hdmi.h>
#include <linux/ctype.h>

/**
 * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
 * @frame: HDMI AVI infoframe
 *
 * Returns 0 on success or a negative error code on failure.
 */
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
	memset(frame, 0, sizeof(*frame));

	frame->type = HDMI_INFOFRAME_TYPE_AVI;
	frame->version = 2;
	frame->length = 13;

	return 0;
}


static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
	while (bytes) {
		if (*start != value)
			return (void *)start;
		start++;
		bytes--;
	}
	return NULL;
}

/**
 * memchr_inv - Find an unmatching character in an area of memory.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
	u8 value = c;
	u64 value64;
	unsigned int words, prefix;

	if (bytes <= 16)
		return check_bytes8(start, value, bytes);

	value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
	value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
	value64 *= 0x01010101;
	value64 |= value64 << 32;
#else
	value64 |= value64 << 8;
	value64 |= value64 << 16;
	value64 |= value64 << 32;
#endif

	prefix = (unsigned long)start % 8;
	if (prefix) {
		u8 *r;

		prefix = 8 - prefix;
		r = check_bytes8(start, value, prefix);
		if (r)
			return r;
		start += prefix;
		bytes -= prefix;
	}

	words = bytes / 8;

	while (words) {
		if (*(u64 *)start != value64)
			return check_bytes8(start, value, 8);
		start += 8;
		words--;
	}

	return check_bytes8(start, value, bytes % 8);
}
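
/*
 * Usage sketch: memchr_inv() is the inverse of memchr(), handy for
 * "is this buffer all zeroes?" checks:
 *
 *	if (memchr_inv(buf, 0, len) == NULL)
 *		;	// every byte of buf is 0
 *
 * The 64-bit replication of the target byte lets the main loop compare
 * eight bytes per iteration once the pointer is 8-byte aligned.
 */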

int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
	int i;

	i = vsnprintf(buf, size, fmt, args);

	if (likely(i < size))
		return i;
	if (size != 0)
		return size - 1;
	return 0;
}


int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vscnprintf(buf, size, fmt, args);
	va_end(args);

	return i;
}
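
/*
 * Example of why scnprintf() exists: unlike snprintf(), it returns the
 * number of bytes actually written (excluding the NUL), never the
 * would-be length, so chained appends can't run past the buffer:
 *
 *	char buf[8];
 *	int n = scnprintf(buf, sizeof(buf), "value=%d", 12345);
 *	// buf = "value=1", n == 7 (snprintf would have returned 11)
 */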


#define _U	0x01	/* upper */
#define _L	0x02	/* lower */
#define _D	0x04	/* digit */
#define _C	0x08	/* cntrl */
#define _P	0x10	/* punct */
#define _S	0x20	/* white space (space/lf/tab) */
#define _X	0x40	/* hex digit */
#define _SP	0x80	/* hard space (0x20) */

extern const unsigned char _ctype[];

#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)	((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)	((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)	((__ismask(c)&(_C)) != 0)
#define isdigit(c)	((__ismask(c)&(_D)) != 0)
#define isgraph(c)	((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)	((__ismask(c)&(_L)) != 0)
#define isprint(c)	((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)	((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)	((__ismask(c)&(_S)) != 0)
#define isupper(c)	((__ismask(c)&(_U)) != 0)
#define isxdigit(c)	((__ismask(c)&(_D|_X)) != 0)

#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)


//const char hex_asc[] = "0123456789abcdef";

/**
 * hex_to_bin - convert a hex digit to its real value
 * @ch: ascii character represents hex digit
 *
 * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
 * input.
 */
int hex_to_bin(char ch)
{
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	ch = tolower(ch);
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	return -1;
}
EXPORT_SYMBOL(hex_to_bin);

/**
 * hex2bin - convert an ascii hexadecimal string to its binary representation
 * @dst: binary result
 * @src: ascii hexadecimal string
 * @count: result length
 *
 * Return 0 on success, -1 in case of bad input.
 */
int hex2bin(u8 *dst, const char *src, size_t count)
{
	while (count--) {
		int hi = hex_to_bin(*src++);
		int lo = hex_to_bin(*src++);

		if ((hi < 0) || (lo < 0))
			return -1;

		*dst++ = (hi << 4) | lo;
	}
	return 0;
}
EXPORT_SYMBOL(hex2bin);
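
/*
 * Usage example: @count is the length of the *binary* result, so the
 * source string must hold 2 * count hex digits:
 *
 *	u8 mac[6];
 *	hex2bin(mac, "0013d4c3b2a1", sizeof(mac));
 *	// mac = { 0x00, 0x13, 0xd4, 0xc3, 0xb2, 0xa1 }
 */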
|
||||
/**
|
||||
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
|
||||
* @buf: data blob to dump
|
||||
* @len: number of bytes in the @buf
|
||||
* @rowsize: number of bytes to print per line; must be 16 or 32
|
||||
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
|
||||
* @linebuf: where to put the converted data
|
||||
* @linebuflen: total size of @linebuf, including space for terminating NUL
|
||||
* @ascii: include ASCII after the hex output
|
||||
*
|
||||
* hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
|
||||
* 16 or 32 bytes of input data converted to hex + ASCII output.
|
||||
*
|
||||
* Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
|
||||
* to a hex + ASCII dump at the supplied memory location.
|
||||
* The converted output is always NUL-terminated.
|
||||
*
|
||||
* E.g.:
|
||||
* hex_dump_to_buffer(frame->data, frame->len, 16, 1,
|
||||
* linebuf, sizeof(linebuf), true);
|
||||
*
|
||||
* example output buffer:
|
||||
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
|
||||
*/
|
||||
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
|
||||
int groupsize, char *linebuf, size_t linebuflen,
|
||||
bool ascii)
|
||||
{
|
||||
const u8 *ptr = buf;
|
||||
u8 ch;
|
||||
int j, lx = 0;
|
||||
int ascii_column;
|
||||
|
||||
if (rowsize != 16 && rowsize != 32)
|
||||
rowsize = 16;
|
||||
|
||||
if (!len)
|
||||
goto nil;
|
||||
if (len > rowsize) /* limit to one line at a time */
|
||||
len = rowsize;
|
||||
if ((len % groupsize) != 0) /* no mixed size output */
|
||||
groupsize = 1;
|
||||
|
||||
switch (groupsize) {
|
||||
case 8: {
|
||||
const u64 *ptr8 = buf;
|
||||
int ngroups = len / groupsize;
|
||||
|
||||
for (j = 0; j < ngroups; j++)
|
||||
lx += scnprintf(linebuf + lx, linebuflen - lx,
|
||||
"%s%16.16llx", j ? " " : "",
|
||||
(unsigned long long)*(ptr8 + j));
|
||||
ascii_column = 17 * ngroups + 2;
|
||||
break;
|
||||
}
|
||||
|
||||
case 4: {
|
||||
const u32 *ptr4 = buf;
|
||||
int ngroups = len / groupsize;
|
||||
|
||||
for (j = 0; j < ngroups; j++)
|
||||
lx += scnprintf(linebuf + lx, linebuflen - lx,
|
||||
"%s%8.8x", j ? " " : "", *(ptr4 + j));
|
||||
ascii_column = 9 * ngroups + 2;
|
||||
break;
|
||||
}
|
||||
|
||||
case 2: {
|
||||
const u16 *ptr2 = buf;
|
||||
int ngroups = len / groupsize;
|
||||
|
||||
for (j = 0; j < ngroups; j++)
|
||||
lx += scnprintf(linebuf + lx, linebuflen - lx,
|
||||
"%s%4.4x", j ? " " : "", *(ptr2 + j));
|
||||
ascii_column = 5 * ngroups + 2;
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
|
||||
ch = ptr[j];
|
||||
linebuf[lx++] = hex_asc_hi(ch);
|
||||
linebuf[lx++] = hex_asc_lo(ch);
|
||||
linebuf[lx++] = ' ';
|
||||
}
|
||||
if (j)
|
||||
lx--;
|
||||
|
||||
ascii_column = 3 * rowsize + 2;
|
||||
break;
|
||||
}
|
||||
if (!ascii)
|
||||
goto nil;
|
||||
|
||||
while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
|
||||
linebuf[lx++] = ' ';
|
||||
for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
|
||||
ch = ptr[j];
|
||||
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
|
||||
}
|
||||
nil:
|
||||
linebuf[lx++] = '\0';
|
||||
}
|

/**
 * print_hex_dump - print a text hex dump to syslog for a binary blob of data
 * @level: kernel log level (e.g. KERN_DEBUG)
 * @prefix_str: string to prefix each line with;
 *  caller supplies trailing spaces for alignment if desired
 * @prefix_type: controls whether prefix of an offset, address, or none
 *  is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @ascii: include ASCII after the hex output
 *
 * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
 * to the kernel log at the specified kernel log level, with an optional
 * leading prefix.
 *
 * print_hex_dump() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 * print_hex_dump() iterates over the entire input @buf, breaking it into
 * "line size" chunks to format and print.
 *
 * E.g.:
 *   print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
 *                  16, 1, frame->data, frame->len, true);
 *
 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c  pqrstuvwxyz{|}~.
 */
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
                    int rowsize, int groupsize,
                    const void *buf, size_t len, bool ascii)
{
    const u8 *ptr = buf;
    int i, linelen, remaining = len;
    unsigned char linebuf[32 * 3 + 2 + 32 + 1];

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    for (i = 0; i < len; i += rowsize) {
        linelen = min(remaining, rowsize);
        remaining -= rowsize;

        hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
                           linebuf, sizeof(linebuf), ascii);

        switch (prefix_type) {
        case DUMP_PREFIX_ADDRESS:
            printk("%s%s%p: %s\n",
                   level, prefix_str, ptr + i, linebuf);
            break;
        case DUMP_PREFIX_OFFSET:
            printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
            break;
        default:
            printk("%s%s%s\n", level, prefix_str, linebuf);
            break;
        }
    }
}

void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
                          const void *buf, size_t len)
{
    print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
                   buf, len, true);
}
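
/* A hypothetical usage sketch (not part of this file): dump a received
 * buffer at KERN_DEBUG level, 16 bytes per line with a trailing ASCII
 * column, which is what the wrapper above hard-codes. 'frame' is an
 * assumed name for illustration only. */
#if 0
    u8 frame[64];
    print_hex_dump_bytes("vmwgfx: ", DUMP_PREFIX_OFFSET, frame, sizeof(frame));
#endif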
drivers/video/drm/vmwgfx/pci.c (new file, 880 lines)
@@ -0,0 +1,880 @@

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <pci.h>
#include <syscall.h>

extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);

static LIST_HEAD(devices);

/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED    (1<<4)  /* Do not move resource */

#define LEGACY_IO_RESOURCE      (IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/*
 * Translate the low bits of the PCI base
 * to the resource type
 */
static inline unsigned int pci_calc_resource_flags(unsigned int flags)
{
    if (flags & PCI_BASE_ADDRESS_SPACE_IO)
        return IORESOURCE_IO;

    if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
        return IORESOURCE_MEM | IORESOURCE_PREFETCH;

    return IORESOURCE_MEM;
}

static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask)
{
    u32_t size = mask & maxbase;    /* Find the significant bits */

    if (!size)
        return 0;

    /* Get the lowest of them to find the decode size, and
       from that the extent. */
    size = (size & ~(size-1)) - 1;

    /* base == maxbase can be valid only if the BAR has
       already been programmed with all 1s. */
    if (base == maxbase && ((base | size) & mask) != mask)
        return 0;

    return size;
}

static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask)
{
    u64_t size = mask & maxbase;    /* Find the significant bits */

    if (!size)
        return 0;

    /* Get the lowest of them to find the decode size, and
       from that the extent. */
    size = (size & ~(size-1)) - 1;

    /* base == maxbase can be valid only if the BAR has
       already been programmed with all 1s. */
    if (base == maxbase && ((base | size) & mask) != mask)
        return 0;

    return size;
}
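
/* Worked example (hypothetical register values, not taken from this
 * driver): suppose a 32-bit memory BAR reads back l = 0xFEB00008 and,
 * after the all-ones probe write, sz = 0xFFF00008. With
 * mask = PCI_BASE_ADDRESS_MEM_MASK (~0xf):
 *   size           = 0xFFF00000   significant bits
 *   lowest set bit = 0x00100000   the decode size (1 MiB)
 *   returned value = 0x000FFFFF   decode size minus one
 * so pci_read_bases() below ends up with a resource spanning exactly
 * 1 MiB, from res->start to res->start + 0xFFFFF inclusive. */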

static inline int is_64bit_memory(u32_t mask)
{
    if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
        (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
        return 1;
    return 0;
}

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
    u32_t pos, reg, next;
    u32_t l, sz;
    struct resource *res;

    for (pos = 0; pos < howmany; pos = next)
    {
        u64_t l64;
        u64_t sz64;
        u32_t raw_sz;

        next = pos + 1;

        res = &dev->resource[pos];

        reg = PCI_BASE_ADDRESS_0 + (pos << 2);
        l = PciRead32(dev->busnr, dev->devfn, reg);
        PciWrite32(dev->busnr, dev->devfn, reg, ~0);
        sz = PciRead32(dev->busnr, dev->devfn, reg);
        PciWrite32(dev->busnr, dev->devfn, reg, l);

        if (!sz || sz == 0xffffffff)
            continue;

        if (l == 0xffffffff)
            l = 0;

        raw_sz = sz;
        if ((l & PCI_BASE_ADDRESS_SPACE) ==
            PCI_BASE_ADDRESS_SPACE_MEMORY)
        {
            sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
            /*
             * For 64bit prefetchable memory sz could be 0, if the
             * real size is bigger than 4G, so we need to check
             * szhi for that.
             */
            if (!is_64bit_memory(l) && !sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
        }
        else {
            sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
            if (!sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_IO_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
        }
        res->end = res->start + (unsigned long) sz;
        res->flags |= pci_calc_resource_flags(l);
        if (is_64bit_memory(l))
        {
            u32_t szhi, lhi;

            lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
            szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
            sz64 = ((u64_t)szhi << 32) | raw_sz;
            l64 = ((u64_t)lhi << 32) | l;
            sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
            next++;

#if BITS_PER_LONG == 64
            if (!sz64) {
                res->start = 0;
                res->end = 0;
                res->flags = 0;
                continue;
            }
            res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
            res->end = res->start + sz64;
#else
            if (sz64 > 0x100000000ULL) {
                printk(KERN_ERR "PCI: Unable to handle 64-bit "
                       "BAR for device %s\n", pci_name(dev));
                res->start = 0;
                res->flags = 0;
            }
            else if (lhi)
            {
                /* 64-bit wide address, treat as disabled */
                PciWrite32(dev->busnr, dev->devfn, reg,
                           l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
                PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
                res->start = 0;
                res->end = sz;
            }
#endif
        }
    }

    if (rom)
    {
        dev->rom_base_reg = rom;
        res = &dev->resource[PCI_ROM_RESOURCE];

        l = PciRead32(dev->busnr, dev->devfn, rom);
        PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE);
        sz = PciRead32(dev->busnr, dev->devfn, rom);
        PciWrite32(dev->busnr, dev->devfn, rom, l);

        if (l == 0xffffffff)
            l = 0;

        if (sz && sz != 0xffffffff)
        {
            sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);

            if (sz)
            {
                res->flags = (l & IORESOURCE_ROM_ENABLE) |
                             IORESOURCE_MEM | IORESOURCE_PREFETCH |
                             IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
                res->start = l & PCI_ROM_ADDRESS_MASK;
                res->end = res->start + (unsigned long) sz;
            }
        }
    }
}

static void pci_read_irq(struct pci_dev *dev)
{
    u8_t irq;

    irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN);
    dev->pin = irq;
    if (irq)
        irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE);
    dev->irq = irq;
}

int pci_setup_device(struct pci_dev *dev)
{
    u32_t class;

    class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
    dev->revision = class & 0xff;
    class >>= 8;                    /* upper 3 bytes */
    dev->class = class;

    /* "Unknown power state" */
//  dev->current_state = PCI_UNKNOWN;

    /* Early fixups, before probing the BARs */
//  pci_fixup_device(pci_fixup_early, dev);
    class = dev->class >> 8;

    switch (dev->hdr_type)
    {
    case PCI_HEADER_TYPE_NORMAL:    /* standard header */
        if (class == PCI_CLASS_BRIDGE_PCI)
            goto bad;
        pci_read_irq(dev);
        pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
        dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,
                                          PCI_SUBSYSTEM_VENDOR_ID);
        dev->subsystem_device = PciRead16(dev->busnr, dev->devfn,
                                          PCI_SUBSYSTEM_ID);

        /*
         * Do the ugly legacy mode stuff here rather than broken chip
         * quirk code. Legacy mode ATA controllers have fixed
         * addresses. These are not always echoed in BAR0-3, and
         * BAR0-3 in a few cases contain junk!
         */
        if (class == PCI_CLASS_STORAGE_IDE)
        {
            u8_t progif;

            progif = PciRead8(dev->busnr, dev->devfn, PCI_CLASS_PROG);
            if ((progif & 1) == 0)
            {
                dev->resource[0].start = 0x1F0;
                dev->resource[0].end   = 0x1F7;
                dev->resource[0].flags = LEGACY_IO_RESOURCE;
                dev->resource[1].start = 0x3F6;
                dev->resource[1].end   = 0x3F6;
                dev->resource[1].flags = LEGACY_IO_RESOURCE;
            }
            if ((progif & 4) == 0)
            {
                dev->resource[2].start = 0x170;
                dev->resource[2].end   = 0x177;
                dev->resource[2].flags = LEGACY_IO_RESOURCE;
                dev->resource[3].start = 0x376;
                dev->resource[3].end   = 0x376;
                dev->resource[3].flags = LEGACY_IO_RESOURCE;
            }
        }
        break;

    case PCI_HEADER_TYPE_BRIDGE:    /* bridge header */
        if (class != PCI_CLASS_BRIDGE_PCI)
            goto bad;
        /* The PCI-to-PCI bridge spec requires that subtractive
           decoding (i.e. transparent) bridge must have programming
           interface code of 0x01. */
        pci_read_irq(dev);
        dev->transparent = ((dev->class & 0xff) == 1);
        pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
        break;

    case PCI_HEADER_TYPE_CARDBUS:   /* CardBus bridge header */
        if (class != PCI_CLASS_BRIDGE_CARDBUS)
            goto bad;
        pci_read_irq(dev);
        pci_read_bases(dev, 1, 0);
        dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,
                                          PCI_CB_SUBSYSTEM_VENDOR_ID);
        dev->subsystem_device = PciRead16(dev->busnr, dev->devfn,
                                          PCI_CB_SUBSYSTEM_ID);
        break;

    default:                        /* unknown header */
        printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
               pci_name(dev), dev->hdr_type);
        return -1;

    bad:
        printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
               pci_name(dev), class, dev->hdr_type);
        dev->class = PCI_CLASS_NOT_DEFINED;
    }

    /* We found a fine healthy device, go go go... */

    return 0;
}

static pci_dev_t* pci_scan_device(u32_t busnr, int devfn)
{
    pci_dev_t *dev;

    u32_t id;
    u8_t  hdr;

    int timeout = 10;

    id = PciRead32(busnr, devfn, PCI_VENDOR_ID);

    /* some broken boards return 0 or ~0 if a slot is empty: */
    if (id == 0xffffffff || id == 0x00000000 ||
        id == 0x0000ffff || id == 0xffff0000)
        return NULL;

    while (id == 0xffff0001)
    {
        delay(timeout/10);
        timeout *= 2;

        id = PciRead32(busnr, devfn, PCI_VENDOR_ID);

        /* Card hasn't responded in 60 seconds? Must be stuck. */
        if (timeout > 60 * 100)
        {
            printk(KERN_WARNING "Device %02x:%02x.%d not responding\n",
                   busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
            return NULL;
        }
    }

    if (pci_scan_filter(id, busnr, devfn) == 0)
        return NULL;

    hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE);

    dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0);
    if (unlikely(dev == NULL))
        return NULL;

    INIT_LIST_HEAD(&dev->link);

    dev->pci_dev.busnr         = busnr;
    dev->pci_dev.devfn         = devfn;
    dev->pci_dev.hdr_type      = hdr & 0x7f;
    dev->pci_dev.multifunction = !!(hdr & 0x80);
    dev->pci_dev.vendor        = id & 0xffff;
    dev->pci_dev.device        = (id >> 16) & 0xffff;

    pci_setup_device(&dev->pci_dev);

    return dev;
}

int pci_scan_slot(u32_t bus, int devfn)
{
    int func, nr = 0;

    for (func = 0; func < 8; func++, devfn++)
    {
        pci_dev_t *dev;

        dev = pci_scan_device(bus, devfn);
        if (dev)
        {
            list_add(&dev->link, &devices);

            nr++;

            /*
             * If this is a single function device,
             * don't scan past the first function.
             */
            if (!dev->pci_dev.multifunction)
            {
                if (func > 0) {
                    dev->pci_dev.multifunction = 1;
                }
                else {
                    break;
                }
            }
        }
        else {
            if (func == 0)
                break;
        }
    }

    return nr;
}

#define PCI_FIND_CAP_TTL    48

static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn,
                                   u8 pos, int cap, int *ttl)
{
    u8 id;

    while ((*ttl)--) {
        pos = PciRead8(bus, devfn, pos);
        if (pos < 0x40)
            break;
        pos &= ~3;
        id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID);
        if (id == 0xff)
            break;
        if (id == cap)
            return pos;
        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}

static int __pci_find_next_cap(unsigned int bus, unsigned int devfn,
                               u8 pos, int cap)
{
    int ttl = PCI_FIND_CAP_TTL;

    return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

static int __pci_bus_find_cap_start(unsigned int bus,
                                    unsigned int devfn, u8 hdr_type)
{
    u16 status;

    status = PciRead16(bus, devfn, PCI_STATUS);
    if (!(status & PCI_STATUS_CAP_LIST))
        return 0;

    switch (hdr_type) {
    case PCI_HEADER_TYPE_NORMAL:
    case PCI_HEADER_TYPE_BRIDGE:
        return PCI_CAPABILITY_LIST;
    case PCI_HEADER_TYPE_CARDBUS:
        return PCI_CB_CAPABILITY_LIST;
    default:
        return 0;
    }
}

int pci_find_capability(struct pci_dev *dev, int cap)
{
    int pos;

    pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type);
    if (pos)
        pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap);

    return pos;
}
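
/* A hypothetical usage sketch (not part of this file): locate the MSI
 * capability of an already-probed device and read its message-control
 * word. PCI_CAP_ID_MSI (0x05) and the PCI_MSI_FLAGS offset (2) are the
 * standard values from the PCI specification; their availability as
 * macros here is an assumption. */
#if 0
    int pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
    if (pos) {
        u16 ctrl = PciRead16(dev->busnr, dev->devfn, pos + PCI_MSI_FLAGS);
        dbgprintf("MSI capability at 0x%x, flags 0x%x\n", pos, ctrl);
    }
#endif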

int enum_pci_devices(void)
{
    pci_dev_t *dev;
    u32_t last_bus;
    u32_t bus = 0, devfn = 0;

    last_bus = PciApi(1);
    if (unlikely(last_bus == -1))
        return -1;

    for (; bus <= last_bus; bus++)
    {
        for (devfn = 0; devfn < 0x100; devfn += 8)
            pci_scan_slot(bus, devfn);
    }

    for (dev = (pci_dev_t*)devices.next;
         &dev->link != &devices;
         dev = (pci_dev_t*)dev->link.next)
    {
        dbgprintf("PCI device %x:%x bus:%x devfn:%x\n",
                  dev->pci_dev.vendor,
                  dev->pci_dev.device,
                  dev->pci_dev.busnr,
                  dev->pci_dev.devfn);
    }
    return 0;
}

const struct pci_device_id*
find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist)
{
    pci_dev_t *dev;
    const struct pci_device_id *ent;

    for (dev = (pci_dev_t*)devices.next;
         &dev->link != &devices;
         dev = (pci_dev_t*)dev->link.next)
    {
        if (dev->pci_dev.vendor != idlist->vendor)
            continue;

        for (ent = idlist; ent->vendor != 0; ent++)
        {
            if (unlikely(ent->device == dev->pci_dev.device))
            {
                pdev->pci_dev = dev->pci_dev;
                return ent;
            }
        }
    }

    return NULL;
}

struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
{
    pci_dev_t *dev;

    dev = (pci_dev_t*)devices.next;

    if (from != NULL)
    {
        for (; &dev->link != &devices;
             dev = (pci_dev_t*)dev->link.next)
        {
            if (&dev->pci_dev == from)
            {
                dev = (pci_dev_t*)dev->link.next;
                break;
            }
        }
    }

    for (; &dev->link != &devices;
         dev = (pci_dev_t*)dev->link.next)
    {
        if (dev->pci_dev.vendor != vendor)
            continue;

        if (dev->pci_dev.device == device)
            return &dev->pci_dev;
    }
    return NULL;
}

struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
{
    pci_dev_t *dev;

    for (dev = (pci_dev_t*)devices.next;
         &dev->link != &devices;
         dev = (pci_dev_t*)dev->link.next)
    {
        if (dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn)
            return &dev->pci_dev;
    }
    return NULL;
}

struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
{
    pci_dev_t *dev;

    dev = (pci_dev_t*)devices.next;

    if (from != NULL)
    {
        for (; &dev->link != &devices;
             dev = (pci_dev_t*)dev->link.next)
        {
            if (&dev->pci_dev == from)
            {
                dev = (pci_dev_t*)dev->link.next;
                break;
            }
        }
    }

    for (; &dev->link != &devices;
         dev = (pci_dev_t*)dev->link.next)
    {
        if (dev->pci_dev.class == class)
            return &dev->pci_dev;
    }

    return NULL;
}
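
/* A hypothetical usage sketch (not part of this file): iterate every VGA
 * display controller in the device list. dev->class holds the 24-bit class
 * code read in pci_setup_device(), so the 16-bit PCI_CLASS_DISPLAY_VGA
 * value (0x0300) must be shifted left by 8; a prog-if of 0 is assumed. */
#if 0
    struct pci_dev *vga = NULL;
    while ((vga = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, vga)) != NULL)
        dbgprintf("VGA device %x:%x\n", vga->vendor, vga->device);
#endif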

#define PIO_OFFSET      0x10000UL
#define PIO_MASK        0x0ffffUL
#define PIO_RESERVED    0x40000UL

#define IO_COND(addr, is_pio, is_mmio) do {             \
    unsigned long port = (unsigned long __force)addr;   \
    if (port >= PIO_RESERVED) {                         \
        is_mmio;                                        \
    } else if (port > PIO_OFFSET) {                     \
        port &= PIO_MASK;                               \
        is_pio;                                         \
    }                                                   \
} while (0)

/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
    if (port > PIO_MASK)
        return NULL;
    return (void __iomem *)(unsigned long)(port + PIO_OFFSET);
}

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
    resource_size_t start = pci_resource_start(dev, bar);
    resource_size_t len   = pci_resource_len(dev, bar);
    unsigned long   flags = pci_resource_flags(dev, bar);

    if (!len || !start)
        return NULL;
    if (maxlen && len > maxlen)
        len = maxlen;
    if (flags & IORESOURCE_IO)
        return ioport_map(start, len);
    if (flags & IORESOURCE_MEM)
        return ioremap(start, len);
    /* What? */
    return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
    IO_COND(addr, /* nothing */, iounmap(addr));
}
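
/* A hypothetical usage sketch (not part of this file): map one BAR of a
 * device and read its first register. On VMware SVGA II adapters BAR 2
 * conventionally holds the command FIFO, but treat that, and the
 * availability of the usual readl() MMIO accessor, as assumptions. */
#if 0
    void __iomem *fifo = pci_iomap(dev, 2, 0);  /* maxlen 0 = whole BAR */
    if (fifo) {
        u32 first = readl(fifo);
        dbgprintf("FIFO[0] = 0x%x\n", first);
        pci_iounmap(dev, fifo);
    }
#endif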

struct pci_bus_region {
    resource_size_t start;
    resource_size_t end;
};

static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
                        struct resource *res)
{
    region->start = res->start;
    region->end   = res->end;
}

int pci_enable_rom(struct pci_dev *pdev)
{
    struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
    struct pci_bus_region region;
    u32 rom_addr;

    if (!res->flags)
        return -1;

    pcibios_resource_to_bus(pdev, &region, res);
    pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
    rom_addr &= ~PCI_ROM_ADDRESS_MASK;
    rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
    pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
    return 0;
}

void pci_disable_rom(struct pci_dev *pdev)
{
    u32 rom_addr;

    pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
    rom_addr &= ~PCI_ROM_ADDRESS_ENABLE;
    pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
}

/**
 * pci_get_rom_size - obtain the actual size of the ROM image
 * @pdev: target PCI device
 * @rom: kernel virtual pointer to image of ROM
 * @size: size of PCI window
 *
 * Return: size of actual ROM image
 *
 * Determine the actual length of the ROM image.
 * The PCI window size could be much larger than the
 * actual image size.
 */
size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
{
    void __iomem *image;
    int last_image;

    image = rom;
    do {
        void __iomem *pds;
        /* Standard PCI ROMs start out with these bytes 55 AA */
        if (readb(image) != 0x55) {
            dev_err(&pdev->dev, "Invalid ROM contents\n");
            break;
        }
        if (readb(image + 1) != 0xAA)
            break;
        /* get the PCI data structure and check its signature */
        pds = image + readw(image + 24);
        if (readb(pds) != 'P')
            break;
        if (readb(pds + 1) != 'C')
            break;
        if (readb(pds + 2) != 'I')
            break;
        if (readb(pds + 3) != 'R')
            break;
        last_image = readb(pds + 21) & 0x80;
        /* this length is reliable */
        image += readw(pds + 16) * 512;
    } while (!last_image);

    /* never return a size larger than the PCI resource window;
       there are known ROMs that get the size wrong */
    return min((size_t)(image - rom), size);
}

/**
 * pci_map_rom - map a PCI ROM to kernel space
 * @pdev: pointer to pci device struct
 * @size: pointer to receive size of pci window over ROM
 *
 * Return: kernel virtual pointer to image of ROM
 *
 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
 * the shadow BIOS copy will be returned instead of the
 * actual ROM.
 */
void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
    struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
    loff_t start;
    void __iomem *rom;

    /*
     * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
     * memory map if the VGA enable bit of the Bridge Control register is
     * set for embedded VGA.
     */
    if (res->flags & IORESOURCE_ROM_SHADOW) {
        /* primary video rom always starts here */
        start = (loff_t)0xC0000;
        *size = 0x20000;            /* cover C000:0 through E000:0 */
    } else {
        if (res->flags &
            (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
            *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
            return (void __iomem *)(unsigned long)
                pci_resource_start(pdev, PCI_ROM_RESOURCE);
        } else {
            /* assign the ROM an address if it doesn't have one */
//          if (res->parent == NULL &&
//              pci_assign_resource(pdev, PCI_ROM_RESOURCE))
                return NULL;
//          start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
//          *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
//          if (*size == 0)
//              return NULL;

            /* Enable ROM space decodes */
//          if (pci_enable_rom(pdev))
//              return NULL;
        }
    }

    rom = ioremap(start, *size);
    if (!rom) {
        /* restore enable if ioremap fails */
        if (!(res->flags & (IORESOURCE_ROM_ENABLE |
                            IORESOURCE_ROM_SHADOW |
                            IORESOURCE_ROM_COPY)))
            pci_disable_rom(pdev);
        return NULL;
    }

    /*
     * Try to find the true size of the ROM since sometimes the PCI window
     * size is much larger than the actual size of the ROM.
     * True size is important if the ROM is going to be copied.
     */
    *size = pci_get_rom_size(pdev, rom, *size);
    return rom;
}

void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
{
    struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];

    if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
        return;

    iounmap(rom);

    /* Disable again before continuing, leave enabled if pci=rom */
    if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
        pci_disable_rom(pdev);
}

#if 0
void pcibios_set_master(struct pci_dev *dev)
{
    u8 lat;

    /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
    if (pci_is_pcie(dev))
        return;

    pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
    if (lat < 16)
        lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
    else if (lat > pcibios_max_latency)
        lat = pcibios_max_latency;
    else
        return;
    dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
    pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
#endif

static void __pci_set_master(struct pci_dev *dev, bool enable)
{
    u16 old_cmd, cmd;

    pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
    if (enable)
        cmd = old_cmd | PCI_COMMAND_MASTER;
    else
        cmd = old_cmd & ~PCI_COMMAND_MASTER;
    if (cmd != old_cmd) {
        dbgprintf("%s bus mastering\n",
                  enable ? "enabling" : "disabling");
        pci_write_config_word(dev, PCI_COMMAND, cmd);
    }
    dev->is_busmaster = enable;
}

void pci_set_master(struct pci_dev *dev)
{
    __pci_set_master(dev, true);
//  pcibios_set_master(dev);
}
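
/* A hypothetical usage sketch (not part of this file): find the emulated
 * SVGA adapter and let it bus-master. 0x15ad:0x0405 is the usual VMware
 * SVGA II vendor/device pair; treat the IDs as an assumption here. */
#if 0
    struct pci_dev *svga = pci_get_device(0x15ad, 0x0405, NULL);
    if (svga)
        pci_set_master(svga);
#endif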

drivers/video/drm/vmwgfx/svga3d_reg.h (new file, 1896 lines)
File diff suppressed because it is too large.

drivers/video/drm/vmwgfx/svga3d_surfacedefs.h (new file, 909 lines)
@@ -0,0 +1,909 @@
/**************************************************************************
 *
 * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifdef __KERNEL__

#include <drm/vmwgfx_drm.h>
#define surf_size_struct struct drm_vmw_size

#else /* __KERNEL__ */

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
#endif /* ARRAY_SIZE */

#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
#define max_t(type, x, y)   ((x) > (y) ? (x) : (y))
#define surf_size_struct    SVGA3dSize
#define u32                 uint32

#endif /* __KERNEL__ */

#include "svga3d_reg.h"

/*
 * enum svga3d_block_desc describes the active data channels in a block.
 *
 * There can be at most four active channels in a block:
 *    1. Red, bump W, luminance and depth are stored in the first channel.
 *    2. Green, bump V and stencil are stored in the second channel.
 *    3. Blue and bump U are stored in the third channel.
 *    4. Alpha and bump Q are stored in the fourth channel.
 *
 * Block channels can be used to store compressed and buffer data:
 *    1. For compressed formats, only the data channel is used and its size
 *       is equal to that of a singular block in the compression scheme.
 *    2. For buffer formats, only the data channel is used and its size is
 *       exactly one byte in length.
 *    3. In each case the bit depth represents the size of a singular block.
 *
 * Note: Compressed and IEEE formats do not use the bitMask structure.
 */

enum svga3d_block_desc {
    SVGA3DBLOCKDESC_NONE        = 0,        /* No channels are active */
    SVGA3DBLOCKDESC_BLUE        = 1 << 0,   /* Block with red channel data */
    SVGA3DBLOCKDESC_U           = 1 << 0,   /* Block with bump U channel data */
    SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,   /* Block with alternating video U and V */
    SVGA3DBLOCKDESC_GREEN       = 1 << 1,   /* Block with green channel data */
    SVGA3DBLOCKDESC_V           = 1 << 1,   /* Block with bump V channel data */
    SVGA3DBLOCKDESC_STENCIL     = 1 << 1,   /* Block with a stencil channel */
    SVGA3DBLOCKDESC_RED         = 1 << 2,   /* Block with blue channel data */
    SVGA3DBLOCKDESC_W           = 1 << 2,   /* Block with bump W channel data */
    SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,   /* Block with luminance channel data */
    SVGA3DBLOCKDESC_Y           = 1 << 2,   /* Block with video luminance data */
    SVGA3DBLOCKDESC_DEPTH       = 1 << 2,   /* Block with depth channel */
    SVGA3DBLOCKDESC_ALPHA       = 1 << 3,   /* Block with an alpha channel */
    SVGA3DBLOCKDESC_Q           = 1 << 3,   /* Block with bump Q channel data */
    SVGA3DBLOCKDESC_BUFFER      = 1 << 4,   /* Block stores 1 byte of data */
    SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,   /* Block stores n bytes of data
                                               depending on the compression
                                               method used */
    SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,   /* Block stores data in an IEEE
                                               floating point representation
                                               in all channels */
    SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,   /* Three separate blocks store data. */
    SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,   /* Block with U video data */
    SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,  /* Block with V video data */
    SVGA3DBLOCKDESC_EXP         = 1 << 11,  /* Shared exponent */
    SVGA3DBLOCKDESC_SRGB        = 1 << 12,  /* Data is in sRGB format */
    SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,  /* 2 planes of Y, UV, e.g., NV12. */
    SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,  /* 3 planes of separate Y, U, V,
                                               e.g., YV12. */

    SVGA3DBLOCKDESC_RG          = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_GREEN,
    SVGA3DBLOCKDESC_RGB         = SVGA3DBLOCKDESC_RG | SVGA3DBLOCKDESC_BLUE,
    SVGA3DBLOCKDESC_RGB_SRGB    = SVGA3DBLOCKDESC_RGB | SVGA3DBLOCKDESC_SRGB,
    SVGA3DBLOCKDESC_RGBA        = SVGA3DBLOCKDESC_RGB | SVGA3DBLOCKDESC_ALPHA,
    SVGA3DBLOCKDESC_RGBA_SRGB   = SVGA3DBLOCKDESC_RGBA | SVGA3DBLOCKDESC_SRGB,
    SVGA3DBLOCKDESC_UV          = SVGA3DBLOCKDESC_U | SVGA3DBLOCKDESC_V,
    SVGA3DBLOCKDESC_UVL         = SVGA3DBLOCKDESC_UV | SVGA3DBLOCKDESC_LUMINANCE,
    SVGA3DBLOCKDESC_UVW         = SVGA3DBLOCKDESC_UV | SVGA3DBLOCKDESC_W,
    SVGA3DBLOCKDESC_UVWA        = SVGA3DBLOCKDESC_UVW | SVGA3DBLOCKDESC_ALPHA,
    SVGA3DBLOCKDESC_UVWQ        = SVGA3DBLOCKDESC_U | SVGA3DBLOCKDESC_V |
                                  SVGA3DBLOCKDESC_W | SVGA3DBLOCKDESC_Q,
    SVGA3DBLOCKDESC_LA          = SVGA3DBLOCKDESC_LUMINANCE | SVGA3DBLOCKDESC_ALPHA,
    SVGA3DBLOCKDESC_R_FP        = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_IEEE_FP,
    SVGA3DBLOCKDESC_RG_FP       = SVGA3DBLOCKDESC_R_FP | SVGA3DBLOCKDESC_GREEN,
    SVGA3DBLOCKDESC_RGB_FP      = SVGA3DBLOCKDESC_RG_FP | SVGA3DBLOCKDESC_BLUE,
    SVGA3DBLOCKDESC_RGBA_FP     = SVGA3DBLOCKDESC_RGB_FP | SVGA3DBLOCKDESC_ALPHA,
    SVGA3DBLOCKDESC_DS          = SVGA3DBLOCKDESC_DEPTH | SVGA3DBLOCKDESC_STENCIL,
    SVGA3DBLOCKDESC_YUV         = SVGA3DBLOCKDESC_UV_VIDEO | SVGA3DBLOCKDESC_Y,
    SVGA3DBLOCKDESC_AYUV        = SVGA3DBLOCKDESC_ALPHA | SVGA3DBLOCKDESC_Y |
                                  SVGA3DBLOCKDESC_U_VIDEO | SVGA3DBLOCKDESC_V_VIDEO,
    SVGA3DBLOCKDESC_RGBE        = SVGA3DBLOCKDESC_RGB | SVGA3DBLOCKDESC_EXP,
    SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
                                      SVGA3DBLOCKDESC_SRGB,
    SVGA3DBLOCKDESC_NV12        = SVGA3DBLOCKDESC_PLANAR_YUV |
                                  SVGA3DBLOCKDESC_2PLANAR_YUV,
    SVGA3DBLOCKDESC_YV12        = SVGA3DBLOCKDESC_PLANAR_YUV |
                                  SVGA3DBLOCKDESC_3PLANAR_YUV,
};

/*
 * SVGA3dSurfaceDesc describes the actual pixel data.
 *
 * This structure provides the following information:
 *    1. Block description.
 *    2. Dimensions of a block in the surface.
 *    3. Size of block in bytes.
 *    4. Bit depth of the pixel data.
 *    5. Channel bit depths and masks (if applicable).
 */
#define SVGA3D_CHANNEL_DEF(type)    \
    struct {                        \
        union {                     \
            type blue;              \
            type u;                 \
            type uv_video;          \
            type u_video;           \
        };                          \
        union {                     \
            type green;             \
            type v;                 \
            type stencil;           \
            type v_video;           \
        };                          \
        union {                     \
            type red;               \
            type w;                 \
            type luminance;         \
            type y;                 \
            type depth;             \
            type data;              \
        };                          \
        union {                     \
            type alpha;             \
            type q;                 \
            type exp;               \
        };                          \
    }

struct svga3d_surface_desc {
    enum svga3d_block_desc block_desc;
    surf_size_struct block_size;
    u32 bytes_per_block;
    u32 pitch_bytes_per_block;

    struct {
        u32 total;
        SVGA3D_CHANNEL_DEF(uint8);
    } bit_depth;

    struct {
        SVGA3D_CHANNEL_DEF(uint8);
    } bit_offset;
};

static const struct svga3d_surface_desc svga3d_surface_descs[] = {
    {SVGA3DBLOCKDESC_NONE, {1, 1, 1}, 0, 0,
     {0, {{0}, {0}, {0}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_FORMAT_INVALID */
    {SVGA3DBLOCKDESC_RGB, {1, 1, 1}, 4, 4,
     {24, {{8}, {8}, {8}, {0} } }, {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_X8R8G8B8 */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_A8R8G8B8 */
    {SVGA3DBLOCKDESC_RGB, {1, 1, 1}, 2, 2,
     {16, {{5}, {6}, {5}, {0} } }, {{{0}, {5}, {11}, {0} } } },   /* SVGA3D_R5G6B5 */
    {SVGA3DBLOCKDESC_RGB, {1, 1, 1}, 2, 2,
     {15, {{5}, {5}, {5}, {0} } }, {{{0}, {5}, {10}, {0} } } },   /* SVGA3D_X1R5G5B5 */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 2, 2,
     {16, {{5}, {5}, {5}, {1} } }, {{{0}, {5}, {10}, {15} } } },   /* SVGA3D_A1R5G5B5 */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 2, 2,
     {16, {{4}, {4}, {4}, {4} } }, {{{0}, {4}, {8}, {12} } } },   /* SVGA3D_A4R4G4B4 */
    {SVGA3DBLOCKDESC_DEPTH, {1, 1, 1}, 4, 4,
     {32, {{0}, {0}, {32}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_Z_D32 */
    {SVGA3DBLOCKDESC_DEPTH, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {16}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_Z_D16 */
    {SVGA3DBLOCKDESC_DS, {1, 1, 1}, 4, 4,
     {32, {{0}, {8}, {24}, {0} } }, {{{0}, {24}, {0}, {0} } } },   /* SVGA3D_Z_D24S8 */
    {SVGA3DBLOCKDESC_DS, {1, 1, 1}, 2, 2,
     {16, {{0}, {1}, {15}, {0} } }, {{{0}, {15}, {0}, {0} } } },   /* SVGA3D_Z_D15S1 */
    {SVGA3DBLOCKDESC_LUMINANCE, {1, 1, 1}, 1, 1,
     {8, {{0}, {0}, {8}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_LUMINANCE8 */
    {SVGA3DBLOCKDESC_LA, {1, 1, 1}, 1, 1,
     {8, {{0}, {0}, {4}, {4} } }, {{{0}, {0}, {0}, {4} } } },   /* SVGA3D_LUMINANCE4_ALPHA4 */
    {SVGA3DBLOCKDESC_LUMINANCE, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {16}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_LUMINANCE16 */
    {SVGA3DBLOCKDESC_LA, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {8}, {8} } }, {{{0}, {0}, {0}, {8} } } },   /* SVGA3D_LUMINANCE8_ALPHA8 */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 8, 8,
     {64, {{0}, {0}, {64}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT1 */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT2 */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT3 */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT4 */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT5 */
    {SVGA3DBLOCKDESC_UV, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {8}, {8} } }, {{{0}, {0}, {0}, {8} } } },   /* SVGA3D_BUMPU8V8 */
    {SVGA3DBLOCKDESC_UVL, {1, 1, 1}, 2, 2,
     {16, {{5}, {5}, {6}, {0} } }, {{{11}, {6}, {0}, {0} } } },   /* SVGA3D_BUMPL6V5U5 */
    {SVGA3DBLOCKDESC_UVL, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {0} } }, {{{16}, {8}, {0}, {0} } } },   /* SVGA3D_BUMPX8L8V8U8 */
    {SVGA3DBLOCKDESC_UVL, {1, 1, 1}, 3, 3,
     {24, {{8}, {8}, {8}, {0} } }, {{{16}, {8}, {0}, {0} } } },   /* SVGA3D_BUMPL8V8U8 */
    {SVGA3DBLOCKDESC_RGBA_FP, {1, 1, 1}, 8, 8,
     {64, {{16}, {16}, {16}, {16} } }, {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_ARGB_S10E5 */
    {SVGA3DBLOCKDESC_RGBA_FP, {1, 1, 1}, 16, 16,
     {128, {{32}, {32}, {32}, {32} } }, {{{64}, {32}, {0}, {96} } } },   /* SVGA3D_ARGB_S23E8 */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{10}, {10}, {10}, {2} } }, {{{0}, {10}, {20}, {30} } } },   /* SVGA3D_A2R10G10B10 */
    {SVGA3DBLOCKDESC_UV, {1, 1, 1}, 2, 2,
     {16, {{8}, {8}, {0}, {0} } }, {{{8}, {0}, {0}, {0} } } },   /* SVGA3D_V8U8 */
    {SVGA3DBLOCKDESC_UVWQ, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{24}, {16}, {8}, {0} } } },   /* SVGA3D_Q8W8V8U8 */
    {SVGA3DBLOCKDESC_UV, {1, 1, 1}, 2, 2,
     {16, {{8}, {8}, {0}, {0} } }, {{{8}, {0}, {0}, {0} } } },   /* SVGA3D_CxV8U8 */
    {SVGA3DBLOCKDESC_UVL, {1, 1, 1}, 4, 4,
     {24, {{8}, {8}, {8}, {0} } }, {{{16}, {8}, {0}, {0} } } },   /* SVGA3D_X8L8V8U8 */
    {SVGA3DBLOCKDESC_UVWA, {1, 1, 1}, 4, 4,
     {32, {{10}, {10}, {10}, {2} } }, {{{0}, {10}, {20}, {30} } } },   /* SVGA3D_A2W10V10U10 */
    {SVGA3DBLOCKDESC_ALPHA, {1, 1, 1}, 1, 1,
     {8, {{0}, {0}, {0}, {8} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_ALPHA8 */
    {SVGA3DBLOCKDESC_R_FP, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {16}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R_S10E5 */
    {SVGA3DBLOCKDESC_R_FP, {1, 1, 1}, 4, 4,
     {32, {{0}, {0}, {32}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R_S23E8 */
    {SVGA3DBLOCKDESC_RG_FP, {1, 1, 1}, 4, 4,
     {32, {{0}, {16}, {16}, {0} } }, {{{0}, {16}, {0}, {0} } } },   /* SVGA3D_RG_S10E5 */
    {SVGA3DBLOCKDESC_RG_FP, {1, 1, 1}, 8, 8,
     {64, {{0}, {32}, {32}, {0} } }, {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_RG_S23E8 */
    {SVGA3DBLOCKDESC_BUFFER, {1, 1, 1}, 1, 1,
     {8, {{0}, {0}, {8}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BUFFER */
    {SVGA3DBLOCKDESC_DEPTH, {1, 1, 1}, 4, 4,
     {32, {{0}, {0}, {24}, {0} } }, {{{0}, {24}, {0}, {0} } } },   /* SVGA3D_Z_D24X8 */
    {SVGA3DBLOCKDESC_UV, {1, 1, 1}, 4, 4,
     {32, {{16}, {16}, {0}, {0} } }, {{{16}, {0}, {0}, {0} } } },   /* SVGA3D_V16U16 */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 4, 4,
     {32, {{0}, {16}, {16}, {0} } }, {{{0}, {0}, {16}, {0} } } },   /* SVGA3D_G16R16 */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 8, 8,
     {64, {{16}, {16}, {16}, {16} } }, {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_A16B16G16R16 */
    {SVGA3DBLOCKDESC_YUV, {1, 1, 1}, 2, 2,
     {16, {{8}, {0}, {8}, {0} } }, {{{0}, {0}, {8}, {0} } } },   /* SVGA3D_UYVY */
    {SVGA3DBLOCKDESC_YUV, {1, 1, 1}, 2, 2,
     {16, {{8}, {0}, {8}, {0} } }, {{{8}, {0}, {0}, {0} } } },   /* SVGA3D_YUY2 */
    {SVGA3DBLOCKDESC_NV12, {2, 2, 1}, 6, 2,
     {48, {{0}, {0}, {48}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_NV12 */
    {SVGA3DBLOCKDESC_AYUV, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_AYUV */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 16, 16,
     {128, {{32}, {32}, {32}, {32} } }, {{{64}, {32}, {0}, {96} } } },   /* SVGA3D_R32G32B32A32_TYPELESS */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 16, 16,
     {128, {{32}, {32}, {32}, {32} } }, {{{64}, {32}, {0}, {96} } } },   /* SVGA3D_R32G32B32A32_UINT */
    {SVGA3DBLOCKDESC_UVWQ, {1, 1, 1}, 16, 16,
     {128, {{32}, {32}, {32}, {32} } }, {{{64}, {32}, {0}, {96} } } },   /* SVGA3D_R32G32B32A32_SINT */
    {SVGA3DBLOCKDESC_RGB, {1, 1, 1}, 12, 12,
     {96, {{32}, {32}, {32}, {0} } }, {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_TYPELESS */
    {SVGA3DBLOCKDESC_RGB_FP, {1, 1, 1}, 12, 12,
     {96, {{32}, {32}, {32}, {0} } }, {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_FLOAT */
    {SVGA3DBLOCKDESC_RGB, {1, 1, 1}, 12, 12,
     {96, {{32}, {32}, {32}, {0} } }, {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_UINT */
    {SVGA3DBLOCKDESC_UVW, {1, 1, 1}, 12, 12,
     {96, {{32}, {32}, {32}, {0} } }, {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_SINT */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 8, 8,
     {64, {{16}, {16}, {16}, {16} } }, {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_R16G16B16A16_TYPELESS */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 8, 8,
     {64, {{16}, {16}, {16}, {16} } }, {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_R16G16B16A16_UINT */
    {SVGA3DBLOCKDESC_UVWQ, {1, 1, 1}, 8, 8,
     {64, {{16}, {16}, {16}, {16} } }, {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_R16G16B16A16_SNORM */
    {SVGA3DBLOCKDESC_UVWQ, {1, 1, 1}, 8, 8,
     {64, {{16}, {16}, {16}, {16} } }, {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_R16G16B16A16_SINT */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 8, 8,
     {64, {{0}, {32}, {32}, {0} } }, {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32_TYPELESS */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 8, 8,
     {64, {{0}, {32}, {32}, {0} } }, {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32_UINT */
    {SVGA3DBLOCKDESC_UV, {1, 1, 1}, 8, 8,
     {64, {{0}, {32}, {32}, {0} } }, {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32_SINT */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 8, 8,
     {64, {{0}, {8}, {32}, {0} } }, {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_R32G8X24_TYPELESS */
    {SVGA3DBLOCKDESC_DS, {1, 1, 1}, 8, 8,
     {64, {{0}, {8}, {32}, {0} } }, {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_D32_FLOAT_S8X24_UINT */
    {SVGA3DBLOCKDESC_R_FP, {1, 1, 1}, 8, 8,
     {64, {{0}, {0}, {32}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
    {SVGA3DBLOCKDESC_GREEN, {1, 1, 1}, 8, 8,
     {64, {{0}, {8}, {0}, {0} } }, {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_X32_TYPELESS_G8X24_UINT */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{10}, {10}, {10}, {2} } }, {{{0}, {10}, {20}, {30} } } },   /* SVGA3D_R10G10B10A2_TYPELESS */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{10}, {10}, {10}, {2} } }, {{{0}, {10}, {20}, {30} } } },   /* SVGA3D_R10G10B10A2_UINT */
    {SVGA3DBLOCKDESC_RGB_FP, {1, 1, 1}, 4, 4,
     {32, {{10}, {11}, {11}, {0} } }, {{{0}, {10}, {21}, {0} } } },   /* SVGA3D_R11G11B10_FLOAT */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_TYPELESS */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM */
    {SVGA3DBLOCKDESC_RGBA_SRGB, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM_SRGB */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UINT */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_SINT */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 4, 4,
     {32, {{0}, {16}, {16}, {0} } }, {{{0}, {16}, {0}, {0} } } },   /* SVGA3D_R16G16_TYPELESS */
    {SVGA3DBLOCKDESC_RG_FP, {1, 1, 1}, 4, 4,
     {32, {{0}, {16}, {16}, {0} } }, {{{0}, {16}, {0}, {0} } } },   /* SVGA3D_R16G16_UINT */
    {SVGA3DBLOCKDESC_UV, {1, 1, 1}, 4, 4,
     {32, {{0}, {16}, {16}, {0} } }, {{{0}, {16}, {0}, {0} } } },   /* SVGA3D_R16G16_SINT */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 4, 4,
     {32, {{0}, {0}, {32}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R32_TYPELESS */
    {SVGA3DBLOCKDESC_DEPTH, {1, 1, 1}, 4, 4,
     {32, {{0}, {0}, {32}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_D32_FLOAT */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 4, 4,
     {32, {{0}, {0}, {32}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R32_UINT */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 4, 4,
     {32, {{0}, {0}, {32}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R32_SINT */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 4, 4,
     {32, {{0}, {8}, {24}, {0} } }, {{{0}, {24}, {0}, {0} } } },   /* SVGA3D_R24G8_TYPELESS */
    {SVGA3DBLOCKDESC_DS, {1, 1, 1}, 4, 4,
     {32, {{0}, {8}, {24}, {0} } }, {{{0}, {24}, {0}, {0} } } },   /* SVGA3D_D24_UNORM_S8_UINT */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 4, 4,
     {32, {{0}, {0}, {24}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R24_UNORM_X8_TYPELESS */
    {SVGA3DBLOCKDESC_GREEN, {1, 1, 1}, 4, 4,
     {32, {{0}, {8}, {0}, {0} } }, {{{0}, {24}, {0}, {0} } } },   /* SVGA3D_X24_TYPELESS_G8_UINT */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 2, 2,
     {16, {{0}, {8}, {8}, {0} } }, {{{0}, {8}, {0}, {0} } } },   /* SVGA3D_R8G8_TYPELESS */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 2, 2,
     {16, {{0}, {8}, {8}, {0} } }, {{{0}, {8}, {0}, {0} } } },   /* SVGA3D_R8G8_UNORM */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 2, 2,
     {16, {{0}, {8}, {8}, {0} } }, {{{0}, {8}, {0}, {0} } } },   /* SVGA3D_R8G8_UINT */
    {SVGA3DBLOCKDESC_UV, {1, 1, 1}, 2, 2,
     {16, {{0}, {8}, {8}, {0} } }, {{{0}, {8}, {0}, {0} } } },   /* SVGA3D_R8G8_SINT */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {16}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R16_TYPELESS */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {16}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R16_UNORM */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {16}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R16_UINT */
    {SVGA3DBLOCKDESC_U, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {16}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R16_SNORM */
    {SVGA3DBLOCKDESC_U, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {16}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R16_SINT */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 1, 1,
     {8, {{0}, {0}, {8}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R8_TYPELESS */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 1, 1,
     {8, {{0}, {0}, {8}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R8_UNORM */
    {SVGA3DBLOCKDESC_RED, {1, 1, 1}, 1, 1,
     {8, {{0}, {0}, {8}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R8_UINT */
    {SVGA3DBLOCKDESC_U, {1, 1, 1}, 1, 1,
     {8, {{0}, {0}, {8}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R8_SNORM */
    {SVGA3DBLOCKDESC_U, {1, 1, 1}, 1, 1,
     {8, {{0}, {0}, {8}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R8_SINT */
    {SVGA3DBLOCKDESC_RED, {8, 1, 1}, 1, 1,
     {8, {{0}, {0}, {8}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R1_UNORM */
    {SVGA3DBLOCKDESC_RGBE, {1, 1, 1}, 4, 4,
     {32, {{9}, {9}, {9}, {5} } }, {{{18}, {9}, {0}, {27} } } },   /* SVGA3D_R9G9B9E5_SHAREDEXP */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 2, 2,
     {16, {{0}, {8}, {8}, {0} } }, {{{0}, {8}, {0}, {0} } } },   /* SVGA3D_R8G8_B8G8_UNORM */
    {SVGA3DBLOCKDESC_RG, {1, 1, 1}, 2, 2,
     {16, {{0}, {8}, {8}, {0} } }, {{{0}, {8}, {0}, {0} } } },   /* SVGA3D_G8R8_G8B8_UNORM */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 8, 8,
     {64, {{0}, {0}, {64}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC1_TYPELESS */
    {SVGA3DBLOCKDESC_COMPRESSED_SRGB, {4, 4, 1}, 8, 8,
     {64, {{0}, {0}, {64}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC1_UNORM_SRGB */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC2_TYPELESS */
    {SVGA3DBLOCKDESC_COMPRESSED_SRGB, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC2_UNORM_SRGB */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC3_TYPELESS */
    {SVGA3DBLOCKDESC_COMPRESSED_SRGB, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC3_UNORM_SRGB */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 8, 8,
     {64, {{0}, {0}, {64}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC4_TYPELESS */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 8, 8,
     {64, {{0}, {0}, {64}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC4_UNORM */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 8, 8,
     {64, {{0}, {0}, {64}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC4_SNORM */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC5_TYPELESS */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC5_UNORM */
    {SVGA3DBLOCKDESC_COMPRESSED, {4, 4, 1}, 16, 16,
     {128, {{0}, {0}, {128}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BC5_SNORM */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{10}, {10}, {10}, {2} } }, {{{0}, {10}, {20}, {30} } } },   /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
    {SVGA3DBLOCKDESC_RGBA, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_TYPELESS */
    {SVGA3DBLOCKDESC_RGBA_SRGB, {1, 1, 1}, 4, 4,
     {32, {{8}, {8}, {8}, {8} } }, {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_UNORM_SRGB */
    {SVGA3DBLOCKDESC_RGB, {1, 1, 1}, 4, 4,
     {24, {{8}, {8}, {8}, {0} } }, {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_TYPELESS */
    {SVGA3DBLOCKDESC_RGB_SRGB, {1, 1, 1}, 4, 4,
     {24, {{8}, {8}, {8}, {0} } }, {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_UNORM_SRGB */
    {SVGA3DBLOCKDESC_DEPTH, {1, 1, 1}, 2, 2,
     {16, {{0}, {0}, {16}, {0} } }, {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_Z_DF16 */
    {SVGA3DBLOCKDESC_DS, {1, 1, 1}, 4, 4,
     {32, {{0}, {8}, {24}, {0} } }, {{{0}, {24}, {0}, {0} } } },   /* SVGA3D_Z_DF24 */
    {SVGA3DBLOCKDESC_DS, {1, 1, 1}, 4, 4,
     {32, {{0}, {8}, {24}, {0} } }, {{{0}, {24}, {0}, {0} } } },   /* SVGA3D_Z_D24S8_INT */
};

static inline u32 clamped_umul32(u32 a, u32 b)
{
    uint64_t tmp = (uint64_t)a * b;
    return (tmp > (uint64_t)((u32) -1)) ? (u32) -1 : tmp;
}
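
/* Worked example (hypothetical values): clamped_umul32(0x10000, 0x10000)
 * is 0x100000000 in 64-bit arithmetic, which exceeds the largest u32, so
 * the helper returns 0xffffffff instead of silently wrapping to 0. That
 * saturation is what keeps the buffer-size math below overflow-safe. */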

static inline const struct svga3d_surface_desc *
svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
{
    if (format < ARRAY_SIZE(svga3d_surface_descs))
        return &svga3d_surface_descs[format];

    return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
}

/*
 *----------------------------------------------------------------------
 *
 * svga3dsurface_get_mip_size --
 *
 *      Given a base level size and the mip level, compute the size of
 *      the mip level.
 *
 * Results:
 *      See above.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static inline surf_size_struct
svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
{
    surf_size_struct size;

    size.width  = max_t(u32, base_level.width  >> mip_level, 1);
    size.height = max_t(u32, base_level.height >> mip_level, 1);
    size.depth  = max_t(u32, base_level.depth  >> mip_level, 1);
    return size;
}
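
/* Worked example (hypothetical sizes): a 256x128x1 base level at
 * mip_level 2 gives 256 >> 2 = 64 and 128 >> 2 = 32, i.e. a 64x32x1 mip;
 * the max_t() clamp keeps every dimension at least 1 once the shift
 * would otherwise reach zero. */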

static inline void
svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
                                 const surf_size_struct *pixel_size,
                                 surf_size_struct *block_size)
{
    block_size->width  = DIV_ROUND_UP(pixel_size->width,
                                      desc->block_size.width);
    block_size->height = DIV_ROUND_UP(pixel_size->height,
                                      desc->block_size.height);
    block_size->depth  = DIV_ROUND_UP(pixel_size->depth,
                                      desc->block_size.depth);
}

static inline bool
svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
{
    return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
}

static inline u32
svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
                              const surf_size_struct *size)
{
    u32 pitch;
    surf_size_struct blocks;

    svga3dsurface_get_size_in_blocks(desc, size, &blocks);

    pitch = blocks.width * desc->pitch_bytes_per_block;

    return pitch;
}

/*
 *-----------------------------------------------------------------------------
 *
 * svga3dsurface_get_image_buffer_size --
 *
 *      Return the number of bytes of buffer space required to store
 *      one image of a surface, optionally using the specified pitch.
 *
 *      If pitch is zero, it is assumed that rows are tightly packed.
 *
 *      This function is overflow-safe. If the result would have
 *      overflowed, instead we return MAX_UINT32.
 *
 * Results:
 *      Byte count.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

static inline u32
svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
                                    const surf_size_struct *size,
                                    u32 pitch)
{
    surf_size_struct image_blocks;
    u32 slice_size, total_size;

    svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);

    if (svga3dsurface_is_planar_surface(desc)) {
        total_size = clamped_umul32(image_blocks.width,
                                    image_blocks.height);
        total_size = clamped_umul32(total_size, image_blocks.depth);
        total_size = clamped_umul32(total_size, desc->bytes_per_block);
        return total_size;
    }

    if (pitch == 0)
        pitch = svga3dsurface_calculate_pitch(desc, size);

    slice_size = clamped_umul32(image_blocks.height, pitch);
    total_size = clamped_umul32(slice_size, image_blocks.depth);

    return total_size;
}
||||
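For a plain linear format the math above collapses to rows times pitch. A hedged sketch: assuming 'desc' describes a format with a 1x1x1 block and 4 bytes per block, a 64x64 image has a pitch of 64 * 4 = 256 and needs 64 * 256 = 16384 bytes.

/* Illustration only: bytes needed for one tightly packed 64x64 image
 * of an assumed 4-bytes-per-pixel linear format. Pitch of 0 means
 * "tightly packed"; the helper derives it itself. */
static inline u32 example_image_bytes(const struct svga3d_surface_desc *desc)
{
	surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };

	return svga3dsurface_get_image_buffer_size(desc, &size, 0);
}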

static inline u32
svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
				  surf_size_struct base_level_size,
				  u32 num_mip_levels,
				  bool cubemap)
{
	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
	u32 total_size = 0;
	u32 mip;

	for (mip = 0; mip < num_mip_levels; mip++) {
		surf_size_struct size =
			svga3dsurface_get_mip_size(base_level_size, mip);
		total_size += svga3dsurface_get_image_buffer_size(desc,
								  &size, 0);
	}

	if (cubemap)
		total_size *= SVGA3D_MAX_SURFACE_FACES;

	return total_size;
}
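The serialized size is just the sum of every mip image, multiplied by the face count for cubemaps. A small sketch of the two cases; illustration only, not driver code:

/* Illustration only: total bytes to serialize an 8-level mip chain of a
 * 128x128 base level, flat and as a cubemap. By construction
 * cube == flat * SVGA3D_MAX_SURFACE_FACES. */
static inline void example_serialized_sizes(SVGA3dSurfaceFormat format)
{
	surf_size_struct base = { .width = 128, .height = 128, .depth = 1 };
	u32 flat = svga3dsurface_get_serialized_size(format, base, 8, false);
	u32 cube = svga3dsurface_get_serialized_size(format, base, 8, true);

	printk(KERN_DEBUG "flat %u bytes, cubemap %u bytes\n", flat, cube);
}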

/**
 * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
 * in an image (or volume).
 *
 * @width: The image width in pixels.
 * @height: The image height in pixels.
 */
static inline u32
svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
			       u32 width, u32 height,
			       u32 x, u32 y, u32 z)
{
	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
	const u32 bd = desc->block_size.depth;
	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
	const u32 offset = (z / bd * imgstride +
			    y / bh * rowstride +
			    x / bw * desc->bytes_per_block);
	return offset;
}
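A quick worked case, assuming a format with a 1x1x1 block and 4 bytes per block: in a 100-pixel-wide image, rowstride = 100 * 4 = 400, so pixel (x=3, y=2, z=0) lands at 2 * 400 + 3 * 4 = 812 bytes.

/* Illustration only, under the 1x1x1-block, 4-bytes-per-block
 * assumption spelled out above: returns 812 for (3, 2, 0) in a
 * 100x100 image. */
static inline u32 example_pixel_offset(SVGA3dSurfaceFormat format)
{
	return svga3dsurface_get_pixel_offset(format, 100, 100, 3, 2, 0);
}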

static inline u32
svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
			       surf_size_struct baseLevelSize,
			       u32 numMipLevels,
			       u32 face,
			       u32 mip)
{
	u32 offset;
	u32 mipChainBytes;
	u32 mipChainBytesToLevel;
	u32 i;
	const struct svga3d_surface_desc *desc;
	surf_size_struct mipSize;
	u32 bytes;

	desc = svga3dsurface_get_desc(format);

	mipChainBytes = 0;
	mipChainBytesToLevel = 0;
	for (i = 0; i < numMipLevels; i++) {
		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
		mipChainBytes += bytes;
		if (i < mip)
			mipChainBytesToLevel += bytes;
	}

	offset = mipChainBytes * face + mipChainBytesToLevel;

	return offset;
}
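Faces are laid out as whole mip chains back to back, so the offset of a given image is a number of full chains plus the partial chain above the requested level. Illustration only:

/* Illustration only: byte offset of face 2, mip 1 of a serialized
 * 64x64 cubemap with a full 7-level chain: two complete mip chains
 * plus the level-0 image of the third chain. */
static inline u32 example_cubemap_image_offset(SVGA3dSurfaceFormat format)
{
	surf_size_struct base = { .width = 64, .height = 64, .depth = 1 };

	return svga3dsurface_get_image_offset(format, base, 7, 2, 1);
}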
|
1552
drivers/video/drm/vmwgfx/svga_reg.h
Normal file
1552
drivers/video/drm/vmwgfx/svga_reg.h
Normal file
File diff suppressed because it is too large
Load Diff
45
drivers/video/drm/vmwgfx/svga_types.h
Normal file
45
drivers/video/drm/vmwgfx/svga_types.h
Normal file
@ -0,0 +1,45 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * Silly typedefs for the svga headers. Currently the headers are shared
 * between all components that talk to svga, and as such they are in a
 * completely different style and use weird defines.
 *
 * This file lets all the ugly be prefixed with svga*.
 */

#ifndef _SVGA_TYPES_H_
#define _SVGA_TYPES_H_

typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint8_t uint8;
typedef int32_t int32;
typedef bool Bool;

#endif
57	drivers/video/drm/vmwgfx/vmw.lds	Normal file
@ -0,0 +1,57 @@
OUTPUT_FORMAT(pei-i386)

ENTRY("_drvEntry")

SECTIONS
{
    . = SIZEOF_HEADERS;
    . = ALIGN(__section_alignment__);

    .text __image_base__ + ( __section_alignment__ < 0x1000 ? . : __section_alignment__ ) :
    {
        *(.text) *(.rdata)
    }

    .data ALIGN(__section_alignment__) :
    {
        *(.data)
    }

    .bss ALIGN(__section_alignment__):
    {
        *(.bss)
        *(COMMON)
    }

    /DISCARD/ :
    {
        *(.debug$S)
        *(.debug$T)
        *(.debug$F)
        *(.drectve)
        *(.edata)
        *(.eh_frame)
    }

    .idata ALIGN(__section_alignment__):
    {
        SORT(*)(.idata$2)
        SORT(*)(.idata$3)
        /* These zeroes mark the end of the import list. */
        LONG (0); LONG (0); LONG (0); LONG (0); LONG (0);
        SORT(*)(.idata$4)
        SORT(*)(.idata$5)
        SORT(*)(.idata$6)
        SORT(*)(.idata$7)
    }

    .reloc ALIGN(__section_alignment__) :
    {
        *(.reloc)
    }
}
46	drivers/video/drm/vmwgfx/vmwgfx.asm	Normal file
@ -0,0 +1,46 @@
use32

db 'MENUET01'
dd 1
dd start
dd i_end
dd mem
dd mem
dd cmdline
dd path

start:
    mov  eax, 68
    mov  ebx, 16
    mov  ecx, sz_display
    int  0x40
    test eax, eax
    jnz  .done              ; FIXME parse command line and
                            ; call service

    xor  eax, eax
    mov  ecx, 1024
    mov  edi, path
    cld
    repne scasb
    dec  edi
    mov  [edi], dword '.dll'
    mov  [edi+4], al
    mov  eax, 68
    mov  ebx, 21
    mov  ecx, path
    mov  edx, cmdline
    int  0x40
.done:
    mov  eax, -1
    int  0x40

sz_display db 'DISPLAY',0

align 4
i_end:
cmdline rb 256
path    rb 1024
        rb 16               ; stack
mem:
352	drivers/video/drm/vmwgfx/vmwgfx_buffer.c	Normal file
@ -0,0 +1,352 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

static uint32_t evictable_placement_flags[] = {
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 3,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct vmw_ttm_tt {
	struct ttm_tt ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
};

static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

	vmw_be->gmr_id = bo_mem->start;

	return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
			    ttm->num_pages, vmw_be->gmr_id);
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
	return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

	ttm_tt_fini(ttm);
	kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
				 unsigned long size, uint32_t page_flags,
				 struct page *dummy_read_page)
{
	struct vmw_ttm_tt *vmw_be;

	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(vmw_be);
		return NULL;
	}

	return &vmw_be->ttm;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit on the number of
		 * slots as well as on the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
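The three memory types initialized above are what every buffer placement in this driver resolves to, and for a GMR-placed buffer the slot id is simply its TTM mem.start (that is what vmw_ttm_bind() forwards to vmw_gmr_bind()). A hedged sketch of inspecting that, not driver code; the bo is assumed reserved by the caller:

/* Illustration only: report where a buffer currently lives. */
static inline void example_report_placement(struct ttm_buffer_object *bo)
{
	if (bo->mem.mem_type == VMW_PL_GMR)
		printk(KERN_DEBUG "bo in GMR slot %lu\n",
		       (unsigned long)bo->mem.start);
	else if (bo->mem.mem_type == TTM_PL_VRAM)
		printk(KERN_DEBUG "bo in VRAM at page %lu\n",
		       (unsigned long)bo->mem.start);
}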

void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
//	struct ttm_object_file *tfile =
//		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return 0; //vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{
	return (void *)
		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj)
{
	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
	return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj)
{
	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
				      DRM_VMW_FENCE_FLAG_EXEC);
}

static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
				  DRM_VMW_FENCE_FLAG_EXEC,
				  lazy, interruptible,
				  VMW_FENCE_WAIT_TIMEOUT);
}

struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &ttm_pool_populate,
	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref,
	.move_notify = NULL,
	.swap_notify = NULL,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};
276	drivers/video/drm/vmwgfx/vmwgfx_context.c	Normal file
@ -0,0 +1,276 @@
/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}
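vmw_context_alloc() is the kernel-internal path: it returns a live, activated context or NULL, and the only cleanup a caller ever does is dropping the reference. A minimal sketch, illustration only:

/* Illustration only: create a device-only context and release it.
 * vmw_hw_context_destroy() runs automatically when the last reference
 * goes away. */
static int example_context_roundtrip(struct vmw_private *dev_priv)
{
	struct vmw_resource *ctx = vmw_context_alloc(dev_priv);

	if (ctx == NULL)
		return -ENOMEM;
	vmw_resource_unreference(&ctx);
	return 0;
}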

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

//	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

#if 0
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
#endif
320	drivers/video/drm/vmwgfx/vmwgfx_dmabuf.c	Normal file
@ -0,0 +1,320 @@
/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

/**
 * vmw_dmabuf_to_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to validate the buffer to.
 * @interruptible: Use interruptible wait.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
			    struct vmw_dma_buffer *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
//	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

//	ret = ttm_write_lock(&vmaster->lock, interruptible);
//	if (unlikely(ret != 0))
//		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, interruptible, false);

	ttm_bo_unreserve(bo);

err:
//	ttm_write_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer if true.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible)
{
//	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *placement;
	int ret;

//	ret = ttm_write_lock(&vmaster->lock, interruptible);
//	if (unlikely(ret != 0))
//		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	if (pin)
		placement = &vmw_vram_gmr_ne_placement;
	else
		placement = &vmw_vram_gmr_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto err_unreserve;

	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	if (pin)
		placement = &vmw_vram_ne_placement;
	else
		placement = &vmw_vram_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);

err_unreserve:
	ttm_bo_unreserve(bo);
err:
//	ttm_write_unlock(&vmaster->lock);
	return ret;
}
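A typical caller pins a buffer wherever the device can see it and lets the helper handle the VRAM-then-GMR fallback. A minimal sketch, illustration only, with error handling trimmed to the essentials:

/* Illustration only: pin a buffer somewhere device-visible, use it,
 * then unpin. vmw_dmabuf_to_vram_or_gmr() tries VRAM first, falls back
 * to a GMR, and evicts as a last resort. */
static int example_pin_for_dma(struct vmw_private *dev_priv,
			       struct vmw_dma_buffer *buf)
{
	int ret = vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, false);

	if (ret != 0)
		return ret;
	/* ... issue FIFO commands referencing the buffer ... */
	return vmw_dmabuf_unpin(dev_priv, buf, false);
}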

/**
 * vmw_dmabuf_to_vram - Move a buffer to vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
		       struct vmw_dma_buffer *buf,
		       bool pin, bool interruptible)
{
	struct ttm_placement *placement;

	if (pin)
		placement = &vmw_vram_ne_placement;
	else
		placement = &vmw_vram_placement;

	return vmw_dmabuf_to_placement(dev_priv, buf,
				       placement,
				       interruptible);
}

/**
 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				bool pin, bool interruptible)
{
//	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	int ret = 0;

	if (pin)
		placement = vmw_vram_ne_placement;
	else
		placement = vmw_vram_placement;
	placement.lpfn = bo->num_pages;

//	ret = ttm_write_lock(&vmaster->lock, interruptible);
//	if (unlikely(ret != 0))
//		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	/* Is this buffer already in vram but not at the start of it? */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);

	ret = ttm_bo_validate(bo, &placement, interruptible, false);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
//	ttm_write_unlock(&vmaster->lock);

	return ret;
}

/**
 * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
		     struct vmw_dma_buffer *buf,
		     bool interruptible)
{
	/*
	 * We could in theory early out if the buffer is
	 * unpinned, but we need to lock and reserve the buffer
	 * anyway, so we don't gain much by that.
	 */
	return vmw_dmabuf_to_placement(dev_priv, buf,
				       &vmw_evictable_placement,
				       interruptible);
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->offset;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}
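The guest pointer hides the two placements from callers: a VRAM buffer is addressed as an offset into the special SVGA_GMR_FRAMEBUFFER region, a GMR buffer by its slot id with offset zero. Illustration only:

/* Illustration only: fetch and dump the guest pointer of a
 * pinned/reserved buffer before embedding it in a FIFO command. */
static inline void example_show_guest_ptr(const struct ttm_buffer_object *bo)
{
	SVGAGuestPtr ptr;

	vmw_bo_get_guest_ptr(bo, &ptr);
	printk(KERN_DEBUG "gmrId %u offset %u\n", ptr.gmrId, ptr.offset);
}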

/**
 * vmw_bo_pin - Pin or unpin a buffer object without moving it.
 *
 * @bo: The buffer object. Must be reserved, and present either in VRAM
 * or GMR memory.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
{
	uint32_t pl_flags;
	struct ttm_placement placement;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	lockdep_assert_held(&bo->resv->lock.base);
	BUG_ON(old_mem_type != TTM_PL_VRAM &&
	       old_mem_type != VMW_PL_GMR);

	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
	if (pin)
		pl_flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl_flags;

	ret = ttm_bo_validate(bo, &placement, false, true);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
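vmw_bo_pin() requires the caller to hold the reservation, as the lockdep assert enforces. A sketch of the reserve/pin/unreserve dance, assuming the TTM calling convention used elsewhere in this file:

/* Illustration only: pin a buffer in place without migrating it. */
static int example_pin_in_place(struct ttm_buffer_object *bo)
{
	int ret = ttm_bo_reserve(bo, false, false, false, 0);

	if (unlikely(ret != 0))
		return ret;
	vmw_bo_pin(bo, true);
	ttm_bo_unreserve(bo);
	return 0;
}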
1119	drivers/video/drm/vmwgfx/vmwgfx_drv.c	Normal file
File diff suppressed because it is too large
787	drivers/video/drm/vmwgfx/vmwgfx_drv.h	Normal file
@ -0,0 +1,787 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
//#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
//#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
//#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20120209"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3

#define ioread32(addr) readl(addr)

static inline void outl(u32 v, u16 port)
{
	asm volatile("outl %0,%1" : : "a" (v), "dN" (port));
}
static inline u32 inl(u16 port)
{
	u32 v;
	asm volatile("inl %1,%0" : "=a" (v) : "dN" (port));
	return v;
}

struct ttm_lock{};
struct ww_acquire_ctx{};

struct vmw_fpriv {
//	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};

enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_max
};

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
};

struct vmw_marker_queue {
	struct list_head head;
	struct timespec lag;
	struct timespec lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};

struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

struct vmw_sw_context{
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct ttm_object_file *tfile;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	uint32_t fence_flags;
	struct ttm_buffer_object *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t memory_size;
	bool has_gmr;
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	int fence_queue_waiters; /* Protected by hw_mutex */
	int goal_queue_waiters; /* Protected by hw_mutex */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool is_opened;
	bool enable_fb;

	/**
	 * Master management.
	 */

//	struct vmw_master *active_master;
//	struct vmw_master fbdev_master;
//	struct notifier_block pm_nb;
	bool suspended;

	struct mutex release_mutex;
	uint32_t num_3d_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}
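vmw_write()/vmw_read() implement the SVGA index/value port protocol: write the register index to the index port, then access the value port. A hedged sketch of programming a mode; SVGA_REG_WIDTH and SVGA_REG_HEIGHT are assumed to come from svga_reg.h, whose diff is suppressed above, and register access in this driver is serialized by hw_mutex:

/* Illustration only: set a display mode through the index/value pair. */
static inline void example_set_mode(struct vmw_private *dev_priv,
				    u32 width, u32 height)
{
	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_WIDTH, width);
	vmw_write(dev_priv, SVGA_REG_HEIGHT, height);
	mutex_unlock(&dev_priv->hw_mutex);
}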
|
||||
|
||||
int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
|
||||
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
|
||||
|
||||
/**
|
||||
* GMR utilities - vmwgfx_gmr.c
|
||||
*/
|
||||
|
||||
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
|
||||
struct page *pages[],
|
||||
unsigned long num_pages,
|
||||
int gmr_id);
|
||||
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
|
||||
|
||||
/**
|
||||
* Resource utilities - vmwgfx_resource.c
|
||||
*/
|
||||
struct vmw_user_resource_conv;
|
||||
extern const struct vmw_user_resource_conv *user_surface_converter;
|
||||
extern const struct vmw_user_resource_conv *user_context_converter;
|
||||
|
||||
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
|
||||
extern void vmw_resource_unreference(struct vmw_resource **p_res);
|
||||
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
|
||||
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             int id,
                             struct vmw_resource **p_res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t handle,
                                  struct vmw_surface **out_surf,
                                  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
        struct vmw_private *dev_priv,
        struct ttm_object_file *tfile,
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
                                struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                           struct vmw_dma_buffer *vmw_bo,
                           size_t size, struct ttm_placement *placement,
                           bool interruptible,
                           void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                         struct ttm_object_file *tfile);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                         uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                                  uint32_t id, struct vmw_dma_buffer **out);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t *inout_id,
                                  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
                                   struct vmw_dma_buffer *new_backup,
                                   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                                     struct ttm_mem_reg *mem);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                                struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
                                   struct vmw_dma_buffer *bo,
                                   struct ttm_placement *placement,
                                   bool interruptible);
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
                              struct vmw_dma_buffer *buf,
                              bool pin, bool interruptible);
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
                                     struct vmw_dma_buffer *buf,
                                     bool pin, bool interruptible);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
                                       struct vmw_dma_buffer *bo,
                                       bool pin, bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
                            struct vmw_dma_buffer *bo,
                            bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
                                 SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
//extern unsigned int vmw_fops_poll(struct file *filp,
//                                  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
                             size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
                         struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
                             struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
                               uint32_t *seqno);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
                                     uint32_t cid);

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
                               struct vmw_private *dev_priv,
                               void __user *user_commands,
                               void *kernel_commands,
                               uint32_t command_size,
                               uint64_t throttle_us,
                               struct drm_vmw_fence_rep __user
                               *user_fence_rep,
                               struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                            struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                                      struct vmw_private *dev_priv,
                                      struct vmw_fence_obj **p_fence,
                                      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                                        struct vmw_fpriv *vmw_fp,
                                        int ret,
                                        struct drm_vmw_fence_rep __user
                                        *user_fence_rep,
                                        struct vmw_fence_obj *fence,
                                        uint32_t fence_handle);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
                          uint32_t seqno, bool interruptible,
                          unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
                             uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
                             bool lazy,
                             bool fifo_idle,
                             uint32_t seqno,
                             bool interruptible,
                             unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
                             struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
                           uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
                           uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
                        struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                          struct ttm_object_file *tfile,
                          struct ttm_buffer_object *bo,
                          SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
                       unsigned width, unsigned height, unsigned pitch,
                       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                uint32_t pitch,
                                uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
                    struct drm_file *file_priv,
                    struct vmw_framebuffer *vfb,
                    struct vmw_surface *surface,
                    uint32_t sid, int32_t destX, int32_t destY,
                    struct drm_vmw_rect *clips,
                    uint32_t num_clips);
int vmw_kms_readback(struct vmw_private *dev_priv,
                     struct drm_file *file_priv,
                     struct vmw_framebuffer *vfb,
                     struct drm_vmw_fence_rep __user *user_fence_rep,
                     struct drm_vmw_rect *clips,
                     uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);

int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
    struct vmw_surface *tmp_srf = *srf;
    struct vmw_resource *res = &tmp_srf->res;
    *srf = NULL;

    vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
    (void) vmw_resource_reference(&srf->res);
    return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
    struct vmw_dma_buffer *tmp_buf = *buf;

    *buf = NULL;
    if (tmp_buf != NULL) {
        struct ttm_buffer_object *bo = &tmp_buf->base;

        ttm_bo_unref(&bo);
    }
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
    if (ttm_bo_reference(&buf->base))
        return buf;
    return NULL;
}
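
/*
 * Usage sketch (illustrative, not part of the driver): the helpers above
 * follow the usual kernel take-a-reference / drop-and-clear pattern. A
 * hypothetical caller that needs to hold on to a dma buffer might look
 * like this:
 *
 *     struct vmw_dma_buffer *buf = vmw_dmabuf_reference(src);
 *
 *     if (buf) {
 *         ... use buf ...
 *         vmw_dmabuf_unreference(&buf);  // buf is NULL afterwards
 *     }
 */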

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
    return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}


#endif
1779	drivers/video/drm/vmwgfx/vmwgfx_execbuf.c	Normal file (diff suppressed: too large)
1157	drivers/video/drm/vmwgfx/vmwgfx_fence.c	Normal file (diff suppressed: too large)
123	drivers/video/drm/vmwgfx/vmwgfx_fence.h	Normal file
@ -0,0 +1,123 @@
/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_FENCE_H_
#define _VMWGFX_FENCE_H_

#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)

struct vmw_private;

struct vmw_fence_manager;

/**
 * enum vmw_action_type - types of actions that can be attached to a fence.
 *
 * @VMW_ACTION_EVENT: deliver a DRM event to user-space once the fence's
 * sequence number has passed.
 */
enum vmw_action_type {
    VMW_ACTION_EVENT = 0,
    VMW_ACTION_MAX
};

struct vmw_fence_action {
    struct list_head head;
    enum vmw_action_type type;
    void (*seq_passed) (struct vmw_fence_action *action);
    void (*cleanup) (struct vmw_fence_action *action);
};
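
/*
 * Sketch of how an action is typically embedded (illustrative only; the
 * concrete event action lives in vmwgfx_fence.c): a user wraps
 * struct vmw_fence_action in a larger struct and recovers it with
 * container_of() from the callbacks.
 *
 *     struct my_action {                  // hypothetical wrapper
 *         struct vmw_fence_action base;
 *         int payload;
 *     };
 *
 *     static void my_seq_passed(struct vmw_fence_action *action)
 *     {
 *         struct my_action *ma =
 *             container_of(action, struct my_action, base);
 *         // runs once the fence's seqno has passed; ma->payload is valid
 *     }
 */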

struct vmw_fence_obj {
    struct kref kref;
    u32 seqno;

    struct vmw_fence_manager *fman;
    struct list_head head;
    uint32_t signaled;
    uint32_t signal_mask;
    struct list_head seq_passed_actions;
    void (*destroy)(struct vmw_fence_obj *fence);
    wait_queue_head_t queue;
};
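
/*
 * Wait sketch (illustrative): a fence consumer typically takes a
 * reference, waits with a timeout, and drops the reference again.
 *
 *     struct vmw_fence_obj *fence = vmw_fence_obj_reference(f);
 *     int ret = vmw_fence_obj_wait(fence, fence->signal_mask,
 *                                  false, true, VMW_FENCE_WAIT_TIMEOUT);
 *     vmw_fence_obj_unreference(&fence);
 */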

extern struct vmw_fence_manager *
vmw_fence_manager_init(struct vmw_private *dev_priv);

extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);

extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);

extern struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence);

extern void vmw_fences_update(struct vmw_fence_manager *fman);

extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
                                   uint32_t flags);

extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
                              bool lazy,
                              bool interruptible, unsigned long timeout);

extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);

extern int vmw_fence_create(struct vmw_fence_manager *fman,
                            uint32_t seqno,
                            uint32_t mask,
                            struct vmw_fence_obj **p_fence);

extern int vmw_user_fence_create(struct drm_file *file_priv,
                                 struct vmw_fence_manager *fman,
                                 uint32_t sequence,
                                 uint32_t mask,
                                 struct vmw_fence_obj **p_fence,
                                 uint32_t *p_handle);

extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);

extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);

extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);

extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                                        struct drm_file *file_priv);

extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);
extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
                                       struct list_head *event_list);
/*
extern int vmw_event_fence_action_queue(struct drm_file *file_priv,
                                        struct vmw_fence_obj *fence,
                                        struct drm_pending_event *event,
                                        uint32_t *tv_sec,
                                        uint32_t *tv_usec,
                                        bool interruptible);
*/

#endif /* _VMWGFX_FENCE_H_ */
575	drivers/video/drm/vmwgfx/vmwgfx_fifo.c	Normal file
@ -0,0 +1,575 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#define mb()    asm volatile("mfence" : : : "memory")
#define rmb()   asm volatile("lfence" : : : "memory")
#define wmb()   asm volatile("sfence" : : : "memory")

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

#define TASK_INTERRUPTIBLE      1
#define TASK_UNINTERRUPTIBLE    2

bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
    uint32_t fifo_min, hwversion;
    const struct vmw_fifo_state *fifo = &dev_priv->fifo;

    if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
        return false;

    fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
    if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
        return false;

    hwversion = ioread32(fifo_mem +
                         ((fifo->capabilities &
                           SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                          SVGA_FIFO_3D_HWVERSION_REVISED :
                          SVGA_FIFO_3D_HWVERSION));

    if (hwversion == 0)
        return false;

    if (hwversion < SVGA3D_HWVERSION_WS8_B1)
        return false;

    /* Non-Screen Object path does not support surfaces */
    if (!dev_priv->sou_priv)
        return false;

    return true;
}

bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
    uint32_t caps;

    if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
        return false;

    caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
    if (caps & SVGA_FIFO_CAP_PITCHLOCK)
        return true;

    return false;
}

int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
    uint32_t max;
    uint32_t min;
    uint32_t dummy;

    ENTER();

    fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
    fifo->static_buffer = KernelAlloc(fifo->static_buffer_size);
    if (unlikely(fifo->static_buffer == NULL))
        return -ENOMEM;

    fifo->dynamic_buffer = NULL;
    fifo->reserved_size = 0;
    fifo->using_bounce_buffer = false;

    mutex_init(&fifo->fifo_mutex);
//    init_rwsem(&fifo->rwsem);

    /*
     * Allow mapping the first page read-only to user-space.
     */

    DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
    DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
    DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

    mutex_lock(&dev_priv->hw_mutex);
    dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
    dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
    dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
    vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

    min = 4;
    if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
        min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
    min <<= 2;

    if (min < PAGE_SIZE)
        min = PAGE_SIZE;

    iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
    iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
    wmb();
    iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
    iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
    iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
    mb();

    vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
    mutex_unlock(&dev_priv->hw_mutex);

    max = ioread32(fifo_mem + SVGA_FIFO_MAX);
    min = ioread32(fifo_mem + SVGA_FIFO_MIN);
    fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

    DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
             (unsigned int) max,
             (unsigned int) min,
             (unsigned int) fifo->capabilities);

    atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
    iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
    vmw_marker_queue_init(&fifo->marker_queue);

    int ret = 0; //vmw_fifo_send_fence(dev_priv, &dummy);
    LEAVE();
    return ret;
}

void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;

    mutex_lock(&dev_priv->hw_mutex);

    if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
        iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
        vmw_write(dev_priv, SVGA_REG_SYNC, reason);
    }

    mutex_unlock(&dev_priv->hw_mutex);
}

void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;

    mutex_lock(&dev_priv->hw_mutex);

    while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

    dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

    vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
              dev_priv->config_done_state);
    vmw_write(dev_priv, SVGA_REG_ENABLE,
              dev_priv->enable_state);
    vmw_write(dev_priv, SVGA_REG_TRACES,
              dev_priv->traces_state);

    mutex_unlock(&dev_priv->hw_mutex);
    vmw_marker_queue_takedown(&fifo->marker_queue);

    if (likely(fifo->static_buffer != NULL)) {
        vfree(fifo->static_buffer);
        fifo->static_buffer = NULL;
    }

    if (likely(fifo->dynamic_buffer != NULL)) {
        vfree(fifo->dynamic_buffer);
        fifo->dynamic_buffer = NULL;
    }
}

static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
    uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
    uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
    uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
    uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

    return ((max - next_cmd) + (stop - min) <= bytes);
}
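
/*
 * Worked example of the free-space arithmetic above (values made up for
 * illustration; [min, max) is the command area and next_cmd >= stop is
 * the common case): with min = 0x1000, max = 0x5000, next_cmd = 0x4000
 * and stop = 0x2000, the free bytes are
 * (max - next_cmd) + (stop - min) = 0x1000 + 0x1000 = 0x2000, i.e. the
 * tail of the ring plus the part the device has already consumed. The
 * "<= bytes" (rather than "<") refuses to fill the ring completely, so
 * next_cmd never catches up with stop from behind, which would look like
 * an empty fifo to the device.
 */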

static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
                               uint32_t bytes, bool interruptible,
                               unsigned long timeout)
{
    int ret = 0;
    unsigned long end_jiffies = GetTimerTicks() + timeout;
    DEFINE_WAIT(__wait);

    DRM_INFO("Fifo wait noirq.\n");

    for (;;) {
//        prepare_to_wait(&dev_priv->fifo_queue, &__wait,
//                        (interruptible) ?
//                        TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
        if (!vmw_fifo_is_full(dev_priv, bytes))
            break;
        if (time_after_eq(GetTimerTicks(), end_jiffies)) {
            ret = -EBUSY;
            DRM_ERROR("SVGA device lockup.\n");
            break;
        }
        delay(1);
    }
//    finish_wait(&dev_priv->fifo_queue, &__wait);
    wake_up_all(&dev_priv->fifo_queue);
    DRM_INFO("Fifo noirq exit.\n");
    return ret;
}

static int vmw_fifo_wait(struct vmw_private *dev_priv,
                         uint32_t bytes, bool interruptible,
                         unsigned long timeout)
{
    long ret = 1L;
    unsigned long irq_flags;

    if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
        return 0;

    vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
    if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
        return vmw_fifo_wait_noirq(dev_priv, bytes,
                                   interruptible, timeout);

    mutex_lock(&dev_priv->hw_mutex);
    if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        outl(SVGA_IRQFLAG_FIFO_PROGRESS,
             dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
        vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
    }
    mutex_unlock(&dev_priv->hw_mutex);

    if (interruptible)
        ret = wait_event_interruptible_timeout
            (dev_priv->fifo_queue,
             !vmw_fifo_is_full(dev_priv, bytes), timeout);
    else
        ret = wait_event_timeout
            (dev_priv->fifo_queue,
             !vmw_fifo_is_full(dev_priv, bytes), timeout);

    if (unlikely(ret == 0))
        ret = -EBUSY;
    else if (likely(ret > 0))
        ret = 0;

    mutex_lock(&dev_priv->hw_mutex);
    if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
        vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
    }
    mutex_unlock(&dev_priv->hw_mutex);

    return ret;
}

/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * If it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or NULL on error (possible hardware hang).
 */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
    struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
    uint32_t max;
    uint32_t min;
    uint32_t next_cmd;
    uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
    int ret;

    mutex_lock(&fifo_state->fifo_mutex);
    max = ioread32(fifo_mem + SVGA_FIFO_MAX);
    min = ioread32(fifo_mem + SVGA_FIFO_MIN);
    next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

    if (unlikely(bytes >= (max - min)))
        goto out_err;

    BUG_ON(fifo_state->reserved_size != 0);
    BUG_ON(fifo_state->dynamic_buffer != NULL);

    fifo_state->reserved_size = bytes;

    while (1) {
        uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
        bool need_bounce = false;
        bool reserve_in_place = false;

        if (next_cmd >= stop) {
            if (likely((next_cmd + bytes < max ||
                        (next_cmd + bytes == max && stop > min))))
                reserve_in_place = true;

            else if (vmw_fifo_is_full(dev_priv, bytes)) {
                ret = vmw_fifo_wait(dev_priv, bytes,
                                    false, 3 * HZ);
                if (unlikely(ret != 0))
                    goto out_err;
            } else
                need_bounce = true;

        } else {

            if (likely((next_cmd + bytes < stop)))
                reserve_in_place = true;
            else {
                ret = vmw_fifo_wait(dev_priv, bytes,
                                    false, 3 * HZ);
                if (unlikely(ret != 0))
                    goto out_err;
            }
        }

        if (reserve_in_place) {
            if (reserveable || bytes <= sizeof(uint32_t)) {
                fifo_state->using_bounce_buffer = false;

                if (reserveable)
                    iowrite32(bytes, fifo_mem +
                              SVGA_FIFO_RESERVED);
                return fifo_mem + (next_cmd >> 2);
            } else {
                need_bounce = true;
            }
        }

        if (need_bounce) {
            fifo_state->using_bounce_buffer = true;
            if (bytes < fifo_state->static_buffer_size)
                return fifo_state->static_buffer;
            else {
                fifo_state->dynamic_buffer = kmalloc(bytes, 0);
                return fifo_state->dynamic_buffer;
            }
        }
    }
out_err:
    fifo_state->reserved_size = 0;
    mutex_unlock(&fifo_state->fifo_mutex);
    return NULL;
}
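
/*
 * Typical reserve/commit pairing (sketch only; the command choice and the
 * coordinate variables are illustrative). Note that a successful reserve
 * returns with fifo_mutex held, and the matching vmw_fifo_commit()
 * releases it:
 *
 *     uint32_t *cmd = vmw_fifo_reserve(dev_priv, 5 * sizeof(uint32_t));
 *
 *     if (unlikely(cmd == NULL))
 *         return -ENOMEM;
 *
 *     cmd[0] = SVGA_CMD_UPDATE;   // followed by x, y, width, height
 *     cmd[1] = x;
 *     cmd[2] = y;
 *     cmd[3] = width;
 *     cmd[4] = height;
 *     vmw_fifo_commit(dev_priv, 5 * sizeof(uint32_t));
 */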

static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
                              __le32 __iomem *fifo_mem,
                              uint32_t next_cmd,
                              uint32_t max, uint32_t min, uint32_t bytes)
{
    uint32_t chunk_size = max - next_cmd;
    uint32_t rest;
    uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
        fifo_state->dynamic_buffer : fifo_state->static_buffer;

    if (bytes < chunk_size)
        chunk_size = bytes;

    iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
//    mb();
    memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
    rest = bytes - chunk_size;
    if (rest)
        memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
               rest);
}

static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
                               __le32 __iomem *fifo_mem,
                               uint32_t next_cmd,
                               uint32_t max, uint32_t min, uint32_t bytes)
{
    uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
        fifo_state->dynamic_buffer : fifo_state->static_buffer;

    while (bytes > 0) {
        iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
        next_cmd += sizeof(uint32_t);
        if (unlikely(next_cmd == max))
            next_cmd = min;
        mb();
        iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
        mb();
        bytes -= sizeof(uint32_t);
    }
}

void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
    struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
    uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
    uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
    uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
    bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

    BUG_ON((bytes & 3) != 0);
    BUG_ON(bytes > fifo_state->reserved_size);

    fifo_state->reserved_size = 0;

    if (fifo_state->using_bounce_buffer) {
        if (reserveable)
            vmw_fifo_res_copy(fifo_state, fifo_mem,
                              next_cmd, max, min, bytes);
        else
            vmw_fifo_slow_copy(fifo_state, fifo_mem,
                               next_cmd, max, min, bytes);

        if (fifo_state->dynamic_buffer) {
            vfree(fifo_state->dynamic_buffer);
            fifo_state->dynamic_buffer = NULL;
        }

    }

//    down_write(&fifo_state->rwsem);
    if (fifo_state->using_bounce_buffer || reserveable) {
        next_cmd += bytes;
        if (next_cmd >= max)
            next_cmd -= max - min;
        mb();
        iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
    }

    if (reserveable)
        iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
//    mb();
//    up_write(&fifo_state->rwsem);
    vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
    mutex_unlock(&fifo_state->fifo_mutex);
}

int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
    struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
    struct svga_fifo_cmd_fence *cmd_fence;
    void *fm;
    int ret = 0;
    uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

    fm = vmw_fifo_reserve(dev_priv, bytes);
    if (unlikely(fm == NULL)) {
        *seqno = atomic_read(&dev_priv->marker_seq);
        ret = -ENOMEM;
        (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
                                false, 3*HZ);
        goto out_err;
    }

    do {
        *seqno = atomic_add_return(1, &dev_priv->marker_seq);
    } while (*seqno == 0);

    if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

        /*
         * Don't request hardware to send a fence. The
         * waiting code in vmwgfx_irq.c will emulate this.
         */

        vmw_fifo_commit(dev_priv, 0);
        return 0;
    }

    *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
    cmd_fence = (struct svga_fifo_cmd_fence *)
        ((unsigned long)fm + sizeof(__le32));

    iowrite32(*seqno, &cmd_fence->fence);
    vmw_fifo_commit(dev_priv, bytes);
    (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
    vmw_update_seqno(dev_priv, fifo_state);

out_err:
    return ret;
}
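
/*
 * Sketch of the emit/wait pairing (illustrative): a caller that needs to
 * sync with the device emits a fence and then waits for its seqno.
 *
 *     uint32_t seqno;
 *     int ret = vmw_fifo_send_fence(dev_priv, &seqno);
 *
 *     if (likely(ret == 0))
 *         ret = vmw_wait_seqno(dev_priv, false, seqno, true, 3 * HZ);
 */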

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
                              uint32_t cid)
{
    /*
     * A query wait without a preceding query end will
     * actually finish all queries for this cid
     * without writing to the query result structure.
     */

    struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdWaitForQuery body;
    } *cmd;

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

    if (unlikely(cmd == NULL)) {
        DRM_ERROR("Out of fifo space for dummy query.\n");
        return -ENOMEM;
    }

    cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = cid;
    cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

    if (bo->mem.mem_type == TTM_PL_VRAM) {
        cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
        cmd->body.guestResult.offset = bo->offset;
    } else {
        cmd->body.guestResult.gmrId = bo->mem.start;
        cmd->body.guestResult.offset = 0;
    }

    vmw_fifo_commit(dev_priv, sizeof(*cmd));

    return 0;
}
137	drivers/video/drm/vmwgfx/vmwgfx_gmr.c	Normal file
@ -0,0 +1,137 @@
/**************************************************************************
 *
 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#define mb()    asm volatile("mfence" : : : "memory")
#define rmb()   asm volatile("lfence" : : : "memory")
#define wmb()   asm volatile("sfence" : : : "memory")

#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
#include "vmwgfx_drv.h"


#define VMW_PPN_SIZE sizeof(unsigned long)

static int vmw_gmr2_bind(struct vmw_private *dev_priv,
                         struct page *pages[],
                         unsigned long num_pages,
                         int gmr_id)
{
    SVGAFifoCmdDefineGMR2 define_cmd;
    SVGAFifoCmdRemapGMR2 remap_cmd;
    uint32_t define_size = sizeof(define_cmd) + 4;
    uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
    uint32_t *cmd;
    uint32_t *cmd_orig;
    uint32_t i;

    cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
    if (unlikely(cmd == NULL))
        return -ENOMEM;

    define_cmd.gmrId = gmr_id;
    define_cmd.numPages = num_pages;

    remap_cmd.gmrId = gmr_id;
    remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
        SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
    remap_cmd.offsetPages = 0;
    remap_cmd.numPages = num_pages;

    *cmd++ = SVGA_CMD_DEFINE_GMR2;
    memcpy(cmd, &define_cmd, sizeof(define_cmd));
    cmd += sizeof(define_cmd) / sizeof(uint32);

    *cmd++ = SVGA_CMD_REMAP_GMR2;
    memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
    cmd += sizeof(remap_cmd) / sizeof(uint32);

    for (i = 0; i < num_pages; ++i) {
        if (VMW_PPN_SIZE <= 4)
            *cmd = page_to_pfn(*pages++);
        else
            *((uint64_t *)cmd) = page_to_pfn(*pages++);

        cmd += VMW_PPN_SIZE / sizeof(*cmd);
    }

    vmw_fifo_commit(dev_priv, define_size + remap_size);

    return 0;
}
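
/*
 * For reference, the command stream built above has this layout on the
 * wire (illustrative; each cell is a 32-bit word, and each PPN occupies
 * two words when VMW_PPN_SIZE is 8):
 *
 *     SVGA_CMD_DEFINE_GMR2 | gmrId | numPages
 *     SVGA_CMD_REMAP_GMR2  | gmrId | flags | offsetPages | numPages
 *     PPN0 | PPN1 | ... | PPN(numPages-1)
 */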

static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
                            int gmr_id)
{
    SVGAFifoCmdDefineGMR2 define_cmd;
    uint32_t define_size = sizeof(define_cmd) + 4;
    uint32_t *cmd;

    cmd = vmw_fifo_reserve(dev_priv, define_size);
    if (unlikely(cmd == NULL)) {
        DRM_ERROR("GMR2 unbind failed.\n");
        return;
    }
    define_cmd.gmrId = gmr_id;
    define_cmd.numPages = 0;

    *cmd++ = SVGA_CMD_DEFINE_GMR2;
    memcpy(cmd, &define_cmd, sizeof(define_cmd));

    vmw_fifo_commit(dev_priv, define_size);
}


int vmw_gmr_bind(struct vmw_private *dev_priv,
                 struct page *pages[],
                 unsigned long num_pages,
                 int gmr_id)
{
    if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
        return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);

    /* Only the GMR2 path is supported in this port. */
    printf("%s: no GMR2 support, cannot bind gmr %d\n",
           __FUNCTION__, gmr_id);
    return -EINVAL;
}


void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
    if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
        vmw_gmr2_unbind(dev_priv, gmr_id);
        return;
    }

    mutex_lock(&dev_priv->hw_mutex);
    vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
    wmb();
    vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
    mb();
    mutex_unlock(&dev_priv->hw_mutex);
}
161	drivers/video/drm/vmwgfx/vmwgfx_gmrid_manager.c	Normal file
@ -0,0 +1,161 @@
/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

struct vmwgfx_gmrid_man {
    spinlock_t lock;
    struct ida gmr_ida;
    uint32_t max_gmr_ids;
    uint32_t max_gmr_pages;
    uint32_t used_gmr_pages;
};

static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
                                  struct ttm_buffer_object *bo,
                                  struct ttm_placement *placement,
                                  struct ttm_mem_reg *mem)
{
    struct vmwgfx_gmrid_man *gman =
        (struct vmwgfx_gmrid_man *)man->priv;
    int ret = 0;
    int id;

    mem->mm_node = NULL;

    spin_lock(&gman->lock);

    if (gman->max_gmr_pages > 0) {
        gman->used_gmr_pages += bo->num_pages;
        if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
            goto out_err_locked;
    }

    do {
        spin_unlock(&gman->lock);
        if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
            ret = -ENOMEM;
            goto out_err;
        }
        spin_lock(&gman->lock);

        ret = ida_get_new(&gman->gmr_ida, &id);
        if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
            ida_remove(&gman->gmr_ida, id);
            ret = 0;
            goto out_err_locked;
        }
    } while (ret == -EAGAIN);

    if (likely(ret == 0)) {
        mem->mm_node = gman;
        mem->start = id;
        mem->num_pages = bo->num_pages;
    } else
        goto out_err_locked;

    spin_unlock(&gman->lock);
    return 0;

out_err:
    spin_lock(&gman->lock);
out_err_locked:
    gman->used_gmr_pages -= bo->num_pages;
    spin_unlock(&gman->lock);
    return ret;
}

static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
                                   struct ttm_mem_reg *mem)
{
    struct vmwgfx_gmrid_man *gman =
        (struct vmwgfx_gmrid_man *)man->priv;

    if (mem->mm_node) {
        spin_lock(&gman->lock);
        ida_remove(&gman->gmr_ida, mem->start);
        gman->used_gmr_pages -= mem->num_pages;
        spin_unlock(&gman->lock);
        mem->mm_node = NULL;
    }
}

static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
                              unsigned long p_size)
{
    struct vmw_private *dev_priv =
        container_of(man->bdev, struct vmw_private, bdev);
    struct vmwgfx_gmrid_man *gman =
        kzalloc(sizeof(*gman), GFP_KERNEL);

    if (unlikely(gman == NULL))
        return -ENOMEM;

    spin_lock_init(&gman->lock);
    gman->max_gmr_pages = dev_priv->max_gmr_pages;
    gman->used_gmr_pages = 0;
    ida_init(&gman->gmr_ida);
    gman->max_gmr_ids = p_size;
    man->priv = (void *) gman;
    return 0;
}

static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
{
    struct vmwgfx_gmrid_man *gman =
        (struct vmwgfx_gmrid_man *)man->priv;

    if (gman) {
        ida_destroy(&gman->gmr_ida);
        kfree(gman);
    }
    return 0;
}

static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
                                const char *prefix)
{
    printk(KERN_INFO "%s: No debug info available for the GMR "
           "id manager.\n", prefix);
}

const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
    vmw_gmrid_man_init,
    vmw_gmrid_man_takedown,
    vmw_gmrid_man_get_node,
    vmw_gmrid_man_put_node,
    vmw_gmrid_man_debug
};
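
/*
 * Registration sketch (illustrative; the actual call site lives in the
 * device init code, which is not part of this file, and the memory-type
 * index VMW_PL_GMR and the id limit below are assumptions about it):
 * the manager is hooked up through TTM like any other memory-type
 * manager, with p_size carrying the maximum number of GMR ids.
 *
 *     man = &dev_priv->bdev.man[VMW_PL_GMR];
 *     man->func = &vmw_gmrid_manager_func;
 *     ret = ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
 *                          dev_priv->max_gmr_ids);
 */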
326	drivers/video/drm/vmwgfx/vmwgfx_irq.c	Normal file
@ -0,0 +1,326 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define TASK_INTERRUPTIBLE      1
#define TASK_UNINTERRUPTIBLE    2

#define VMW_FENCE_WRAP (1 << 24)

irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
    struct drm_device *dev = (struct drm_device *)arg;
    struct vmw_private *dev_priv = vmw_priv(dev);
    uint32_t status, masked_status;

    spin_lock(&dev_priv->irq_lock);
    status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
    masked_status = status & dev_priv->irq_mask;
    spin_unlock(&dev_priv->irq_lock);

    if (likely(status))
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

    if (!masked_status)
        return IRQ_NONE;

    if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
                         SVGA_IRQFLAG_FENCE_GOAL)) {
        vmw_fences_update(dev_priv->fman);
        wake_up_all(&dev_priv->fence_queue);
    }

    if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
        wake_up_all(&dev_priv->fifo_queue);


    return IRQ_HANDLED;
}

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
    uint32_t busy;

    mutex_lock(&dev_priv->hw_mutex);
    busy = vmw_read(dev_priv, SVGA_REG_BUSY);
    mutex_unlock(&dev_priv->hw_mutex);

    return (busy == 0);
}

void vmw_update_seqno(struct vmw_private *dev_priv,
                      struct vmw_fifo_state *fifo_state)
{
    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
    uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

    if (dev_priv->last_read_seqno != seqno) {
        dev_priv->last_read_seqno = seqno;
        vmw_marker_pull(&fifo_state->marker_queue, seqno);
        vmw_fences_update(dev_priv->fman);
    }
}

bool vmw_seqno_passed(struct vmw_private *dev_priv,
                      uint32_t seqno)
{
    struct vmw_fifo_state *fifo_state;
    bool ret;

    if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
        return true;

    fifo_state = &dev_priv->fifo;
    vmw_update_seqno(dev_priv, fifo_state);
    if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
        return true;

    if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
        vmw_fifo_idle(dev_priv, seqno))
        return true;

    /**
     * Then check if the seqno is higher than what we've actually
     * emitted; in that case the fence is stale and already signaled.
     */

    ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
           > VMW_FENCE_WRAP);

    return ret;
}
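
/*
 * Worked example of the wrap-around test above (values made up for
 * illustration): seqnos are unsigned 32-bit values, so
 * "last_read_seqno - seqno < VMW_FENCE_WRAP" treats a fence as passed
 * when it is at most (1 << 24) - 1 steps behind the last seqno read from
 * the device. E.g. with last_read_seqno == 5 and seqno == 0xffffffff,
 * the unsigned difference is 6, so a fence emitted just before the
 * counter wrapped is still correctly seen as passed.
 */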
|
||||
|
||||
int vmw_fallback_wait(struct vmw_private *dev_priv,
|
||||
bool lazy,
|
||||
bool fifo_idle,
|
||||
uint32_t seqno,
|
||||
bool interruptible,
|
||||
unsigned long timeout)
|
||||
{
|
||||
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
|
||||
|
||||
uint32_t count = 0;
|
||||
uint32_t signal_seq;
|
||||
int ret;
|
||||
unsigned long end_jiffies = GetTimerTicks() + timeout;
|
||||
bool (*wait_condition)(struct vmw_private *, uint32_t);
|
||||
DEFINE_WAIT(__wait);
|
||||
|
||||
wait_condition = (fifo_idle) ? &vmw_fifo_idle :
|
||||
&vmw_seqno_passed;
|
||||
|
||||
/**
|
||||
* Block command submission while waiting for idle.
|
||||
*/
|
||||
|
||||
// if (fifo_idle)
|
||||
// down_read(&fifo_state->rwsem);
|
||||
signal_seq = atomic_read(&dev_priv->marker_seq);
|
||||
ret = 0;
|
||||
|
||||
for (;;) {
|
||||
// prepare_to_wait(&dev_priv->fence_queue, &__wait,
|
||||
// (interruptible) ?
|
||||
// TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
|
||||
if (wait_condition(dev_priv, seqno))
|
||||
break;
|
||||
if (time_after_eq(GetTimerTicks(), end_jiffies)) {
|
||||
DRM_ERROR("SVGA device lockup.\n");
|
||||
break;
|
||||
}
|
||||
if (lazy)
|
||||
delay(1);
|
||||
else if ((++count & 0x0F) == 0) {
|
||||
/**
|
||||
* FIXME: Use schedule_hr_timeout here for
|
||||
* newer kernels and lower CPU utilization.
|
||||
*/
|
||||
|
||||
delay(1);
|
||||
}
|
||||
}
|
||||
// finish_wait(&dev_priv->fence_queue, &__wait);
|
||||
if (ret == 0 && fifo_idle) {
|
||||
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
|
||||
iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
|
||||
}
|
||||
wake_up_all(&dev_priv->fence_queue);
|
||||
// if (fifo_idle)
|
||||
// up_read(&fifo_state->rwsem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
|
||||
{
|
||||
mutex_lock(&dev_priv->hw_mutex);
|
||||
if (dev_priv->fence_queue_waiters++ == 0) {
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
|
||||
outl(SVGA_IRQFLAG_ANY_FENCE,
|
||||
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
|
||||
dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
|
||||
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
|
||||
}
|
||||
mutex_unlock(&dev_priv->hw_mutex);
|
||||
}
|
||||
|
||||
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
|
||||
{
|
||||
mutex_lock(&dev_priv->hw_mutex);
|
||||
if (--dev_priv->fence_queue_waiters == 0) {
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
|
||||
dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
|
||||
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
|
||||
}
|
||||
mutex_unlock(&dev_priv->hw_mutex);
|
||||
}
|
||||
|
||||
|
||||
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
|
||||
{
|
||||
mutex_lock(&dev_priv->hw_mutex);
|
||||
if (dev_priv->goal_queue_waiters++ == 0) {
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
|
||||
outl(SVGA_IRQFLAG_FENCE_GOAL,
|
||||
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
|
||||
dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
|
||||
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
|
||||
}
|
||||
mutex_unlock(&dev_priv->hw_mutex);
|
||||
}
|
||||
|
||||
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
|
||||
{
|
||||
mutex_lock(&dev_priv->hw_mutex);
|
||||
if (--dev_priv->goal_queue_waiters == 0) {
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
|
||||
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
|
||||
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
|
||||
}
|
||||
mutex_unlock(&dev_priv->hw_mutex);
|
||||
}
|
||||
|
||||
int vmw_wait_seqno(struct vmw_private *dev_priv,
|
||||
bool lazy, uint32_t seqno,
|
||||
bool interruptible, unsigned long timeout)
|
||||
{
|
||||
long ret;
|
||||
struct vmw_fifo_state *fifo = &dev_priv->fifo;
|
||||
|
||||
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
|
||||
return 0;
|
||||
|
||||
if (likely(vmw_seqno_passed(dev_priv, seqno)))
|
||||
return 0;
|
||||
|
||||
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
|
||||
|
||||
if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
|
||||
return vmw_fallback_wait(dev_priv, lazy, true, seqno,
|
||||
interruptible, timeout);
|
||||
|
||||
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
|
||||
return vmw_fallback_wait(dev_priv, lazy, false, seqno,
|
||||
interruptible, timeout);
|
||||
|
||||
vmw_seqno_waiter_add(dev_priv);
|
||||
|
||||
if (interruptible)
|
||||
ret = wait_event_interruptible_timeout
|
||||
(dev_priv->fence_queue,
|
||||
vmw_seqno_passed(dev_priv, seqno),
|
||||
timeout);
|
||||
else
|
||||
ret = wait_event_timeout
|
||||
(dev_priv->fence_queue,
|
||||
vmw_seqno_passed(dev_priv, seqno),
|
||||
timeout);
|
||||
|
||||
vmw_seqno_waiter_remove(dev_priv);
|
||||
|
||||
if (unlikely(ret == 0))
|
||||
ret = -EBUSY;
|
||||
else if (likely(ret > 0))
|
||||
ret = 0;
|
||||
|
||||
return ret;
|
||||
}

void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	spin_lock_init(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}

void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
	mutex_unlock(&dev_priv->hw_mutex);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
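
/*
 * Minimal stand-in for the kernel's autoremove_wake_function in this
 * port: it unconditionally removes the waiter from the wait queue and
 * reports it as woken, without attempting an actual task wakeup.
 */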
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	list_del_init(&wait->task_list);
	return 1;
}

drivers/video/drm/vmwgfx/vmwgfx_kms.c (new file, 2073 lines; diff suppressed because it is too large)

drivers/video/drm/vmwgfx/vmwgfx_kms.h (new file, 166 lines)
@ -0,0 +1,166 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "vmwgfx_drv.h"

#define VMWGFX_NUM_DISPLAY_UNITS 8


#define vmw_framebuffer_to_vfb(x) \
	container_of(x, struct vmw_framebuffer, base)

/**
 * Base class for framebuffers
 *
 * @pin is called whenever a crtc uses this framebuffer
 * @unpin is called when no crtc uses the framebuffer any longer
 */
struct vmw_framebuffer {
	struct drm_framebuffer base;
	int (*pin)(struct vmw_framebuffer *fb);
	int (*unpin)(struct vmw_framebuffer *fb);
	bool dmabuf;
	struct ttm_base_object *user_obj;
	uint32_t user_handle;
};
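
/*
 * Illustrative only (not part of the original header): a display unit
 * that scans out from a vmw_framebuffer would typically bracket use of
 * the buffer with the pin/unpin hooks, e.g.:
 *
 *	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(crtc->fb);
 *
 *	if (vfb->pin)
 *		ret = vfb->pin(vfb);	// make the backing store resident
 *	...scan out from the buffer...
 *	if (vfb->unpin)
 *		vfb->unpin(vfb);	// allow eviction again
 */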


#define vmw_crtc_to_du(x) \
	container_of(x, struct vmw_display_unit, crtc)

/*
 * Basic cursor manipulation
 */
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY);
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y);


/**
 * Base class display unit.
 *
 * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
 * the display unit is all of them at the same time. This is true for both
 * legacy multimon and screen objects.
 */
struct vmw_display_unit {
	struct drm_crtc crtc;
	struct drm_encoder encoder;
	struct drm_connector connector;

	struct vmw_surface *cursor_surface;
	struct vmw_dma_buffer *cursor_dmabuf;
	size_t cursor_age;

	int cursor_x;
	int cursor_y;

	int hotspot_x;
	int hotspot_y;

	unsigned unit;

	/*
	 * Preferred mode tracking.
	 */
	unsigned pref_width;
	unsigned pref_height;
	bool pref_active;
	struct drm_display_mode *pref_mode;

	/*
	 * Gui positioning
	 */
	int gui_x;
	int gui_y;
	bool is_implicit;
};

#define vmw_connector_to_du(x) \
	container_of(x, struct vmw_display_unit, connector)


/*
 * Shared display unit functions - vmwgfx_kms.c
 */
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
int vmw_du_page_flip(struct drm_crtc *crtc,
		     struct drm_framebuffer *fb,
		     struct drm_pending_vblank_event *event);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			   u16 *r, u16 *g, u16 *b,
			   uint32_t start, uint32_t size);
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force);
int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height);
int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val);


/*
 * Legacy display unit functions - vmwgfx_ldu.c
 */
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);

/*
 * Screen Objects display functions - vmwgfx_scrn.c
 */
int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
			      struct drm_vmw_rect *rects);
bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
				     struct drm_crtc *crtc);
void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
					      struct drm_crtc *crtc);


#endif

drivers/video/drm/vmwgfx/vmwgfx_marker.c (new file, 208 lines)
@ -0,0 +1,208 @@
/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "vmwgfx_drv.h"
#include <linux/time.h>

struct vmw_marker {
	struct list_head head;
	uint32_t seqno;
	struct timespec submitted;
};

void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
	INIT_LIST_HEAD(&queue->head);
	queue->lag = ns_to_timespec(0);
//	getrawmonotonic(&queue->lag_time);
	spin_lock_init(&queue->lock);
}

void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
	struct vmw_marker *marker, *next;

	spin_lock(&queue->lock);
	list_for_each_entry_safe(marker, next, &queue->head, head) {
		kfree(marker);
	}
	spin_unlock(&queue->lock);
}

int vmw_marker_push(struct vmw_marker_queue *queue,
		    uint32_t seqno)
{
	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

	if (unlikely(!marker))
		return -ENOMEM;

	marker->seqno = seqno;
//	getrawmonotonic(&marker->submitted);
	spin_lock(&queue->lock);
	list_add_tail(&marker->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}
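
/*
 * Illustrative only: markers are pushed when commands are submitted and
 * pulled once the corresponding seqno has signaled, so the queue depth
 * tracks how far the host lags behind the guest, e.g.:
 *
 *	vmw_marker_push(queue, seqno);		// on command submission
 *	...
 *	vmw_marker_pull(queue, signaled_seqno);	// once the fence signals
 */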

int vmw_marker_pull(struct vmw_marker_queue *queue,
		    uint32_t signaled_seqno)
{
	struct vmw_marker *marker, *next;
	struct timespec now = { 0, 0 }; /* timing is stubbed out in this port */
	bool updated = false;

	spin_lock(&queue->lock);
//	getrawmonotonic(&now);

	if (list_empty(&queue->head)) {
//		queue->lag = ns_to_timespec(0);
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		if (signaled_seqno - marker->seqno > (1 << 30))
			continue;

//		queue->lag = timespec_sub(now, marker->submitted);
		queue->lag_time = now;
		updated = true;
		list_del(&marker->head);
		kfree(marker);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}

static struct timespec vmw_timespec_add(struct timespec t1,
					struct timespec t2)
{
	t1.tv_sec += t2.tv_sec;
	t1.tv_nsec += t2.tv_nsec;
	if (t1.tv_nsec >= 1000000000L) {
		t1.tv_sec += 1;
		t1.tv_nsec -= 1000000000L;
	}

	return t1;
}

static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
{
	struct timespec now = { 0, 0 }; /* timing is stubbed out in this port */

	spin_lock(&queue->lock);
//	getrawmonotonic(&now);
//	queue->lag = vmw_timespec_add(queue->lag,
//				      timespec_sub(now, queue->lag_time));
	queue->lag_time = now;
	spin_unlock(&queue->lock);
	return queue->lag;
}


static bool vmw_lag_lt(struct vmw_marker_queue *queue,
		       uint32_t us)
{
	struct timespec lag, cond;

	cond = ns_to_timespec((s64) us * 1000);
	lag = vmw_fifo_lag(queue);
	return (timespec_compare(&lag, &cond) < 1);
}

int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_marker_queue *queue, uint32_t us)
{
	struct vmw_marker *marker;
	uint32_t seqno;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			seqno = atomic_read(&dev_priv->marker_seq);
		else {
			marker = list_first_entry(&queue->head,
						  struct vmw_marker, head);
			seqno = marker->seqno;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		(void) vmw_marker_pull(queue, seqno);
	}
	return 0;
}

s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
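
/*
 * Example (truncated division; the remainder takes the dividend's sign):
 *
 *	div_s64_rem(-7,  3, &r);	// returns -2, r == -1
 *	div_s64_rem( 7, -3, &r);	// returns -2, r ==  1
 */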

struct timespec ns_to_timespec(const s64 nsec)
{
	struct timespec ts;
	s32 rem;

	if (!nsec)
		return (struct timespec) {0, 0};

	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
	if (unlikely(rem < 0)) {
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;

	return ts;
}

drivers/video/drm/vmwgfx/vmwgfx_reg.h (new file, 57 lines)
@ -0,0 +1,57 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * This file contains virtual hardware defines for kernel space.
 */

#ifndef _VMWGFX_REG_H_
#define _VMWGFX_REG_H_

#include <linux/types.h>

#define VMWGFX_INDEX_PORT     0x0
#define VMWGFX_VALUE_PORT     0x1
#define VMWGFX_IRQSTATUS_PORT 0x8

struct svga_guest_mem_descriptor {
	__le32 ppn;
	__le32 num_pages;
};

struct svga_fifo_cmd_fence {
	__le32 fence;
};

#define SVGA_SYNC_GENERIC   1
#define SVGA_SYNC_FIFOFULL  2

#include "svga_types.h"

#include "svga3d_reg.h"

#endif

drivers/video/drm/vmwgfx/vmwgfx_resource.c (new file, 1308 lines; diff suppressed because it is too large)

drivers/video/drm/vmwgfx/vmwgfx_resource_priv.h (new file, 84 lines)
@ -0,0 +1,84 @@
/**************************************************************************
 *
 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_RESOURCE_PRIV_H_
#define _VMWGFX_RESOURCE_PRIV_H_

#include "vmwgfx_drv.h"

/**
 * struct vmw_user_resource_conv - Identify a derived user-exported resource
 * type and provide a function to convert its ttm_base_object pointer to
 * a struct vmw_resource
 */
struct vmw_user_resource_conv {
	enum ttm_object_type object_type;
	struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
	void (*res_free) (struct vmw_resource *res);
};

/**
 * struct vmw_res_func - members and functions common for a resource type
 *
 * @res_type:          Enum that identifies the lru list to use for eviction.
 * @needs_backup:      Whether the resource is guest-backed and needs
 *                     persistent buffer storage.
 * @type_name:         String that identifies the resource type.
 * @backup_placement:  TTM placement for backup buffers.
 * @may_evict:         Whether the resource may be evicted.
 * @create:            Create a hardware resource.
 * @destroy:           Destroy a hardware resource.
 * @bind:              Bind a hardware resource to persistent buffer storage.
 * @unbind:            Unbind a hardware resource from persistent
 *                     buffer storage.
 */
struct vmw_res_func {
	enum vmw_res_type res_type;
	bool needs_backup;
	const char *type_name;
	struct ttm_placement *backup_placement;
	bool may_evict;

	int (*create) (struct vmw_resource *res);
	int (*destroy) (struct vmw_resource *res);
	int (*bind) (struct vmw_resource *res,
		     struct ttm_validate_buffer *val_buf);
	int (*unbind) (struct vmw_resource *res,
		       bool readback,
		       struct ttm_validate_buffer *val_buf);
};
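
/*
 * For a concrete instance of this interface, see vmw_legacy_surface_func
 * in vmwgfx_surface.c (later in this commit), which supplies
 * create/destroy/bind/unbind for legacy surfaces.
 */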

int vmw_resource_alloc_id(struct vmw_resource *res);
void vmw_resource_release_id(struct vmw_resource *res);
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func);
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *));
#endif

drivers/video/drm/vmwgfx/vmwgfx_scrn.c (new file, 574 lines)
@ -0,0 +1,574 @@
/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"


#define vmw_crtc_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.connector)

struct vmw_screen_object_display {
	unsigned num_implicit;

	struct vmw_framebuffer *implicit_fb;
};

/**
 * Display unit using screen objects.
 */
struct vmw_screen_object_unit {
	struct vmw_display_unit base;

	unsigned long buffer_size; /**< Size of allocated buffer */
	struct vmw_dma_buffer *buffer; /**< Backing store buffer */

	bool defined;
	bool active_implicit;
};

static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
//	vmw_display_unit_cleanup(&sou->base);
	kfree(sou);
}


/*
 * Screen Object Display Unit CRTC functions
 */

static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

static void vmw_sou_del_active(struct vmw_private *vmw_priv,
			       struct vmw_screen_object_unit *sou)
{
	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;

	if (sou->active_implicit) {
		if (--(ld->num_implicit) == 0)
			ld->implicit_fb = NULL;
		sou->active_implicit = false;
	}
}

static void vmw_sou_add_active(struct vmw_private *vmw_priv,
			       struct vmw_screen_object_unit *sou,
			       struct vmw_framebuffer *vfb)
{
	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;

	BUG_ON(!ld->num_implicit && ld->implicit_fb);

	if (!sou->active_implicit && sou->base.is_implicit) {
		ld->implicit_fb = vfb;
		sou->active_implicit = true;
		ld->num_implicit++;
	}
}

/**
 * Send the fifo command to create a screen.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
			       struct vmw_screen_object_unit *sou,
			       uint32_t x, uint32_t y,
			       struct drm_display_mode *mode)
{
	size_t fifo_size;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAScreenObject obj;
	} *cmd;

	BUG_ON(!sou->buffer);

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* The hardware has hung, nothing we can do about it here. */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
	cmd->obj.structSize = sizeof(SVGAScreenObject);
	cmd->obj.id = sou->base.unit;
	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
	cmd->obj.size.width = mode->hdisplay;
	cmd->obj.size.height = mode->vdisplay;
	if (sou->base.is_implicit) {
		cmd->obj.root.x = x;
		cmd->obj.root.y = y;
	} else {
		cmd->obj.root.x = sou->base.gui_x;
		cmd->obj.root.y = sou->base.gui_y;
	}

	/* Ok to assume that buffer is pinned in vram */
	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
	cmd->obj.backingStore.pitch = mode->hdisplay * 4;

	vmw_fifo_commit(dev_priv, fifo_size);

	sou->defined = true;

	return 0;
}
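
/*
 * Note the FIFO pattern used throughout this file: vmw_fifo_reserve()
 * returns a pointer into command space (NULL if the device has hung),
 * the command is built in place, and vmw_fifo_commit() with the same
 * size makes it visible to the host.
 */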

/**
 * Send the fifo command to destroy a screen.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
				struct vmw_screen_object_unit *sou)
{
	size_t fifo_size;
	int ret;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAFifoCmdDestroyScreen body;
	} *cmd;

	/* no need to do anything */
	if (unlikely(!sou->defined))
		return 0;

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* the hardware has hung, nothing we can do about it here */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
	cmd->body.screenId = sou->base.unit;

	vmw_fifo_commit(dev_priv, fifo_size);

	/* Force sync */
	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
	if (unlikely(ret != 0))
		DRM_ERROR("Failed to sync with HW");
	else
		sou->defined = false;

	return ret;
}

/**
 * Free the backing store.
 */
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
				 struct vmw_screen_object_unit *sou)
{
	struct ttm_buffer_object *bo;

	if (unlikely(sou->buffer == NULL))
		return;

	bo = &sou->buffer->base;
	ttm_bo_unref(&bo);
	sou->buffer = NULL;
	sou->buffer_size = 0;
}

/**
 * Allocate the backing store for the buffer.
 */
static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
				 struct vmw_screen_object_unit *sou,
				 unsigned long size)
{
	int ret;

	if (sou->buffer_size == size)
		return 0;

	if (sou->buffer)
		vmw_sou_backing_free(dev_priv, sou);

	sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
	if (unlikely(sou->buffer == NULL))
		return -ENOMEM;

	/* After this allocation we might not be able to resume the
	 * overlays, but that is preferred to failing the allocation.
	 */
//	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
//	vmw_overlay_resume_all(dev_priv);

	if (unlikely(ret != 0))
		sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
	else
		sou->buffer_size = size;

	return ret;
}

static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_crtc *crtc;
	int ret = 0;

	if (!set)
		return -EINVAL;

	if (!set->crtc)
		return -EINVAL;

	/* get the sou */
	crtc = set->crtc;
	sou = vmw_crtc_to_sou(crtc);
	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
	dev_priv = vmw_priv(crtc->dev);

	if (set->num_connectors > 1) {
		DRM_ERROR("too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &sou->base.connector) {
		DRM_ERROR("connector doesn't match %p %p\n",
			  set->connectors[0], &sou->base.connector);
		return -EINVAL;
	}

	/* sou only supports one fb active at a time */
	if (sou->base.is_implicit &&
	    dev_priv->sou_priv->implicit_fb && vfb &&
	    !(dev_priv->sou_priv->num_implicit == 1 &&
	      sou->active_implicit) &&
	    dev_priv->sou_priv->implicit_fb != vfb) {
		DRM_ERROR("Multiple framebuffers not supported\n");
		return -EINVAL;
	}

	/* since they always map one to one these are safe */
	connector = &sou->base.connector;
	encoder = &sou->base.encoder;

	/* should we turn the crtc off */
	if (set->num_connectors == 0 || !set->mode || !set->fb) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		/* the hardware has hung, don't do anything more */
		if (unlikely(ret != 0))
			return ret;

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->fb = NULL;
		crtc->x = 0;
		crtc->y = 0;

		vmw_sou_del_active(dev_priv, sou);

		vmw_sou_backing_free(dev_priv, sou);

		return 0;
	}


	/* we now know we want to set a mode */
	mode = set->mode;
	fb = set->fb;

	if (set->x + mode->hdisplay > fb->width ||
	    set->y + mode->vdisplay > fb->height) {
		DRM_ERROR("set outside of framebuffer\n");
		return -EINVAL;
	}

//	vmw_fb_off(dev_priv);

	if (mode->hdisplay != crtc->mode.hdisplay ||
	    mode->vdisplay != crtc->mode.vdisplay) {
		/* no need to check if depth is different, because backing
		 * store depth is forced to 4 by the device.
		 */

		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		/* the hardware has hung, don't do anything more */
		if (unlikely(ret != 0))
			return ret;

		vmw_sou_backing_free(dev_priv, sou);
	}

	if (!sou->buffer) {
		/* forced to depth 4 by the device */
		size_t size = mode->hdisplay * mode->vdisplay * 4;
		ret = vmw_sou_backing_alloc(dev_priv, sou, size);
		if (unlikely(ret != 0))
			return ret;
	}

	ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
	if (unlikely(ret != 0)) {
		/*
		 * We are in a bit of a situation here: the hardware has
		 * hung and we may or may not have a buffer hanging off
		 * the screen object. The best thing to do is to not do
		 * anything if we were defined; if not, just turn the
		 * crtc off. Not what userspace wants, but it will have
		 * to cope.
		 */
		if (sou->defined)
			return ret;

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->fb = NULL;
		crtc->x = 0;
		crtc->y = 0;

		return ret;
	}

	vmw_sou_add_active(dev_priv, sou, vfb);

	connector->encoder = encoder;
	encoder->crtc = crtc;
	crtc->mode = *mode;
	crtc->fb = fb;
	crtc->x = set->x;
	crtc->y = set->y;

	return 0;
}
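
/*
 * In summary, set_config above handles three cases: an empty mode set
 * tears the screen object down, a changed resolution destroys and
 * reallocates the backing store, and a (re)defined mode creates the
 * screen object at the requested position.
 */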

static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.save = vmw_du_crtc_save,
	.restore = vmw_du_crtc_restore,
//	.cursor_set = vmw_du_crtc_cursor_set,
//	.cursor_move = vmw_du_crtc_cursor_move,
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.set_config = vmw_sou_crtc_set_config,
//	.page_flip = vmw_du_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
	.destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
	vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static struct drm_connector_funcs vmw_legacy_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.save = vmw_du_connector_save,
	.restore = vmw_du_connector_restore,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_sou_connector_destroy,
};

static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_screen_object_unit *sou;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
	if (!sou)
		return -ENOMEM;

	sou->base.unit = unit;
	crtc = &sou->base.crtc;
	encoder = &sou->base.encoder;
	connector = &sou->base.connector;

	sou->active_implicit = false;

	sou->base.pref_active = (unit == 0);
	sou->base.pref_width = dev_priv->initial_width;
	sou->base.pref_height = dev_priv->initial_height;
	sou->base.pref_mode = NULL;
	sou->base.is_implicit = true;

	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	connector->status = vmw_du_connector_detect(connector, true);

	drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL);
	drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev->mode_config.dirty_info_property,
				   1);

	return 0;
}

int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	ENTER();

	if (dev_priv->sou_priv) {
		DRM_INFO("sou system already on\n");
		return -EINVAL;
	}

	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
		DRM_INFO("Not using screen objects,"
			 " missing cap SCREEN_OBJECT_2\n");
		return -ENOSYS;
	}

	ret = -ENOMEM;
	dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
	if (unlikely(!dev_priv->sou_priv))
		goto err_no_mem;

	dev_priv->sou_priv->num_implicit = 0;
	dev_priv->sou_priv->implicit_fb = NULL;

//	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
//	if (unlikely(ret != 0))
//		goto err_free;

	ret = drm_mode_create_dirty_info_property(dev);
	if (unlikely(ret != 0))
		goto err_vblank_cleanup;

	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	DRM_INFO("Screen objects system initialized\n");

	LEAVE();
	return 0;

err_vblank_cleanup:
//	drm_vblank_cleanup(dev);
err_free:
	kfree(dev_priv->sou_priv);
	dev_priv->sou_priv = NULL;
err_no_mem:
	return ret;
}

int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (!dev_priv->sou_priv)
		return -ENOSYS;

//	drm_vblank_cleanup(dev);

	kfree(dev_priv->sou_priv);

	return 0;
}

/**
 * Returns whether this unit can be page flipped.
 * Must be called with the mode_config mutex held.
 */
bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
				     struct drm_crtc *crtc)
{
	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);

	if (!sou->base.is_implicit)
		return true;

	if (dev_priv->sou_priv->num_implicit != 1)
		return false;

	return true;
}
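
/*
 * Rationale (editorial note): implicit units all share
 * dev_priv->sou_priv->implicit_fb, so a flip is only well defined while
 * exactly one implicit unit is active; an explicit unit owns its
 * framebuffer and can always flip.
 */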

/**
 * Update the implicit fb to the current fb of this crtc.
 * Must be called with the mode_config mutex held.
 */
void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
					      struct drm_crtc *crtc)
{
	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);

	BUG_ON(!sou->base.is_implicit);

	dev_priv->sou_priv->implicit_fb =
		vmw_framebuffer_to_vfb(sou->base.crtc.fb);
}

drivers/video/drm/vmwgfx/vmwgfx_surface.c (new file, 879 lines)
@ -0,0 +1,879 @@
/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @base: The TTM base object handling user-space visibility.
 * @srf: The surface metadata.
 * @size: TTM accounting size for the surface.
 */
struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
	uint32_t backup_handle;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face.
 * @mip: Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 *
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
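
/*
 * Sizing rationale: a surface define occupies one header+body plus one
 * SVGA3dSize per mip level, while the DMA path emits a complete
 * header/body/copy-box/suffix packet per mip level, hence
 * num_sizes * sizeof(struct vmw_surface_dma) above.
 */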

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * used_memory_size_atomic, or separate lock
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one and encode the
 * surface define commands for it.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
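
/*
 * Note that the fence attached above keeps the backup buffer busy until
 * the host has completed the DMA, so the buffer cannot be moved or
 * reused while the copy is still in flight.
 */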
|
||||
|
||||
/**
|
||||
* vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
|
||||
* surface validation process.
|
||||
*
|
||||
* @res: Pointer to a struct vmw_res embedded in a struct
|
||||
* vmw_surface.
|
||||
* @val_buf: Pointer to a struct ttm_validate_buffer containing
|
||||
* information about the backup buffer.
|
||||
*
|
||||
* This function will copy backup data to the surface if the
|
||||
* backup buffer is dirty.
|
||||
*/
|
||||
static int vmw_legacy_srf_bind(struct vmw_resource *res,
|
||||
struct ttm_validate_buffer *val_buf)
|
||||
{
|
||||
if (!res->backup_dirty)
|
||||
return 0;
|
||||
|
||||
return vmw_legacy_srf_dma(res, val_buf, true);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
|
||||
* surface eviction process.
|
||||
*
|
||||
* @res: Pointer to a struct vmw_res embedded in a struct
|
||||
* vmw_surface.
|
||||
* @val_buf: Pointer to a struct ttm_validate_buffer containing
|
||||
* information about the backup buffer.
|
||||
*
|
||||
* This function will copy backup data from the surface.
|
||||
*/
|
||||
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
|
||||
bool readback,
|
||||
struct ttm_validate_buffer *val_buf)
|
||||
{
|
||||
if (unlikely(readback))
|
||||
return vmw_legacy_srf_dma(res, val_buf, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_legacy_srf_destroy - Destroy a device surface as part of a
|
||||
* resource eviction process.
|
||||
*
|
||||
* @res: Pointer to a struct vmw_res embedded in a struct
|
||||
* vmw_surface.
|
||||
*/
|
||||
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
|
||||
{
|
||||
struct vmw_private *dev_priv = res->dev_priv;
|
||||
uint32_t submit_size;
|
||||
uint8_t *cmd;
|
||||
|
||||
BUG_ON(res->id == -1);
|
||||
|
||||
/*
|
||||
* Encode the dma- and surface destroy commands.
|
||||
*/
|
||||
|
||||
submit_size = vmw_surface_destroy_size();
|
||||
cmd = vmw_fifo_reserve(dev_priv, submit_size);
|
||||
if (unlikely(cmd == NULL)) {
|
||||
DRM_ERROR("Failed reserving FIFO space for surface "
|
||||
"eviction.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
vmw_surface_destroy_encode(res->id, cmd);
|
||||
vmw_fifo_commit(dev_priv, submit_size);
|
||||
|
||||
/*
|
||||
* Surface memory usage accounting.
|
||||
*/
|
||||
|
||||
dev_priv->used_memory_size -= res->backup_size;
|
||||
|
||||
/*
|
||||
* Release the surface ID.
|
||||
*/
|
||||
|
||||
vmw_resource_release_id(res);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_surface_init - initialize a struct vmw_surface
|
||||
*
|
||||
* @dev_priv: Pointer to a device private struct.
|
||||
* @srf: Pointer to the struct vmw_surface to initialize.
|
||||
* @res_free: Pointer to a resource destructor used to free
|
||||
* the object.
|
||||
*/
|
||||
static int vmw_surface_init(struct vmw_private *dev_priv,
|
||||
struct vmw_surface *srf,
|
||||
void (*res_free) (struct vmw_resource *res))
|
||||
{
|
||||
int ret;
|
||||
struct vmw_resource *res = &srf->res;
|
||||
|
||||
BUG_ON(res_free == NULL);
|
||||
(void) vmw_3d_resource_inc(dev_priv, false);
|
||||
ret = vmw_resource_init(dev_priv, res, true, res_free,
|
||||
&vmw_legacy_surface_func);
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
vmw_3d_resource_dec(dev_priv, false);
|
||||
res_free(res);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The surface won't be visible to hardware until a
|
||||
* surface validate.
|
||||
*/
|
||||
|
||||
vmw_resource_activate(res, vmw_hw_surface_destroy);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_user_surface_base_to_res - TTM base object to resource converter for
|
||||
* user visible surfaces
|
||||
*
|
||||
* @base: Pointer to a TTM base object
|
||||
*
|
||||
* Returns the struct vmw_resource embedded in a struct vmw_surface
|
||||
* for the user-visible object identified by the TTM base object @base.
|
||||
*/
|
||||
static struct vmw_resource *
|
||||
vmw_user_surface_base_to_res(struct ttm_base_object *base)
|
||||
{
|
||||
return &(container_of(base, struct vmw_user_surface, base)->srf.res);
|
||||
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = vmw_res_to_srf(res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);
        struct vmw_private *dev_priv = srf->res.dev_priv;
        uint32_t size = user_srf->size;

        kfree(srf->offsets);
        kfree(srf->sizes);
        kfree(srf->snooper.image);
//      ttm_base_object_kfree(user_srf, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and sets the
 * pointer pointed to by *p_base to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}
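
/*
 * A minimal sketch of the reference discipline assumed by the release
 * function above: every vmw_resource_reference() is balanced by one
 * vmw_resource_unreference(), which also clears the caller's pointer.
 * The function name is hypothetical.
 */
#if 0
static void example_ref_cycle(struct vmw_resource *res)
{
        struct vmw_resource *tmp = vmw_resource_reference(res);

        /* ... use tmp ... */
        vmw_resource_unreference(&tmp); /* tmp is NULL afterwards. */
}
#endif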

#if 0
/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct drm_vmw_size __user *user_sizes;
        int ret;
        int i, j;
        uint32_t cur_bo_offset;
        struct drm_vmw_size *cur_size;
        struct vmw_surface_offset *cur_offset;
        uint32_t num_sizes;
        uint32_t size;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        const struct svga3d_surface_desc *desc;

        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;

        num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                num_sizes += req->mip_levels[i];

        if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
            DRM_VMW_MAX_MIP_LEVELS)
                return -EINVAL;

        size = vmw_user_surface_size + 128 +
                ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
                ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

        desc = svga3dsurface_get_desc(req->format);
        if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
                DRM_ERROR("Invalid surface format for surface creation.\n");
                return -EINVAL;
        }

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   size, false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for surface"
                                  " creation.\n");
                goto out_unlock;
        }

        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
        if (unlikely(user_srf == NULL)) {
                ret = -ENOMEM;
                goto out_no_user_srf;
        }

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->flags;
        srf->format = req->format;
        srf->scanout = req->scanout;

        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = num_sizes;
        user_srf->size = size;

        srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
        if (unlikely(srf->sizes == NULL)) {
                ret = -ENOMEM;
                goto out_no_sizes;
        }
        srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
                               GFP_KERNEL);
        if (unlikely(srf->offsets == NULL)) {
                ret = -ENOMEM;
                goto out_no_offsets;
        }

        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            req->size_addr;

        ret = copy_from_user(srf->sizes, user_sizes,
                             srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                ret = -EFAULT;
                goto out_no_copy;
        }

        srf->base_size = *srf->sizes;
        srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
        srf->multisample_count = 1;

        cur_bo_offset = 0;
        cur_offset = srf->offsets;
        cur_size = srf->sizes;

        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                for (j = 0; j < srf->mip_levels[i]; ++j) {
                        uint32_t stride = svga3dsurface_calculate_pitch
                                (desc, cur_size);

                        cur_offset->face = i;
                        cur_offset->mip = j;
                        cur_offset->bo_offset = cur_bo_offset;
                        cur_bo_offset += svga3dsurface_get_image_buffer_size
                                (desc, cur_size, stride);
                        ++cur_offset;
                        ++cur_size;
                }
        }
        res->backup_size = cur_bo_offset;
        if (srf->scanout &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
            srf->sizes[0].height == 64 &&
            srf->format == SVGA3D_A8R8G8B8) {

                srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
                /* clear the image */
                if (srf->snooper.image) {
                        memset(srf->snooper.image, 0x00, 64 * 64 * 4);
                } else {
                        DRM_ERROR("Failed to allocate cursor_image\n");
                        ret = -ENOMEM;
                        goto out_no_copy;
                }
        } else {
                srf->snooper.image = NULL;
        }
        srf->snooper.crtc = NULL;

        user_srf->base.shareable = false;
        user_srf->base.tfile = NULL;

        /*
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_base_object_init(tfile, &user_srf->base,
                                   req->shareable, VMW_RES_SURFACE,
                                   &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                goto out_unlock;
        }

        rep->sid = user_srf->base.hash.key;
        vmw_resource_unreference(&res);

        ttm_read_unlock(&vmaster->lock);
        return 0;
out_no_copy:
        kfree(srf->offsets);
out_no_offsets:
        kfree(srf->sizes);
out_no_sizes:
        ttm_base_object_kfree(user_srf, base);
out_no_user_srf:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}
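
/*
 * A minimal sketch of the face/mip walk in the ioctl above: each image's
 * bo_offset is the running sum of the buffer sizes of all previous images,
 * so the final sum is the required backup buffer size. The wrapper is
 * hypothetical; the svga3dsurface helpers are the ones used above.
 */
#if 0
static uint32_t example_backup_size(const struct svga3d_surface_desc *desc,
                                    struct drm_vmw_size *sizes,
                                    const uint32_t *mip_levels)
{
        uint32_t offset = 0;
        int i, j;

        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                for (j = 0; j < mip_levels[i]; ++j, ++sizes)
                        offset += svga3dsurface_get_image_buffer_size
                                (desc, sizes,
                                 svga3dsurface_calculate_pitch(desc, sizes));
        return offset;
}
#endif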

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, req->sid);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
        }

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;

        ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a surface.\n");
                goto out_no_reference;
        }

        rep->flags = srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        if (user_sizes)
                ret = copy_to_user(user_sizes, srf->sizes,
                                   srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
                ret = -EFAULT;
        }
out_bad_resource:
out_no_reference:
        ttm_base_object_unref(&base);

        return ret;
}
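
/*
 * A minimal sketch of the lookup/type-check/unref pattern above, using only
 * the TTM base-object helpers already called in this file. The helper name
 * is hypothetical; on success the caller still owns the lookup reference
 * and must drop it with ttm_base_object_unref().
 */
#if 0
static struct vmw_user_surface *
example_lookup_user_srf(struct ttm_object_file *tfile, uint32_t sid)
{
        struct ttm_base_object *base = ttm_base_object_lookup(tfile, sid);

        if (base == NULL)
                return NULL;
        if (base->object_type != VMW_RES_SURFACE) {
                ttm_base_object_unref(&base);   /* Wrong type: drop the ref. */
                return NULL;
        }
        return container_of(base, struct vmw_user_surface, base);
}
#endif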

#endif

drivers/video/drm/vmwgfx/vmwgfx_ttm_glue.c (new file, 105 lines)
@ -0,0 +1,105 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include <drm/drm_global.h>
#include "vmwgfx_drv.h"

#if 0
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vmw_private *dev_priv;

//      if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
//              DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
//              return -EINVAL;
//      }

        file_priv = filp->private_data;
        dev_priv = vmw_priv(file_priv->minor->dev);
        return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
#endif

static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
{
        DRM_INFO("global init.\n");
        return ttm_mem_global_init(ref->object);
}

static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
{
//      ttm_mem_global_release(ref->object);
}

int vmw_ttm_global_init(struct vmw_private *dev_priv)
{
        struct drm_global_reference *global_ref;
        int ret;

        ENTER();

        global_ref = &dev_priv->mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vmw_ttm_mem_global_init;
        global_ref->release = &vmw_ttm_mem_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM memory accounting.\n");
                return ret;
        }

        dev_priv->bo_global_ref.mem_glob =
                dev_priv->mem_global_ref.object;
        global_ref = &dev_priv->bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        ret = drm_global_item_ref(global_ref);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM buffer objects.\n");
                goto out_no_bo;
        }

        LEAVE();
        return 0;

out_no_bo:
        drm_global_item_unref(&dev_priv->mem_global_ref);
        return ret;
}
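
/*
 * A minimal sketch of the drm_global reference pattern used above: fill in
 * a struct drm_global_reference, take a reference (the first one runs
 * ->init), and balance it with drm_global_item_unref() (the last one runs
 * ->release). The wrapper name is hypothetical.
 */
#if 0
static int example_take_mem_global(struct drm_global_reference *ref)
{
        ref->global_type = DRM_GLOBAL_TTM_MEM;
        ref->size = sizeof(struct ttm_mem_global);
        ref->init = &vmw_ttm_mem_global_init;
        ref->release = &vmw_ttm_mem_global_release;
        return drm_global_item_ref(ref);
}
#endif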

void vmw_ttm_global_release(struct vmw_private *dev_priv)
{
        drm_global_item_unref(&dev_priv->bo_global_ref.ref);
        drm_global_item_unref(&dev_priv->mem_global_ref);
}