forked from KolibriOS/kolibrios
i915-4.6.7
git-svn-id: svn://kolibrios.org@7144 a494cfbc-eb01-0410-851d-a64ba20cac60
commit cb2af79a5a (parent 66fbaa74b0)
@@ -28,6 +28,7 @@

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>

/**

@@ -304,7 +305,7 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
    if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
        return 0;

    drm_property_unreference_blob(state->mode_blob);
    state->mode_blob = NULL;

    if (mode) {

@@ -350,9 +351,11 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
    if (blob == state->mode_blob)
        return 0;

    drm_property_unreference_blob(state->mode_blob);
    state->mode_blob = NULL;

    memset(&state->mode, 0, sizeof(state->mode));

    if (blob) {
        if (blob->length != sizeof(struct drm_mode_modeinfo) ||
            drm_mode_convert_umode(&state->mode,

@@ -365,7 +368,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
        DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
                 state->mode.name, state);
    } else {
        memset(&state->mode, 0, sizeof(state->mode));
        state->enable = false;
        DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
                 state);

@@ -375,6 +377,58 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);

/**
 * drm_atomic_replace_property_blob - replace a blob property
 * @blob: a pointer to the member blob to be replaced
 * @new_blob: the new blob to replace with
 * @replaced: whether the blob has been replaced
 */
static void
drm_atomic_replace_property_blob(struct drm_property_blob **blob,
                 struct drm_property_blob *new_blob,
                 bool *replaced)
{
    struct drm_property_blob *old_blob = *blob;

    if (old_blob == new_blob)
        return;

    if (old_blob)
        drm_property_unreference_blob(old_blob);
    if (new_blob)
        drm_property_reference_blob(new_blob);
    *blob = new_blob;
    *replaced = true;

    return;
}

static int
drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
                     struct drm_property_blob **blob,
                     uint64_t blob_id,
                     ssize_t expected_size,
                     bool *replaced)
{
    struct drm_device *dev = crtc->dev;
    struct drm_property_blob *new_blob = NULL;

    if (blob_id != 0) {
        new_blob = drm_property_lookup_blob(dev, blob_id);
        if (new_blob == NULL)
            return -EINVAL;
        if (expected_size > 0 && expected_size != new_blob->length)
            return -EINVAL;
    }

    drm_atomic_replace_property_blob(blob, new_blob, replaced);

    return 0;
}
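
For clarity, the helper pair above gives the CRTC property code a single reference-counting path for every blob property: the old blob's reference is dropped, the new one is taken, and *replaced reports whether anything changed. A minimal usage sketch, mirroring the CTM branch added further down in this commit (state is a struct drm_crtc_state, val a userspace-supplied blob id):

    /* Sketch: validate and swap in a new CTM blob by id; passing
     * sizeof(struct drm_color_ctm) rejects blobs of any other size,
     * while -1 (as used for the LUTs below) skips the size check. */
    bool replaced = false;
    int ret = drm_atomic_replace_property_blob_from_id(crtc, &state->ctm, val,
                    sizeof(struct drm_color_ctm),
                    &replaced);
    state->color_mgmt_changed = replaced;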
/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on

@@ -397,6 +451,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
{
    struct drm_device *dev = crtc->dev;
    struct drm_mode_config *config = &dev->mode_config;
    bool replaced = false;
    int ret;

    if (property == config->prop_active)

@@ -405,10 +460,33 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
        struct drm_property_blob *mode =
            drm_property_lookup_blob(dev, val);
        ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
        drm_property_unreference_blob(mode);
        return ret;
    }
    else if (crtc->funcs->atomic_set_property)
    } else if (property == config->degamma_lut_property) {
        ret = drm_atomic_replace_property_blob_from_id(crtc,
                    &state->degamma_lut,
                    val,
                    -1,
                    &replaced);
        state->color_mgmt_changed = replaced;
        return ret;
    } else if (property == config->ctm_property) {
        ret = drm_atomic_replace_property_blob_from_id(crtc,
                    &state->ctm,
                    val,
                    sizeof(struct drm_color_ctm),
                    &replaced);
        state->color_mgmt_changed = replaced;
        return ret;
    } else if (property == config->gamma_lut_property) {
        ret = drm_atomic_replace_property_blob_from_id(crtc,
                    &state->gamma_lut,
                    val,
                    -1,
                    &replaced);
        state->color_mgmt_changed = replaced;
        return ret;
    } else if (crtc->funcs->atomic_set_property)
        return crtc->funcs->atomic_set_property(crtc, state, property, val);
    else
        return -EINVAL;

@@ -444,6 +522,12 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
        *val = state->active;
    else if (property == config->prop_mode_id)
        *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
    else if (property == config->degamma_lut_property)
        *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
    else if (property == config->ctm_property)
        *val = (state->ctm) ? state->ctm->base.id : 0;
    else if (property == config->gamma_lut_property)
        *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
    else if (crtc->funcs->atomic_get_property)
        return crtc->funcs->atomic_get_property(crtc, state, property, val);
    else

@@ -1204,14 +1288,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
 */
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
    struct drm_device *dev = state->dev;
    unsigned crtc_mask = 0;
    struct drm_crtc *crtc;
    int ret;
    bool global = false;

    drm_for_each_crtc(crtc, dev) {
        if (crtc->acquire_ctx != state->acquire_ctx)
            continue;

        crtc_mask |= drm_crtc_mask(crtc);
        crtc->acquire_ctx = NULL;
    }

    if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
        global = true;

        dev->mode_config.acquire_ctx = NULL;
    }

retry:
    drm_modeset_backoff(state->acquire_ctx);

    ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
    ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
    if (ret)
        goto retry;

    drm_for_each_crtc(crtc, dev)
        if (drm_crtc_mask(crtc) & crtc_mask)
            crtc->acquire_ctx = state->acquire_ctx;

    if (global)
        dev->mode_config.acquire_ctx = state->acquire_ctx;
}
EXPORT_SYMBOL(drm_atomic_legacy_backoff);

@@ -1343,46 +1452,25 @@ static struct drm_pending_vblank_event *create_vblank_event(
        struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
{
    struct drm_pending_vblank_event *e = NULL;
    unsigned long flags;

    spin_lock_irqsave(&dev->event_lock, flags);
    if (file_priv->event_space < sizeof e->event) {
        spin_unlock_irqrestore(&dev->event_lock, flags);
        goto out;
    }
    file_priv->event_space -= sizeof e->event;
    spin_unlock_irqrestore(&dev->event_lock, flags);
    int ret;

    e = kzalloc(sizeof *e, GFP_KERNEL);
    if (e == NULL) {
        spin_lock_irqsave(&dev->event_lock, flags);
        file_priv->event_space += sizeof e->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);
        goto out;
    }
    if (!e)
        return NULL;

    e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
    e->event.base.length = sizeof e->event;
    e->event.base.length = sizeof(e->event);
    e->event.user_data = user_data;
    e->base.event = &e->event.base;
    e->base.file_priv = file_priv;
    e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;

out:
    ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
    if (ret) {
        kfree(e);
        return NULL;
    }

    return e;
}

static void destroy_vblank_event(struct drm_device *dev,
        struct drm_file *file_priv, struct drm_pending_vblank_event *e)
{
    unsigned long flags;

    spin_lock_irqsave(&dev->event_lock, flags);
    file_priv->event_space += sizeof e->event;
    spin_unlock_irqrestore(&dev->event_lock, flags);
    kfree(e);
}

static int atomic_set_prop(struct drm_atomic_state *state,
        struct drm_mode_object *obj, struct drm_property *prop,
        uint64_t prop_value)
@@ -67,7 +67,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
    struct drm_crtc_state *crtc_state;

    if (plane->state->crtc) {
        crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
        crtc_state = drm_atomic_get_existing_crtc_state(state,
                                plane->state->crtc);

        if (WARN_ON(!crtc_state))
            return;

@@ -76,8 +77,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
    }

    if (plane_state->crtc) {
        crtc_state =
            state->crtc_states[drm_crtc_index(plane_state->crtc)];
        crtc_state = drm_atomic_get_existing_crtc_state(state,
                                plane_state->crtc);

        if (WARN_ON(!crtc_state))
            return;

@@ -86,110 +87,185 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
    }
}

static bool
check_pending_encoder_assignment(struct drm_atomic_state *state,
                 struct drm_encoder *new_encoder)
static int handle_conflicting_encoders(struct drm_atomic_state *state,
                       bool disable_conflicting_encoders)
{
    struct drm_connector *connector;
    struct drm_connector_state *conn_state;
    int i;

    for_each_connector_in_state(state, connector, conn_state, i) {
        if (conn_state->best_encoder != new_encoder)
            continue;

        /* encoder already assigned and we're trying to re-steal it! */
        if (connector->state->best_encoder != conn_state->best_encoder)
            return false;
    }

    return true;
}

static struct drm_crtc *
get_current_crtc_for_encoder(struct drm_device *dev,
                 struct drm_encoder *encoder)
{
    struct drm_mode_config *config = &dev->mode_config;
    struct drm_connector *connector;

    WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

    drm_for_each_connector(connector, dev) {
        if (connector->state->best_encoder != encoder)
            continue;

        return connector->state->crtc;
    }

    return NULL;
}

static int
steal_encoder(struct drm_atomic_state *state,
          struct drm_encoder *encoder,
          struct drm_crtc *encoder_crtc)
{
    struct drm_mode_config *config = &state->dev->mode_config;
    struct drm_crtc_state *crtc_state;
    struct drm_connector *connector;
    struct drm_connector_state *connector_state;
    int ret;
    struct drm_encoder *encoder;
    unsigned encoder_mask = 0;
    int i, ret;

    /*
     * We can only steal an encoder coming from a connector, which means we
     * must already hold the connection_mutex.
     * First loop, find all newly assigned encoders from the connectors
     * part of the state. If the same encoder is assigned to multiple
     * connectors bail out.
     */
    WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
    for_each_connector_in_state(state, connector, conn_state, i) {
        const struct drm_connector_helper_funcs *funcs = connector->helper_private;
        struct drm_encoder *new_encoder;

    DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
             encoder->base.id, encoder->name,
             encoder_crtc->base.id, encoder_crtc->name);

    crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
    if (IS_ERR(crtc_state))
        return PTR_ERR(crtc_state);

    crtc_state->connectors_changed = true;

    list_for_each_entry(connector, &config->connector_list, head) {
        if (connector->state->best_encoder != encoder)
        if (!conn_state->crtc)
            continue;

        DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
                 connector->base.id,
                 connector->name);
        if (funcs->atomic_best_encoder)
            new_encoder = funcs->atomic_best_encoder(connector, conn_state);
        else
            new_encoder = funcs->best_encoder(connector);

        connector_state = drm_atomic_get_connector_state(state,
                                 connector);
        if (IS_ERR(connector_state))
            return PTR_ERR(connector_state);
        if (new_encoder) {
            if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
                DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
                    new_encoder->base.id, new_encoder->name,
                    connector->base.id, connector->name);

        ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
                return -EINVAL;
            }

            encoder_mask |= 1 << drm_encoder_index(new_encoder);
        }
    }

    if (!encoder_mask)
        return 0;

    /*
     * Second loop, iterate over all connectors not part of the state.
     *
     * If a conflicting encoder is found and disable_conflicting_encoders
     * is not set, an error is returned. Userspace can provide a solution
     * through the atomic ioctl.
     *
     * If the flag is set conflicting connectors are removed from the crtc
     * and the crtc is disabled if no encoder is left. This preserves
     * compatibility with the legacy set_config behavior.
     */
    drm_for_each_connector(connector, state->dev) {
        struct drm_crtc_state *crtc_state;

        if (drm_atomic_get_existing_connector_state(state, connector))
            continue;

        encoder = connector->state->best_encoder;
        if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
            continue;

        if (!disable_conflicting_encoders) {
            DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
                     encoder->base.id, encoder->name,
                     connector->state->crtc->base.id,
                     connector->state->crtc->name,
                     connector->base.id, connector->name);
            return -EINVAL;
        }

        conn_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(conn_state))
            return PTR_ERR(conn_state);

        DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
                 encoder->base.id, encoder->name,
                 conn_state->crtc->base.id, conn_state->crtc->name,
                 connector->base.id, connector->name);

        crtc_state = drm_atomic_get_existing_crtc_state(state, conn_state->crtc);

        ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
        if (ret)
            return ret;
        connector_state->best_encoder = NULL;

        if (!crtc_state->connector_mask) {
            ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
                                NULL);
            if (ret < 0)
                return ret;

            crtc_state->active = false;
        }
    }

    return 0;
}
static void
set_best_encoder(struct drm_atomic_state *state,
         struct drm_connector_state *conn_state,
         struct drm_encoder *encoder)
{
    struct drm_crtc_state *crtc_state;
    struct drm_crtc *crtc;

    if (conn_state->best_encoder) {
        /* Unset the encoder_mask in the old crtc state. */
        crtc = conn_state->connector->state->crtc;

        /* A NULL crtc is an error here because we should have
         * duplicated a NULL best_encoder when crtc was NULL.
         * As an exception restoring duplicated atomic state
         * during resume is allowed, so don't warn when
         * best_encoder is equal to encoder we intend to set.
         */
        WARN_ON(!crtc && encoder != conn_state->best_encoder);
        if (crtc) {
            crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);

            crtc_state->encoder_mask &=
                ~(1 << drm_encoder_index(conn_state->best_encoder));
        }
    }

    if (encoder) {
        crtc = conn_state->crtc;
        WARN_ON(!crtc);
        if (crtc) {
            crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);

            crtc_state->encoder_mask |=
                1 << drm_encoder_index(encoder);
        }
    }

    conn_state->best_encoder = encoder;
}

static void
steal_encoder(struct drm_atomic_state *state,
          struct drm_encoder *encoder)
{
    struct drm_crtc_state *crtc_state;
    struct drm_connector *connector;
    struct drm_connector_state *connector_state;
    int i;

    for_each_connector_in_state(state, connector, connector_state, i) {
        struct drm_crtc *encoder_crtc;

        if (connector_state->best_encoder != encoder)
            continue;

        encoder_crtc = connector->state->crtc;

        DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
                 encoder->base.id, encoder->name,
                 encoder_crtc->base.id, encoder_crtc->name);

        set_best_encoder(state, connector_state, NULL);

        crtc_state = drm_atomic_get_existing_crtc_state(state, encoder_crtc);
        crtc_state->connectors_changed = true;

        return;
    }
}

static int
update_connector_routing(struct drm_atomic_state *state, int conn_idx)
update_connector_routing(struct drm_atomic_state *state,
             struct drm_connector *connector,
             struct drm_connector_state *connector_state)
{
    const struct drm_connector_helper_funcs *funcs;
    struct drm_encoder *new_encoder;
    struct drm_crtc *encoder_crtc;
    struct drm_connector *connector;
    struct drm_connector_state *connector_state;
    struct drm_crtc_state *crtc_state;
    int idx, ret;

    connector = state->connectors[conn_idx];
    connector_state = state->connector_states[conn_idx];

    if (!connector)
        return 0;

    DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
             connector->base.id,

@@ -197,16 +273,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)

    if (connector->state->crtc != connector_state->crtc) {
        if (connector->state->crtc) {
            idx = drm_crtc_index(connector->state->crtc);

            crtc_state = state->crtc_states[idx];
            crtc_state = drm_atomic_get_existing_crtc_state(state, connector->state->crtc);
            crtc_state->connectors_changed = true;
        }

        if (connector_state->crtc) {
            idx = drm_crtc_index(connector_state->crtc);

            crtc_state = state->crtc_states[idx];
            crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
            crtc_state->connectors_changed = true;
        }
    }

@@ -216,7 +288,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
                 connector->base.id,
                 connector->name);

        connector_state->best_encoder = NULL;
        set_best_encoder(state, connector_state, NULL);

        return 0;
    }

@@ -245,6 +317,8 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
    }

    if (new_encoder == connector_state->best_encoder) {
        set_best_encoder(state, connector_state, new_encoder);

        DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
                 connector->base.id,
                 connector->name,

@@ -256,33 +330,11 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        return 0;
    }

    if (!check_pending_encoder_assignment(state, new_encoder)) {
        DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
                 connector->base.id,
                 connector->name);
        return -EINVAL;
    }
    steal_encoder(state, new_encoder);

    encoder_crtc = get_current_crtc_for_encoder(state->dev,
                            new_encoder);
    set_best_encoder(state, connector_state, new_encoder);

    if (encoder_crtc) {
        ret = steal_encoder(state, new_encoder, encoder_crtc);
        if (ret) {
            DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
                     connector->base.id,
                     connector->name);
            return ret;
        }
    }

    if (WARN_ON(!connector_state->crtc))
        return -EINVAL;

    connector_state->best_encoder = new_encoder;
    idx = drm_crtc_index(connector_state->crtc);

    crtc_state = state->crtc_states[idx];
    crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
    crtc_state->connectors_changed = true;

    DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",

@@ -323,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state)
        if (!conn_state->crtc || !conn_state->best_encoder)
            continue;

        crtc_state =
            state->crtc_states[drm_crtc_index(conn_state->crtc)];
        crtc_state = drm_atomic_get_existing_crtc_state(state,
                                conn_state->crtc);

        /*
         * Each encoder has at most one connector (since we always steal

@@ -445,13 +497,18 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
        }
    }

    ret = handle_conflicting_encoders(state, state->legacy_set_config);
    if (ret)
        return ret;

    for_each_connector_in_state(state, connector, connector_state, i) {
        /*
         * This only sets crtc->mode_changed for routing changes,
         * drivers must set crtc->mode_changed themselves when connector
         * properties need to be updated.
         */
        ret = update_connector_routing(state, i);
        ret = update_connector_routing(state, connector,
                           connector_state);
        if (ret)
            return ret;
    }

@@ -483,7 +540,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
                 crtc->base.id, crtc->name,
                 crtc_state->enable ? 'y' : 'n',
                 crtc_state->active ? 'y' : 'n');

        ret = drm_atomic_add_affected_connectors(state, crtc);
        if (ret != 0)

@@ -617,14 +674,14 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
    for_each_connector_in_state(old_state, connector, old_conn_state, i) {
        const struct drm_encoder_helper_funcs *funcs;
        struct drm_encoder *encoder;
        struct drm_crtc_state *old_crtc_state;

        /* Shut down everything that's in the changeset and currently
         * still on. So need to check the old, saved state. */
        if (!old_conn_state->crtc)
            continue;

        old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
        old_crtc_state = drm_atomic_get_existing_crtc_state(old_state,
                                    old_conn_state->crtc);

        if (!old_crtc_state->active ||
            !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
@@ -961,8 +1018,8 @@ static void wait_for_fences(struct drm_device *dev,
 * true if the framebuffer changed.
 */
bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
                       struct drm_atomic_state *old_state,
                       struct drm_crtc *crtc)
{
    struct drm_plane *plane;
    struct drm_plane_state *old_plane_state;

@@ -1719,28 +1776,18 @@ static int update_output_state(struct drm_atomic_state *state,
    struct drm_crtc_state *crtc_state;
    struct drm_connector *connector;
    struct drm_connector_state *conn_state;
    int ret, i, j;
    int ret, i;

    ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                   state->acquire_ctx);
    if (ret)
        return ret;

    /* First grab all affected connector/crtc states. */
    for (i = 0; i < set->num_connectors; i++) {
        conn_state = drm_atomic_get_connector_state(state,
                                set->connectors[i]);
        if (IS_ERR(conn_state))
            return PTR_ERR(conn_state);
    }
    /* First disable all connectors on the target crtc. */
    ret = drm_atomic_add_affected_connectors(state, set->crtc);
    if (ret)
        return ret;

    for_each_crtc_in_state(state, crtc, crtc_state, i) {
        ret = drm_atomic_add_affected_connectors(state, crtc);
        if (ret)
            return ret;
    }

    /* Then recompute connector->crtc links and crtc enabling state. */
    for_each_connector_in_state(state, connector, conn_state, i) {
        if (conn_state->crtc == set->crtc) {
            ret = drm_atomic_set_crtc_for_connector(conn_state,

@@ -1748,16 +1795,19 @@ static int update_output_state(struct drm_atomic_state *state,
            if (ret)
                return ret;
        }
    }

        for (j = 0; j < set->num_connectors; j++) {
            if (set->connectors[j] == connector) {
                ret = drm_atomic_set_crtc_for_connector(conn_state,
                                    set->crtc);
                if (ret)
                    return ret;
                break;
            }
        }
    /* Then set all connectors from set->connectors on the target crtc */
    for (i = 0; i < set->num_connectors; i++) {
        conn_state = drm_atomic_get_connector_state(state,
                                set->connectors[i]);
        if (IS_ERR(conn_state))
            return PTR_ERR(conn_state);

        ret = drm_atomic_set_crtc_for_connector(conn_state,
                            set->crtc);
        if (ret)
            return ret;
    }

    for_each_crtc_in_state(state, crtc, crtc_state, i) {

@@ -1800,6 +1850,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
    if (!state)
        return -ENOMEM;

    state->legacy_set_config = true;
    state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
    ret = __drm_atomic_helper_set_config(set, state);

@@ -2446,8 +2497,12 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
 */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
    if (crtc->state)
    if (crtc->state) {
        drm_property_unreference_blob(crtc->state->mode_blob);
        drm_property_unreference_blob(crtc->state->degamma_lut);
        drm_property_unreference_blob(crtc->state->ctm);
        drm_property_unreference_blob(crtc->state->gamma_lut);
    }
    kfree(crtc->state);
    crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);

@@ -2471,10 +2526,17 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,

    if (state->mode_blob)
        drm_property_reference_blob(state->mode_blob);
    if (state->degamma_lut)
        drm_property_reference_blob(state->degamma_lut);
    if (state->ctm)
        drm_property_reference_blob(state->ctm);
    if (state->gamma_lut)
        drm_property_reference_blob(state->gamma_lut);
    state->mode_changed = false;
    state->active_changed = false;
    state->planes_changed = false;
    state->connectors_changed = false;
    state->color_mgmt_changed = false;
    state->event = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);

@@ -2514,7 +2576,10 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
                        struct drm_crtc_state *state)
{
    drm_property_unreference_blob(state->mode_blob);
    drm_property_unreference_blob(state->degamma_lut);
    drm_property_unreference_blob(state->ctm);
    drm_property_unreference_blob(state->gamma_lut);
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);

@@ -2549,8 +2614,10 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
    kfree(plane->state);
    plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);

    if (plane->state)
    if (plane->state) {
        plane->state->plane = plane;
        plane->state->rotation = BIT(DRM_ROTATE_0);
    }
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);

@@ -2826,3 +2893,98 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
    kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);

/**
 * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
 * @crtc: CRTC object
 * @red: red correction table
 * @green: green correction table
 * @blue: blue correction table
 * @start:
 * @size: size of the tables
 *
 * Implements support for legacy gamma correction table for drivers
 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
 * properties.
 */
void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
                    u16 *red, u16 *green, u16 *blue,
                    uint32_t start, uint32_t size)
{
    struct drm_device *dev = crtc->dev;
    struct drm_mode_config *config = &dev->mode_config;
    struct drm_atomic_state *state;
    struct drm_crtc_state *crtc_state;
    struct drm_property_blob *blob = NULL;
    struct drm_color_lut *blob_data;
    int i, ret = 0;

    state = drm_atomic_state_alloc(crtc->dev);
    if (!state)
        return;

    blob = drm_property_create_blob(dev,
                    sizeof(struct drm_color_lut) * size,
                    NULL);
    if (IS_ERR(blob)) {
        ret = PTR_ERR(blob);
        blob = NULL;
        goto fail;
    }

    /* Prepare GAMMA_LUT with the legacy values. */
    blob_data = (struct drm_color_lut *) blob->data;
    for (i = 0; i < size; i++) {
        blob_data[i].red = red[i];
        blob_data[i].green = green[i];
        blob_data[i].blue = blue[i];
    }

    state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
retry:
    crtc_state = drm_atomic_get_crtc_state(state, crtc);
    if (IS_ERR(crtc_state)) {
        ret = PTR_ERR(crtc_state);
        goto fail;
    }

    /* Reset DEGAMMA_LUT and CTM properties. */
    ret = drm_atomic_crtc_set_property(crtc, crtc_state,
            config->degamma_lut_property, 0);
    if (ret)
        goto fail;

    ret = drm_atomic_crtc_set_property(crtc, crtc_state,
            config->ctm_property, 0);
    if (ret)
        goto fail;

    ret = drm_atomic_crtc_set_property(crtc, crtc_state,
            config->gamma_lut_property, blob->base.id);
    if (ret)
        goto fail;

    ret = drm_atomic_commit(state);
    if (ret)
        goto fail;

    /* Driver takes ownership of state on successful commit. */

    drm_property_unreference_blob(blob);

    return;
fail:
    if (ret == -EDEADLK)
        goto backoff;

    drm_atomic_state_free(state);
    drm_property_unreference_blob(blob);

    return;
backoff:
    drm_atomic_state_clear(state);
    drm_atomic_legacy_backoff(state);

    goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
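
A driver that exposes the new color-management blobs can route the legacy gamma ioctl straight through this helper, since its signature matches the drm_crtc_funcs .gamma_set hook. A minimal sketch (the vtable name is illustrative, not from this commit; all referenced helpers appear in this tree):

    /* Sketch: routing the legacy gamma ioctl through the atomic helper. */
    static const struct drm_crtc_funcs my_crtc_funcs = {
        .gamma_set              = drm_atomic_helper_legacy_gamma_set,
        .set_config             = drm_atomic_helper_set_config,
        .page_flip              = drm_atomic_helper_page_flip,
        .destroy                = drm_crtc_cleanup,
        .reset                  = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
    };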
@@ -38,7 +38,7 @@
 * A bridge is always attached to a single &drm_encoder at a time, but can be
 * either connected to it directly, or through an intermediate bridge:
 *
 *     encoder ---> bridge B ---> bridge A
 *
 * Here, the output of the encoder feeds to bridge B, and that further feeds to
 * bridge A.

@@ -186,7 +186,8 @@ void drm_bridge_disable(struct drm_bridge *bridge)

    drm_bridge_disable(bridge->next);

    bridge->funcs->disable(bridge);
    if (bridge->funcs->disable)
        bridge->funcs->disable(bridge);
}
EXPORT_SYMBOL(drm_bridge_disable);

@@ -206,7 +207,8 @@ void drm_bridge_post_disable(struct drm_bridge *bridge)
    if (!bridge)
        return;

    bridge->funcs->post_disable(bridge);
    if (bridge->funcs->post_disable)
        bridge->funcs->post_disable(bridge);

    drm_bridge_post_disable(bridge->next);
}

@@ -256,7 +258,8 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge)

    drm_bridge_pre_enable(bridge->next);

    bridge->funcs->pre_enable(bridge);
    if (bridge->funcs->pre_enable)
        bridge->funcs->pre_enable(bridge);
}
EXPORT_SYMBOL(drm_bridge_pre_enable);

@@ -276,7 +279,8 @@ void drm_bridge_enable(struct drm_bridge *bridge)
    if (!bridge)
        return;

    bridge->funcs->enable(bridge);
    if (bridge->funcs->enable)
        bridge->funcs->enable(bridge);

    drm_bridge_enable(bridge->next);
}

@@ -430,9 +430,7 @@ EXPORT_SYMBOL(drm_framebuffer_init);
static void __drm_framebuffer_unregister(struct drm_device *dev,
                     struct drm_framebuffer *fb)
{
    mutex_lock(&dev->mode_config.idr_mutex);
    idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
    mutex_unlock(&dev->mode_config.idr_mutex);
    drm_mode_object_put(dev, &fb->base);

    fb->base.id = 0;
}

@@ -1126,9 +1124,9 @@ int drm_encoder_init(struct drm_device *dev,
        encoder->name = kvasprintf(GFP_KERNEL, name, ap);
        va_end(ap);
    } else {
        encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
                      drm_encoder_enum_list[encoder_type].name,
                      encoder->base.id);
    }
    if (!encoder->name) {
        ret = -ENOMEM;

@@ -1149,6 +1147,29 @@ out_unlock:
}
EXPORT_SYMBOL(drm_encoder_init);

/**
 * drm_encoder_index - find the index of a registered encoder
 * @encoder: encoder to find index for
 *
 * Given a registered encoder, return the index of that encoder within a DRM
 * device's list of encoders.
 */
unsigned int drm_encoder_index(struct drm_encoder *encoder)
{
    unsigned int index = 0;
    struct drm_encoder *tmp;

    drm_for_each_encoder(tmp, encoder->dev) {
        if (tmp == encoder)
            return index;

        index++;
    }

    BUG();
}
EXPORT_SYMBOL(drm_encoder_index);
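
drm_encoder_index() exists so the new routing code can track encoders in a bitmask. A small sketch of that idiom (the claim_encoder() wrapper is hypothetical; like the unsigned masks in handle_conflicting_encoders() and set_best_encoder() above, it assumes fewer than 32 encoders per device):

    /* Sketch: claim an encoder bit, failing if another connector holds it. */
    static bool claim_encoder(unsigned *encoder_mask, struct drm_encoder *encoder)
    {
        unsigned bit = 1 << drm_encoder_index(encoder);

        if (*encoder_mask & bit)
            return false;   /* already assigned to another connector */

        *encoder_mask |= bit;
        return true;
    }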
/**
 * drm_encoder_cleanup - cleans up an initialised encoder
 * @encoder: encoder to cleanup

@@ -1531,6 +1552,41 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
        return -ENOMEM;
    dev->mode_config.prop_mode_id = prop;

    prop = drm_property_create(dev,
            DRM_MODE_PROP_BLOB,
            "DEGAMMA_LUT", 0);
    if (!prop)
        return -ENOMEM;
    dev->mode_config.degamma_lut_property = prop;

    prop = drm_property_create_range(dev,
            DRM_MODE_PROP_IMMUTABLE,
            "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
    if (!prop)
        return -ENOMEM;
    dev->mode_config.degamma_lut_size_property = prop;

    prop = drm_property_create(dev,
            DRM_MODE_PROP_BLOB,
            "CTM", 0);
    if (!prop)
        return -ENOMEM;
    dev->mode_config.ctm_property = prop;

    prop = drm_property_create(dev,
            DRM_MODE_PROP_BLOB,
            "GAMMA_LUT", 0);
    if (!prop)
        return -ENOMEM;
    dev->mode_config.gamma_lut_property = prop;

    prop = drm_property_create_range(dev,
            DRM_MODE_PROP_IMMUTABLE,
            "GAMMA_LUT_SIZE", 0, UINT_MAX);
    if (!prop)
        return -ENOMEM;
    dev->mode_config.gamma_lut_size_property = prop;

    return 0;
}

@@ -2747,8 +2803,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
            goto out;
        }

        drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);

        /*
         * Check whether the primary plane supports the fb pixel format.
         * Drivers not implementing the universal planes API use a

@@ -3289,6 +3343,24 @@ int drm_mode_addfb2(struct drm_device *dev,
    return 0;
}

struct drm_mode_rmfb_work {
    struct work_struct work;
    struct list_head fbs;
};

static void drm_mode_rmfb_work_fn(struct work_struct *w)
{
    struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);

    while (!list_empty(&arg->fbs)) {
        struct drm_framebuffer *fb =
            list_first_entry(&arg->fbs, typeof(*fb), filp_head);

        list_del_init(&fb->filp_head);
        drm_framebuffer_remove(fb);
    }
}

/**
 * drm_mode_rmfb - remove an FB from the configuration
 * @dev: drm device for the ioctl

@@ -3482,7 +3554,6 @@ out_err1:
    return ret;
}

/**
 * drm_fb_release - remove and free the FBs on this file
 * @priv: drm file for the ioctl

@@ -4630,7 +4701,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,

    /* Do DPMS ourselves */
    if (property == connector->dev->mode_config.dpms_property) {
        ret = (*connector->funcs->dpms)(connector, (int)value);
    } else if (connector->funcs->set_property)
        ret = connector->funcs->set_property(connector, property, value);

@@ -73,9 +73,6 @@
 * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
 * &drm_connector_helper_funcs.
 */
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");

/**
 * drm_helper_move_panel_connectors_to_head() - move panels to the front in the

@@ -220,6 +217,15 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
 * disconnected connectors. Then it will disable all unused encoders and CRTCs
 * either by calling their disable callback if available or by calling their
 * dpms callback with DRM_MODE_DPMS_OFF.
 *
 * NOTE:
 *
 * This function is part of the legacy modeset helper library and will cause
 * major confusion with atomic drivers. This is because atomic helpers guarantee
 * to never call ->disable() hooks on a disabled function, or ->enable() hooks
 * on an enabled function. drm_helper_disable_unused_functions() on the other
 * hand throws such guarantees into the wind and calls disable hooks
 * unconditionally on unused functions.
 */
void drm_helper_disable_unused_functions(struct drm_device *dev)
{

@@ -328,16 +334,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
        }

        encoder_funcs = encoder->helper_private;
        if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
                              adjusted_mode))) {
            DRM_DEBUG_KMS("Encoder fixup failed\n");
            goto done;
        if (encoder_funcs->mode_fixup) {
            if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
                                  adjusted_mode))) {
                DRM_DEBUG_KMS("Encoder fixup failed\n");
                goto done;
            }
        }
    }

    if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
        DRM_DEBUG_KMS("CRTC fixup failed\n");
        goto done;
    if (crtc_funcs->mode_fixup) {
        if (!(ret = crtc_funcs->mode_fixup(crtc, mode,
                           adjusted_mode))) {
            DRM_DEBUG_KMS("CRTC fixup failed\n");
            goto done;
        }
    }
    DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

@@ -522,7 +533,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
        DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
                  set->crtc->base.id, set->crtc->name,
                  set->fb->base.id,
                  (int)set->num_connectors, set->x, set->y);
    } else {
        DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n",
                  set->crtc->base.id, set->crtc->name);

@@ -578,8 +589,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
    if (set->crtc->primary->fb == NULL) {
        DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
        mode_changed = true;
    } else if (set->fb == NULL) {
        mode_changed = true;
    } else if (set->fb->pixel_format !=
           set->crtc->primary->fb->pixel_format) {
        mode_changed = true;

@@ -590,7 +599,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
    if (set->x != set->crtc->x || set->y != set->crtc->y)
        fb_changed = true;

    if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
    if (!drm_mode_equal(set->mode, &set->crtc->mode)) {
        DRM_DEBUG_KMS("modes are different, full mode set\n");
        drm_mode_debug_printmodeline(&set->crtc->mode);
        drm_mode_debug_printmodeline(set->mode);

@@ -666,11 +675,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
        }
        if (new_crtc) {
            DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
                      connector->base.id, connector->name,
                      new_crtc->base.id, new_crtc->name);
        } else {
            DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
                      connector->base.id, connector->name);
        }
    }

@@ -1066,3 +1075,36 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
    return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);

/**
 * drm_helper_crtc_enable_color_mgmt - enable color management properties
 * @crtc: DRM CRTC
 * @degamma_lut_size: the size of the degamma lut (before CSC)
 * @gamma_lut_size: the size of the gamma lut (after CSC)
 *
 * This function lets the driver enable the color correction properties on a
 * CRTC. This includes 3 properties (degamma, CSC and gamma) that userspace
 * can set and 2 size properties to inform userspace of the LUT sizes.
 */
void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
                       int degamma_lut_size,
                       int gamma_lut_size)
{
    struct drm_device *dev = crtc->dev;
    struct drm_mode_config *config = &dev->mode_config;

    drm_object_attach_property(&crtc->base,
                   config->degamma_lut_property, 0);
    drm_object_attach_property(&crtc->base,
                   config->ctm_property, 0);
    drm_object_attach_property(&crtc->base,
                   config->gamma_lut_property, 0);

    drm_object_attach_property(&crtc->base,
                   config->degamma_lut_size_property,
                   degamma_lut_size);
    drm_object_attach_property(&crtc->base,
                   config->gamma_lut_size_property,
                   gamma_lut_size);
}
EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
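
A driver would call this once per CRTC during init, after the standard properties above have been created by drm_mode_create_standard_properties(). A hedged sketch (the function name and LUT sizes are illustrative only, not from this commit):

    /* Sketch: advertising color management on a CRTC at driver init time. */
    static void my_driver_crtc_init(struct drm_crtc *crtc)
    {
        drm_helper_crtc_enable_color_mgmt(crtc,
                          257,    /* degamma LUT entries (illustrative) */
                          1024);  /* gamma LUT entries (illustrative) */
    }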
366	drivers/video/drm/drm_dp_dual_mode_helper.c	Normal file
@@ -0,0 +1,366 @@
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drmP.h>

/**
 * DOC: dp dual mode helpers
 *
 * Helper functions to deal with DP dual mode (aka. DP++) adaptors.
 *
 * Type 1:
 * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C.
 *
 * Type 2:
 * Adaptor registers and sink DDC bus can be accessed either via I2C or
 * I2C-over-AUX. Source devices may choose to implement either of these
 * access methods.
 */

#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40

/**
 * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s)
 * @adapter: I2C adapter for the DDC bus
 * @offset: register offset
 * @buffer: buffer for return data
 * @size: size of the buffer
 *
 * Reads @size bytes from the DP dual mode adaptor registers
 * starting at @offset.
 *
 * Returns:
 * 0 on success, negative error code on failure
 */
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
                  u8 offset, void *buffer, size_t size)
{
    struct i2c_msg msgs[] = {
        {
            .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
            .flags = 0,
            .len = 1,
            .buf = &offset,
        },
        {
            .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
            .flags = I2C_M_RD,
            .len = size,
            .buf = buffer,
        },
    };
    int ret;

    ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
    if (ret < 0)
        return ret;
    if (ret != ARRAY_SIZE(msgs))
        return -EPROTO;

    return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_read);

/**
 * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s)
 * @adapter: I2C adapter for the DDC bus
 * @offset: register offset
 * @buffer: buffer for write data
 * @size: size of the buffer
 *
 * Writes @size bytes to the DP dual mode adaptor registers
 * starting at @offset.
 *
 * Returns:
 * 0 on success, negative error code on failure
 */
ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
                   u8 offset, const void *buffer, size_t size)
{
    struct i2c_msg msg = {
        .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
        .flags = 0,
        .len = 1 + size,
        .buf = NULL,
    };
    void *data;
    int ret;

    data = kmalloc(msg.len, GFP_TEMPORARY);
    if (!data)
        return -ENOMEM;

    msg.buf = data;

    memcpy(data, &offset, 1);
    memcpy(data + 1, buffer, size);

    ret = i2c_transfer(adapter, &msg, 1);

    kfree(data);

    if (ret < 0)
        return ret;
    if (ret != 1)
        return -EPROTO;

    return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_write);

static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
{
    static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
        "DP-HDMI ADAPTOR\x04";

    return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
              sizeof(dp_dual_mode_hdmi_id)) == 0;
}

static bool is_type2_adaptor(uint8_t adaptor_id)
{
    return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
                  DP_DUAL_MODE_REV_TYPE2);
}

/**
 * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
 * @adapter: I2C adapter for the DDC bus
 *
 * Attempt to identify the type of the DP dual mode adaptor used.
 *
 * Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not
 * certain whether we're dealing with a native HDMI port or
 * a type 1 DVI dual mode adaptor. The driver will have to use
 * some other hardware/driver specific mechanism to make that
 * distinction.
 *
 * Returns:
 * The type of the DP dual mode adaptor used
 */
enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
{
    char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
    uint8_t adaptor_id = 0x00;
    ssize_t ret;

    /*
     * Let's see if the adaptor is there by reading the
     * HDMI ID registers.
     *
     * Note that type 1 DVI adaptors are not required to implement
     * any registers, and that presents a problem for detection.
     * If the i2c transfer is nacked, we may or may not be dealing
     * with a type 1 DVI adaptor. Some other mechanism of detecting
     * the presence of the adaptor is required. One way would be
     * to check the state of the CONFIG1 pin, another method would
     * simply require the driver to know whether the port is a DP++
     * port or a native HDMI port. Both of these methods are entirely
     * hardware/driver specific so we can't deal with them here.
     */
    ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
                    hdmi_id, sizeof(hdmi_id));
    if (ret)
        return DRM_DP_DUAL_MODE_UNKNOWN;

    /*
     * Sigh. Some (maybe all?) type 1 adaptors are broken and ack
     * the offset but ignore it, and instead they just always return
     * data from the start of the HDMI ID buffer. So for a broken
     * type 1 HDMI adaptor a single byte read will always give us
     * 0x44, and for a type 1 DVI adaptor it should give 0x00
     * (assuming it implements any registers). Fortunately neither
     * of those values will match the type 2 signature of the
     * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
     * the type 2 adaptor detection safely even in the presence
     * of broken type 1 adaptors.
     */
    ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
                    &adaptor_id, sizeof(adaptor_id));
    if (ret == 0) {
        if (is_type2_adaptor(adaptor_id)) {
            if (is_hdmi_adaptor(hdmi_id))
                return DRM_DP_DUAL_MODE_TYPE2_HDMI;
            else
                return DRM_DP_DUAL_MODE_TYPE2_DVI;
        }
    }

    if (is_hdmi_adaptor(hdmi_id))
        return DRM_DP_DUAL_MODE_TYPE1_HDMI;
    else
        return DRM_DP_DUAL_MODE_TYPE1_DVI;
}
EXPORT_SYMBOL(drm_dp_dual_mode_detect);

/**
 * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor
 * @type: DP dual mode adaptor type
 * @adapter: I2C adapter for the DDC bus
 *
 * Determine the max TMDS clock the adaptor supports based on the
 * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK
 * register (on type2 adaptors). As some type 1 adaptors have
 * problems with registers (see comments in drm_dp_dual_mode_detect())
 * we don't read the register on those, instead we simply assume
 * a 165 MHz limit based on the specification.
 *
 * Returns:
 * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz.
 */
int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
                    struct i2c_adapter *adapter)
{
    uint8_t max_tmds_clock;
    ssize_t ret;

    /* native HDMI so no limit */
    if (type == DRM_DP_DUAL_MODE_NONE)
        return 0;

    /*
     * Type 1 adaptors are limited to 165MHz
     * Type 2 adaptors can tell us their limit
     */
    if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
        return 165000;

    ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK,
                    &max_tmds_clock, sizeof(max_tmds_clock));
    if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) {
        DRM_DEBUG_KMS("Failed to query max TMDS clock\n");
        return 165000;
    }

    return max_tmds_clock * 5000 / 2;
}
EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);

/**
 * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor
 * @type: DP dual mode adaptor type
 * @adapter: I2C adapter for the DDC bus
 * @enabled: current state of the TMDS output buffers
 *
 * Get the state of the TMDS output buffers in the adaptor. For
 * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN
 * register. As some type 1 adaptors have problems with registers
 * (see comments in drm_dp_dual_mode_detect()) we don't read the
 * register on those, instead we simply assume that the buffers
 * are always enabled.
 *
 * Returns:
 * 0 on success, negative error code on failure
 */
int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
                     struct i2c_adapter *adapter,
                     bool *enabled)
{
    uint8_t tmds_oen;
    ssize_t ret;

    if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) {
        *enabled = true;
        return 0;
    }

    ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
                    &tmds_oen, sizeof(tmds_oen));
    if (ret) {
        DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n");
        return ret;
    }

    *enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE);

    return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);

/**
 * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor
 * @type: DP dual mode adaptor type
 * @adapter: I2C adapter for the DDC bus
 * @enable: enable (as opposed to disable) the TMDS output buffers
 *
 * Set the state of the TMDS output buffers in the adaptor. For
 * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
 * some type 1 adaptors have problems with registers (see comments
 * in drm_dp_dual_mode_detect()) we avoid touching the register,
 * making this function a no-op on type 1 adaptors.
 *
 * Returns:
 * 0 on success, negative error code on failure
 */
int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
                     struct i2c_adapter *adapter, bool enable)
{
    uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
    ssize_t ret;

    if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
        return 0;

    ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
                     &tmds_oen, sizeof(tmds_oen));
    if (ret) {
        DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
                  enable ? "enable" : "disable");
        return ret;
    }

    return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);

/**
 * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string
 * @type: DP dual mode adaptor type
 *
 * Returns:
 * String representation of the DP dual mode adaptor type
 */
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
{
    switch (type) {
    case DRM_DP_DUAL_MODE_NONE:
        return "none";
    case DRM_DP_DUAL_MODE_TYPE1_DVI:
        return "type 1 DVI";
    case DRM_DP_DUAL_MODE_TYPE1_HDMI:
        return "type 1 HDMI";
    case DRM_DP_DUAL_MODE_TYPE2_DVI:
        return "type 2 DVI";
    case DRM_DP_DUAL_MODE_TYPE2_HDMI:
        return "type 2 HDMI";
    default:
        WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN);
        return "unknown";
    }
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
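
Taken together, a source driver's HDMI path would use these helpers roughly as below. Note the MAX_TMDS_CLOCK register is in 2.5 MHz units, hence the val * 5000 / 2 conversion to kHz (e.g. 0x42 = 66 gives 165000 kHz). A hedged sketch (the function name is illustrative; ddc is the port's DDC i2c_adapter):

    /* Sketch: typical driver use of the DP++ helpers on an HDMI port. */
    static void my_hdmi_init_dp_dual_mode(struct i2c_adapter *ddc)
    {
        enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(ddc);
        int max_tmds_clock = drm_dp_dual_mode_max_tmds_clock(type, ddc);

        DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected, max TMDS clock %d kHz\n",
                  drm_dp_get_dual_mode_type_name(type), max_tmds_clock);

        /* Make sure the TMDS output buffers are on before enabling the port. */
        drm_dp_dual_mode_set_tmds_output(type, ddc, true);
    }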
|
@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/i2c.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_dp_aux_dev.h>
#include <drm/drmP.h>

/**
@ -761,6 +762,8 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
 */
int drm_dp_aux_register(struct drm_dp_aux *aux)
{
	int ret;

	mutex_init(&aux->hw_mutex);

	aux->ddc.algo = &drm_dp_i2c_algo;

@ -1062,7 +1062,7 @@ static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)

		ret = drm_dp_dpcd_write(
				mstb->mgr->aux,
				DP_GUID,
				DP_GUID,
				mstb->guid,
				16);
	}
@ -1420,7 +1420,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
@ -1686,8 +1686,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,

		if (!mstb) {
			drm_dp_put_port(port);
			return -EINVAL;
		}
			return -EINVAL;
		}
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@ -2066,7 +2066,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
		kref_get(&mgr->mst_primary->kref);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
			 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			goto out_unlock;
		}
@ -2283,14 +2283,14 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
	bool seqno;

	if (!mgr->up_req_recv.initial_hdr.broadcast) {
		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->up_req_recv.initial_hdr.lct,
						    mgr->up_req_recv.initial_hdr.rad);
		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}
	mstb = drm_dp_get_mst_branch_device(mgr,
					    mgr->up_req_recv.initial_hdr.lct,
					    mgr->up_req_recv.initial_hdr.rad);
	if (!mstb) {
		DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}
	}

	seqno = mgr->up_req_recv.initial_hdr.seqno;

@ -32,6 +32,7 @@
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_displayid.h>
@ -204,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 0x0f - 1024x768@43Hz, interlace */
	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
		   1208, 1264, 0, 768, 768, 772, 817, 0,
		   1208, 1264, 0, 768, 768, 776, 817, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
		   DRM_MODE_FLAG_INTERLACE) },
	/* 0x10 - 1024x768@60Hz */
@ -521,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
		   720, 840, 0, 480, 481, 484, 500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
		   704, 832, 0, 480, 489, 491, 520, 0,
		   704, 832, 0, 480, 489, 492, 520, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
		   768, 864, 0, 480, 483, 486, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 490, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@ -538,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
		   1136, 1312, 0, 768, 769, 772, 800, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@ -1394,6 +1395,31 @@ struct edid *drm_get_edid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_get_edid);

/**
 * drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
 * @connector: connector we're probing
 * @adapter: I2C adapter to use for DDC
 *
 * Wrapper around drm_get_edid() for laptops with dual GPUs using one set of
 * outputs. The wrapper adds the requisite vga_switcheroo calls to temporarily
 * switch DDC to the GPU which is retrieving EDID.
 *
 * Return: Pointer to valid EDID or %NULL if we couldn't find any.
 */
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
				     struct i2c_adapter *adapter)
{
	struct pci_dev *pdev = connector->dev->pdev;
	struct edid *edid;

	vga_switcheroo_lock_ddc(pdev);
	edid = drm_get_edid(connector, adapter);
	vga_switcheroo_unlock_ddc(pdev);

	return edid;
}
EXPORT_SYMBOL(drm_get_edid_switcheroo);
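
/*
 * Example (illustrative sketch only). A connector's ->get_modes() hook on
 * a dual-GPU laptop would use the switcheroo-aware wrapper so DDC is muxed
 * to the probing GPU; the helper name below is hypothetical.
 */
static int example_get_modes(struct drm_connector *connector,
			     struct i2c_adapter *adapter)
{
	struct edid *edid = drm_get_edid_switcheroo(connector, adapter);
	int count = 0;

	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return count;
}
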
/**
 * drm_edid_duplicate - duplicate an EDID and the extensions
 * @edid: EDID to duplicate
@ -2215,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
	int i, j, m, modes = 0;
	struct drm_display_mode *mode;
	u8 *est = ((u8 *)timing) + 5;
	u8 *est = ((u8 *)timing) + 6;

	for (i = 0; i < 6; i++) {
		for (j = 7; j >= 0; j--) {
@ -3282,7 +3308,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
	u8 *cea;
	u8 *name;
	u8 *db;
	int sad_count = 0;
	int total_sad_count = 0;
	int mnl;
	int dbl;

@ -3296,6 +3322,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)

	name = NULL;
	drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
	/* max: 13 bytes EDID, 16 bytes ELD */
	for (mnl = 0; name && mnl < 13; mnl++) {
		if (name[mnl] == 0x0a)
			break;
@ -3324,11 +3351,15 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
		dbl = cea_db_payload_len(db);

		switch (cea_db_tag(db)) {
			int sad_count;

		case AUDIO_BLOCK:
			/* Audio Data Block, contains SADs */
			sad_count = dbl / 3;
			if (dbl >= 1)
				memcpy(eld + 20 + mnl, &db[1], dbl);
			sad_count = min(dbl / 3, 15 - total_sad_count);
			if (sad_count >= 1)
				memcpy(eld + 20 + mnl + total_sad_count * 3,
				       &db[1], sad_count * 3);
			total_sad_count += sad_count;
			break;
		case SPEAKER_BLOCK:
			/* Speaker Allocation Data Block */
@ -3345,13 +3376,13 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
			}
		}
	}
	eld[5] |= sad_count << 4;
	eld[5] |= total_sad_count << 4;

	eld[DRM_ELD_BASELINE_ELD_LEN] =
		DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);

	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
		      drm_eld_size(eld), sad_count);
		      drm_eld_size(eld), total_sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
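
/*
 * Example (illustrative sketch of the bound the drm_edid_to_eld() hunk
 * above enforces). The ELD baseline block holds at most 15 SADs of 3
 * bytes each, so each CEA audio data block's contribution is clamped
 * against the running total; this standalone helper is hypothetical.
 */
static int example_clamp_sad_count(int dbl, int total_sad_count)
{
	/* dbl / 3 SADs are present in this block; never exceed 15 overall. */
	return min(dbl / 3, 15 - total_sad_count);
}
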
@ -104,21 +104,17 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_connector *connector;
	int i;
	int i, ret;

	if (!drm_fbdev_emulation)
		return 0;

	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev) {
		struct drm_fb_helper_connector *fb_helper_connector;
		ret = drm_fb_helper_add_one_connector(fb_helper, connector);

		fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
		if (!fb_helper_connector)
		if (ret)
			goto fail;

		fb_helper_connector->connector = connector;
		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
	}
	mutex_unlock(&dev->mode_config.mutex);
	return 0;
@ -130,7 +126,7 @@ fail:
	fb_helper->connector_count = 0;
	mutex_unlock(&dev->mode_config.mutex);

	return -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);

@ -1676,13 +1672,13 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
	width = dev->mode_config.max_width;
	height = dev->mode_config.max_height;

	crtcs = kcalloc(dev->mode_config.num_connector,
	crtcs = kcalloc(fb_helper->connector_count,
			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
	modes = kcalloc(dev->mode_config.num_connector,
	modes = kcalloc(fb_helper->connector_count,
			sizeof(struct drm_display_mode *), GFP_KERNEL);
	offsets = kcalloc(dev->mode_config.num_connector,
	offsets = kcalloc(fb_helper->connector_count,
			  sizeof(struct drm_fb_offset), GFP_KERNEL);
	enabled = kcalloc(dev->mode_config.num_connector,
	enabled = kcalloc(fb_helper->connector_count,
			  sizeof(bool), GFP_KERNEL);
	if (!crtcs || !modes || !enabled || !offsets) {
		DRM_ERROR("Memory allocation failed\n");
@ -1696,9 +1692,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
	    fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
					     offsets,
					     enabled, width, height))) {
		memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
		memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
		memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
		memset(modes, 0, fb_helper->connector_count*sizeof(modes[0]));
		memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0]));
		memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0]));

		if (!drm_target_cloned(fb_helper, modes, offsets,
				       enabled, width, height) &&
@ -1778,6 +1774,27 @@ out:
 * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
 * values for the fbdev info structure.
 *
 * HANG DEBUGGING:
 *
 * When you have fbcon support built-in or already loaded, this function will do
 * a full modeset to setup the fbdev console. Due to locking misdesign in the
 * VT/fbdev subsystem that entire modeset sequence has to be done while holding
 * console_lock. Until console_unlock is called no dmesg lines will be sent out
 * to consoles, not even serial console. This means when your driver crashes,
 * you will see absolutely nothing else but a system stuck in this function,
 * with no further output. Any kind of printk() you place within your own driver
 * or in the drm core modeset code will also never show up.
 *
 * Standard debug practice is to run the fbcon setup without taking the
 * console_lock as a hack, to be able to see backtraces and crashes on the
 * serial line. This can be done by setting the fb.lockless_register_fb=1 kernel
 * cmdline option.
 *
 * The other option is to just disable fbdev emulation since very likely the
 * first modeset from userspace will crash in the same way, and is even easier
 * to debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
 * kernel cmdline option.
 *
 * RETURNS:
 * Zero if everything went ok, nonzero otherwise.
 */
@ -1832,6 +1849,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
	struct drm_device *dev = fb_helper->dev;
	u32 max_width, max_height;

	ENTER();

	if (!drm_fbdev_emulation)
		return 0;

@ -1853,6 +1872,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
//	drm_setup_crtcs(fb_helper);
//	drm_modeset_unlock_all(dev);
//	drm_fb_helper_set_par(fb_helper->fbdev);
	LEAVE();
	return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
@ -1861,9 +1881,9 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 * but the module doesn't depend on any fb console symbols. At least
 * attempt to load fbcon to avoid leaving the system without a usable console.
 */
#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
static int __init drm_fb_helper_modinit(void)
int __init drm_fb_helper_modinit(void)
{
#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
	const char *name = "fbcon";
	struct module *fbcon;

@ -1873,8 +1893,7 @@ static int __init drm_fb_helper_modinit(void)

	if (!fbcon)
		request_module_nowait(name);
#endif
	return 0;
}

module_init(drm_fb_helper_modinit);
#endif
EXPORT_SYMBOL(drm_fb_helper_modinit);

@ -224,7 +224,7 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_unreference_unlocked(obj);
	drm_gem_object_unreference_unlocked(obj);
}

/*
@ -298,7 +298,7 @@ EXPORT_SYMBOL(drm_gem_handle_delete);
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
@ -315,7 +315,7 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
@ -787,7 +787,13 @@ drm_gem_object_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_free);


/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
@ -797,19 +803,135 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success, or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
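
/*
 * Example (illustrative sketch of the dma-buf use case described above).
 * A driver's dma-buf ->mmap() could forward to drm_gem_mmap_obj() after
 * its own access checks; the wrapper name here is hypothetical.
 */
static int example_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* No fake-offset lookup here: the object is already known. */
	return drm_gem_mmap_obj(obj, obj->size, vma);
}
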
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, filp)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
#endif

@ -37,15 +37,6 @@
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/rculist.h>

#define hlist_for_each_entry_rcu(pos, head, member) \
	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
		typeof(*(pos)), member); \
	     pos; \
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
		&(pos)->member)), typeof(*(pos)), member))

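/*
 * Example (illustrative use of the RCU hash iteration this file relies
 * on, now taken from <linux/rculist.h> instead of the local fallback
 * macro removed here; the lookup helper below is hypothetical).
 */
static struct drm_hash_item *example_ht_find(struct hlist_head *bucket,
					     unsigned long key)
{
	struct drm_hash_item *entry;

	hlist_for_each_entry_rcu(entry, bucket, head) {
		if (entry->key == key)
			return entry;
	}
	return NULL;
}
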
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{

@ -908,15 +908,11 @@ static void send_vblank_event(struct drm_device *dev,
			      struct drm_pending_vblank_event *e,
			      unsigned long seq, struct timeval *now)
{
	assert_spin_locked(&dev->event_lock);

	e->event.sequence = seq;
	e->event.tv_sec = now->tv_sec;
	e->event.tv_usec = now->tv_usec;

	list_add_tail(&e->base.link,
		      &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);

}

/**
@ -1241,7 +1237,7 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)

	/* Avoid redundant vblank disables without previous drm_vblank_on(). */
	if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
		vblank_disable_and_save(dev, pipe);
	vblank_disable_and_save(dev, pipe);

	wake_up(&vblank->queue);

@ -185,6 +185,44 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
}
EXPORT_SYMBOL(mipi_dsi_create_packet);

/**
 * mipi_dsi_shutdown_peripheral() - sends a Shutdown Peripheral command
 * @dsi: DSI peripheral device
 *
 * Return: 0 on success or a negative error code on failure.
 */
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_msg msg = {
		.channel = dsi->channel,
		.type = MIPI_DSI_SHUTDOWN_PERIPHERAL,
		.tx_buf = (u8 [2]) { 0, 0 },
		.tx_len = 2,
	};

	return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral);

/**
 * mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command
 * @dsi: DSI peripheral device
 *
 * Return: 0 on success or a negative error code on failure.
 */
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_msg msg = {
		.channel = dsi->channel,
		.type = MIPI_DSI_TURN_ON_PERIPHERAL,
		.tx_buf = (u8 [2]) { 0, 0 },
		.tx_len = 2,
	};

	return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral);
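
/*
 * Example (illustrative sketch of how a panel driver would pair the two
 * helpers above in its disable/enable paths; names are hypothetical).
 */
static int example_panel_disable(struct mipi_dsi_device *dsi)
{
	/* Sends the DSI Shutdown Peripheral command; 0 or negative errno. */
	return mipi_dsi_shutdown_peripheral(dsi);
}

static int example_panel_enable(struct mipi_dsi_device *dsi)
{
	return mipi_dsi_turn_on_peripheral(dsi);
}
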
/*
 * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of
 * the payload in a long packet transmitted from the peripheral back to the

@ -553,10 +553,10 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
 * drivers/video/fbmon.c
 *
 * Standard GTF parameters:
 * M = 600
 * C = 40
 * K = 128
 * J = 20
 *     M = 600
 *     C = 40
 *     K = 128
 *     J = 20
 *
 * Returns:
 * The modeline based on the GTF algorithm stored in a drm_display_mode object.
@ -1216,13 +1216,13 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
				pmode->type |= mode->type;
				drm_mode_copy(mode, pmode);
			} else {
				mode->type |= pmode->type;
			mode->type |= pmode->type;
			}

			list_del(&pmode->head);
			drm_mode_destroy(connector->dev, pmode);
			break;
		}
		list_del(&pmode->head);
		drm_mode_destroy(connector->dev, pmode);
		break;
	}

	if (!found_it) {
		list_move_tail(&pmode->head, &connector->modes);
@ -1244,7 +1244,7 @@ EXPORT_SYMBOL(drm_mode_connector_list_update);
 * This uses the same parameters as the fb modedb.c, except for an extra
 * force-enable, force-enable-digital and force-disable bit at the end:
 *
 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
 *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
 *
 * The intermediate drm_cmdline_mode structure is required to store additional
 * options from the command line modeline like the force-enable/disable flag.
@ -1371,8 +1371,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
	}
done:
	if (i >= 0) {
		printk(KERN_WARNING
		       "parse error at position %i in video mode '%s'\n",
		pr_warn("[drm] parse error at position %i in video mode '%s'\n",
			i, name);
		mode->specified = false;
		return false;
@ -1519,6 +1518,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
	if (out->status != MODE_OK)
		goto out;

	drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);

	ret = 0;

out:

@ -40,13 +40,13 @@
 * The basic usage pattern is to:
 *
 *     drm_modeset_acquire_init(&ctx)
 * retry:
 *     retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, &ctx)
 *         if (ret == -EDEADLK) {
 *             drm_modeset_backoff(&ctx);
 *             goto retry;
 *         }
 *	ret = drm_modeset_lock(lock, &ctx)
 *	if (ret == -EDEADLK) {
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *     }
 *     ... do stuff ...
 *     drm_modeset_drop_locks(&ctx);
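
/*
 * Example (the retry pattern from the comment above, spelled out as a
 * compilable sketch; the two-CRTC body is hypothetical).
 */
static void example_lock_two_crtcs(struct drm_crtc *a, struct drm_crtc *b)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&a->mutex, &ctx);
	if (!ret)
		ret = drm_modeset_lock(&b->mutex, &ctx);
	if (ret == -EDEADLK) {
		/* Another thread holds one of the locks: drop all and retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* ... do stuff ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
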
@ -459,7 +459,7 @@ EXPORT_SYMBOL(drm_modeset_unlock);
 * Returns: 0 on success or a negative error-code on failure.
 */
int drm_modeset_lock_all_ctx(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;

@ -219,30 +219,30 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
		connector->status = connector->funcs->detect(connector, true);
	}

	/*
	 * Normally either the driver's hpd code or the poll loop should
	 * pick up any changes and fire the hotplug event. But if
	 * userspace sneaks in a probe, we might miss a change. Hence
	 * check here, and if anything changed start the hotplug code.
	 */
	if (old_status != connector->status) {
		/*
		 * Normally either the driver's hpd code or the poll loop should
		 * pick up any changes and fire the hotplug event. But if
		 * userspace sneaks in a probe, we might miss a change. Hence
		 * check here, and if anything changed start the hotplug code.
		 */
		if (old_status != connector->status) {
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
			      connector->base.id,
			      connector->name,
				      connector->base.id,
				      connector->name,
			      drm_get_connector_status_name(old_status),
			      drm_get_connector_status_name(connector->status));

		/*
		 * The hotplug event code might call into the fb
		 * helpers, and so expects that we do not hold any
		 * locks. Fire up the poll struct instead, it will
		 * disable itself again.
		 */
		dev->mode_config.delayed_event = true;
		if (dev->mode_config.poll_enabled)
			schedule_delayed_work(&dev->mode_config.output_poll_work,
					      0);
	}
			/*
			 * The hotplug event code might call into the fb
			 * helpers, and so expects that we do not hold any
			 * locks. Fire up the poll struct instead, it will
			 * disable itself again.
			 */
			dev->mode_config.delayed_event = true;
			if (dev->mode_config.poll_enabled)
				schedule_delayed_work(&dev->mode_config.output_poll_work,
						      0);
		}

	/* Re-enable polling in case the global poll config changed. */
	if (drm_kms_helper_poll != dev->mode_config.poll_running)
@ -258,11 +258,11 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
		goto prune;
	}

	if (connector->override_edid) {
		struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
		if (connector->override_edid) {
			struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;

		count = drm_add_edid_modes(connector, edid);
		drm_edid_to_eld(connector, edid);
			count = drm_add_edid_modes(connector, edid);
			drm_edid_to_eld(connector, edid);
	} else {
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
		count = drm_load_edid_firmware(connector);
@ -341,7 +341,7 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
 */
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
	/* send a uevent + call fbdev */
	/* send a uevent + call fbdev */
	drm_sysfs_hotplug_event(dev);
	if (dev->mode_config.funcs->output_poll_changed)
		dev->mode_config.funcs->output_poll_changed(dev);

@ -2,8 +2,8 @@ CC = kos32-gcc
FASM = fasm.exe

DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86 -DCONFIG_X86_32 -DCONFIG_PCI
DEFINES += -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DCONFIG_HAVE_ATOMIC_IOMAP -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DKBUILD_MODNAME=\"i915.dll\"

DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
@ -127,6 +127,7 @@ NAME_SRC= main.c \
	$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
	$(DRM_TOPDIR)/drm_crtc.c \
	$(DRM_TOPDIR)/drm_crtc_helper.c \
	$(DRM_TOPDIR)/drm_dp_dual_mode_helper.c \
	$(DRM_TOPDIR)/drm_dp_helper.c \
	$(DRM_TOPDIR)/drm_dp_mst_topology.c \
	$(DRM_TOPDIR)/drm_atomic.c \
@ -147,7 +148,7 @@ NAME_SRC= main.c \
	$(DRM_TOPDIR)/drm_rect.c \
	$(DRM_TOPDIR)/drm_stub.c

SRC_DEP:=
SRC_DEP:=


NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\

@ -1,10 +1,9 @@

CC = kos32-gcc
FASM = fasm.exe

DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86 -DCONFIG_X86_32 -DCONFIG_PCI
DEFINES += -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DCONFIG_HAVE_ATOMIC_IOMAP -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DKBUILD_MODNAME=\"i915.dll\"

DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
@ -127,6 +126,7 @@ NAME_SRC= main.c \
	$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
	$(DRM_TOPDIR)/drm_crtc.c \
	$(DRM_TOPDIR)/drm_crtc_helper.c \
	$(DRM_TOPDIR)/drm_dp_dual_mode_helper.c \
	$(DRM_TOPDIR)/drm_dp_helper.c \
	$(DRM_TOPDIR)/drm_dp_mst_topology.c \
	$(DRM_TOPDIR)/drm_atomic.c \

@ -41,7 +41,7 @@
#include <linux/vgaarb.h>
#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>
#include <linux/pm.h>
@ -167,6 +167,9 @@ int i915_getparam(struct drm_device *dev, void *data,
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
@ -298,12 +301,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
	 * vga_client_register() fails with -ENODEV.
	 */

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv, false);

@ -311,7 +308,7 @@ static int i915_load_modeset_init(struct drm_device *dev)

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;
		goto cleanup_csr;

	intel_setup_gmbus(dev);

@ -363,13 +360,8 @@ cleanup_gem:
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	intel_guc_ucode_fini(dev);
//	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
//	vga_switcheroo_unregister_client(dev->pdev);
cleanup_csr:
cleanup_vga_client:
//	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
@ -722,7 +714,41 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
			   !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
@ -761,6 +787,83 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv)
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL)
		goto out_free_dp_wq;

	system_wq = dev_priv->wq;

	return 0;

out_free_dp_wq:
out_free_wq:
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
}

static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
@ -776,7 +879,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	int ret = 0;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;
@ -803,6 +906,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto out_free_priv;

	intel_pm_setup(dev);

	intel_runtime_pm_get(dev_priv);
@ -821,28 +928,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
		goto out_runtime_pm_put;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;
	}

	set_fake_framebuffer();

@ -853,7 +944,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_freecsr;
		goto out_uncore_fini;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
@ -883,49 +974,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable = AllocKernelSpace(8192);
	printk("aperture base %x size = %x\n",(u32)dev_priv->gtt.mappable_base,(u32)aperture_size);
	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}


	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}
	system_wq = dev_priv->wq;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_opregion_setup(dev);

	i915_gem_load(dev);
	i915_gem_load_init(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
@ -981,16 +1045,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
out_power_well:
	drm_vblank_cleanup(dev);
out_gem_unload:

out_freewq:
out_mtrrfree:
out_gtt:
	i915_global_gtt_cleanup(dev);
out_freecsr:
out_uncore_fini:
put_bridge:
free_priv:
	kfree(dev_priv);
	return ret;
out_runtime_pm_put:
	i915_workqueues_cleanup(dev_priv);
out_free_priv:
	kfree(dev_priv);

	return ret;
}

#if 0
@ -1015,8 +1079,7 @@ int i915_driver_unload(struct drm_device *dev)

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
	i915_gem_shrinker_cleanup(dev_priv);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);
@ -1044,6 +1107,8 @@ int i915_driver_unload(struct drm_device *dev)
	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);
@ -1062,27 +1127,17 @@ int i915_driver_unload(struct drm_device *dev)
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);
	i915_gem_cleanup_stolen(dev);

	intel_csr_ucode_fini(dev_priv);

	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);
	i915_mmio_cleanup(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
	i915_gem_load_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
	i915_workqueues_cleanup(dev_priv);
	kfree(dev_priv);

	return 0;
@ -1125,8 +1180,6 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_preclose(dev, file);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)

@ -37,6 +37,8 @@

#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>

#include <syscall.h>
@ -572,13 +574,7 @@ static int i915_drm_suspend(struct drm_device *dev)

	intel_suspend_gt_powersave(dev);

	/*
	 * Disable CRTCs directly since we want to preserve sw state
	 * for _thaw. Also, power gate the CRTC power wells.
	 */
	drm_modeset_lock_all(dev);
	intel_display_suspend(dev);
	drm_modeset_unlock_all(dev);

	intel_dp_mst_suspend(dev);

@ -733,12 +729,10 @@ static int i915_drm_resume(struct drm_device *dev)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	drm_modeset_lock_all(dev);
	intel_display_resume(dev);
	drm_modeset_unlock_all(dev);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
@ -858,6 +852,7 @@ int i915_resume_switcheroo(struct drm_device *dev)

	return i915_drm_resume(dev);
}
#endif

/**
 * i915_reset - reset chip after a hang
@ -910,7 +905,7 @@ int i915_reset(struct drm_device *dev)
		return ret;
	}

	intel_overlay_reset(dev_priv);
//	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

@ -952,6 +947,7 @@ int i915_reset(struct drm_device *dev)
	return 0;
}

#if 0
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
@ -1081,7 +1077,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
	 */
	broxton_init_cdclk(dev);
	broxton_ddi_phy_init(dev);
	intel_prepare_ddi(dev);

	return 0;
}
@ -1341,8 +1336,8 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      wait_for_on ? "on" : "off",
		      I915_READ(VLV_GTLC_PW_STATUS));
		      onoff(wait_for_on),
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
@ -1351,7 +1346,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");
			  onoff(wait_for_on));

	return err;
#undef COND
@ -1362,7 +1357,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_ERROR("GT register access while GT waking disabled\n");
	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

@ -1506,6 +1501,10 @@ static int intel_runtime_suspend(struct device *device)

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
@ -1554,6 +1553,8 @@ static int intel_runtime_resume(struct device *device)

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

@ -34,13 +34,14 @@
#include <uapi/drm/drm_fourcc.h>

#include <drm/drmP.h>
#include "i915_params.h"
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/scatterlist.h>
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
@ -48,17 +49,16 @@
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include "intel_guc.h"

#include <linux/spinlock.h>

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20151218"
#define DRIVER_DATE		"20160229"

#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
@ -69,11 +69,11 @@
	BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__);
@ -87,31 +87,25 @@
 */
#define I915_STATE_WARN(condition, format...) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, format); \
		else \
	if (unlikely(__ret_warn_on)) \
		if (!WARN(i915.verbose_state_checks, format)) \
			DRM_ERROR(format); \
	} \
	unlikely(__ret_warn_on); \
})

#define I915_STATE_WARN_ON(condition) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, "WARN_ON(" #condition ")\n"); \
		else \
			DRM_ERROR("WARN_ON(" #condition ")\n"); \
	} \
	unlikely(__ret_warn_on); \
})
#define I915_STATE_WARN_ON(x) \
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}
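
/*
 * Example (trivial usage sketch, hypothetical helper): the new onoff()
 * helper is what the i915_drv.c hunks above switch their log messages to.
 */
static inline void example_log_gt_wells(bool wait_for_on)
{
	DRM_DEBUG_KMS("GT wells should be %s\n", onoff(wait_for_on));
}
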
enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
@ -266,6 +260,9 @@ struct i915_hotplug {

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
		for_each_if ((__mask) & (1 << (__p)))
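
/*
 * Example (illustrative sketch of the new masked iterator, e.g. walking
 * only the pipes that survived the SKL fuse check in i915_dma.c above;
 * the helper name is hypothetical).
 */
static inline void example_walk_pipes(struct drm_i915_private *dev_priv,
				      u8 enabled_mask)
{
	enum pipe pipe;

	for_each_pipe_masked(dev_priv, pipe, enabled_mask)
		DRM_DEBUG_KMS("pipe %c is enabled\n", pipe_name(pipe));
}
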
#define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
@ -339,7 +336,7 @@ struct drm_i915_file_private {
|
||||
unsigned boosts;
|
||||
} rps;
|
||||
|
||||
struct intel_engine_cs *bsd_ring;
|
||||
unsigned int bsd_ring;
|
||||
};
|
||||
|
||||
enum intel_dpll_id {
|
||||
@ -633,6 +630,7 @@ struct drm_i915_display_funcs {
|
||||
struct dpll *best_clock);
|
||||
int (*compute_pipe_wm)(struct intel_crtc *crtc,
|
||||
struct drm_atomic_state *state);
|
||||
void (*program_watermarks)(struct intel_crtc_state *cstate);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@ -657,9 +655,6 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
void (*update_primary_plane)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
@ -726,6 +721,8 @@ struct intel_uncore {
i915_reg_t reg_post;
u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];

int unclaimed_mmio_check;
};

/* Iterate over initialised fw domains */
@ -890,6 +887,9 @@ struct intel_context {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int pin_count;
struct i915_vma *lrc_vma;
u64 lrc_desc;
uint32_t *lrc_reg_state;
} engine[I915_NUM_RINGS];

struct list_head link;
@ -903,16 +903,15 @@ enum fb_op_origin {
ORIGIN_DIRTYFB,
};

struct i915_fbc {
struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
 * it's the outer lock when overlapping with stolen_lock. */
struct mutex lock;
unsigned threshold;
unsigned int fb_id;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
unsigned int visible_pipes_mask;
struct intel_crtc *crtc;
int y;

struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
@ -922,18 +921,52 @@ struct i915_fbc {
bool enabled;
bool active;

struct intel_fbc_state_cache {
struct {
unsigned int mode_flags;
uint32_t hsw_bdw_pixel_rate;
} crtc;

struct {
unsigned int rotation;
int src_w;
int src_h;
bool visible;
} plane;

struct {
u64 ilk_ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
int fence_reg;
unsigned int tiling_mode;
} fb;
} state_cache;

struct intel_fbc_reg_params {
struct {
enum pipe pipe;
enum plane plane;
unsigned int fence_y_offset;
} crtc;

struct {
u64 ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
int fence_reg;
} fb;

int cfb_size;
} params;

struct intel_fbc_work {
bool scheduled;
u32 scheduled_vblank;
struct work_struct work;
struct drm_framebuffer *fb;
unsigned long enable_jiffies;
} work;

const char *no_fbc_reason;

bool (*is_active)(struct drm_i915_private *dev_priv);
void (*activate)(struct intel_crtc *crtc);
void (*deactivate)(struct drm_i915_private *dev_priv);
};

/**
@ -973,6 +1006,7 @@ struct i915_psr {
unsigned busy_frontbuffer_bits;
bool psr2_support;
bool aux_frame_sync;
bool link_standby;
};

enum intel_pch {
@ -1299,7 +1333,7 @@ struct i915_gem_mm {
bool busy;

/* the indicator for dispatch video commands on two BSD rings */
int bsd_ring_dispatch_index;
unsigned int bsd_ring_dispatch_index;

/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
@ -1485,7 +1519,7 @@ struct intel_vbt_data {
u8 seq_version;
u32 size;
u8 *data;
u8 *sequence[MIPI_SEQ_MAX];
const u8 *sequence[MIPI_SEQ_MAX];
} dsi;

int crt_ddc_pin;
@ -1657,11 +1691,18 @@ struct i915_wa_reg {
u32 mask;
};

#define I915_MAX_WA_REGS 16
/*
 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
 * allowing it for RCS as we don't foresee any requirement of having
 * a whitelist for other engines. When it is really required for
 * other engines then the limit need to be increased.
 */
#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)

struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
u32 count;
u32 hw_whitelist_count[I915_NUM_RINGS];
};
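Whitelist slots are recorded in the same table as ordinary workaround registers, which is why the cap above grows by RING_MAX_NONPRIV_SLOTS. A minimal sketch of how a recording helper might guard that cap — the helper and the addr/value field names are assumptions for illustration, not code from this commit:

    static int wa_add(struct i915_workarounds *wa,
                      i915_reg_t addr, u32 mask, u32 val)
    {
            /* Reject writes once the (now larger) table is full. */
            if (wa->count >= I915_MAX_WA_REGS)
                    return -ENOSPC;

            wa->reg[wa->count].addr = addr;   /* assumed field */
            wa->reg[wa->count].value = val;   /* assumed field */
            wa->reg[wa->count].mask = mask;
            wa->count++;

            return 0;
    }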

struct i915_virtual_gpu {
@ -1756,7 +1797,7 @@ struct drm_i915_private {
u32 pipestat_irq_mask[I915_MAX_PIPES];

struct i915_hotplug hotplug;
struct i915_fbc fbc;
struct intel_fbc fbc;
struct i915_drrs drrs;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@ -1780,7 +1821,7 @@ struct drm_i915_private {

unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_boot_cdclk;
unsigned int cdclk_freq, max_cdclk_freq;
unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
@ -1805,6 +1846,7 @@ struct drm_i915_private {

enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;

struct list_head vm_list; /* Global list of all address spaces */
struct i915_gtt gtt; /* VM representing the global address space */
@ -1825,8 +1867,13 @@ struct drm_i915_private {
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

/* dpll and cdclk state is protected by connection_mutex */
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];

unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];

int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

struct i915_workarounds workarounds;
@ -1941,6 +1988,8 @@ struct drm_i915_private {
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;

struct intel_context *kernel_context;

bool edp_low_vswing;

/* perform PHY state sanity checks? */
@ -2265,9 +2314,9 @@ struct drm_i915_gem_request {

};

int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out);
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx);
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@ -2576,6 +2625,12 @@ struct drm_i915_cmd_table {

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))

/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
IS_SKL_GT3(dev) || \
IS_SKL_GT4(dev))

/*
 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
 * even when in MSI mode. This results in spurious interrupt warnings if the
@ -2665,47 +2720,7 @@ extern int i915_max_ioctl;
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);

/* i915_params.c */
struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_dc;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
bool enable_guc_submission;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
int edp_vswing;
/* Kolibri related */
char *log_file;
char *cmdline_mode;
};
extern struct i915_params i915 __read_mostly;

/* i915_dma.c */
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@ -2748,7 +2763,8 @@ extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
@ -2870,7 +2886,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@ -3134,18 +3151,11 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
struct i915_address_space *ggtt =
&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
return vm == ggtt;
}

static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
WARN_ON(i915_is_ggtt(vm));

return container_of(vm, struct i915_hw_ppgtt, base);
}

@ -3283,6 +3293,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
#define I915_SHRINK_ACTIVE 0x8
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);

/* i915_gem_tiling.c */
@ -3453,16 +3464,14 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
@ -3619,6 +3628,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
i915_gem_request_assign(&ring->trace_irq_req, req);
}

#include "intel_drv.h"

#endif

@ -41,14 +41,11 @@
#define RQ_BUG_ON(expr)

extern int x86_clflush_size;
#define __copy_to_user_inatomic __copy_to_user

#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define MAP_SHARED 0x01 /* Share changes */

struct drm_i915_gem_object *get_fb_obj();

unsigned long vm_mmap(struct file *file, unsigned long addr,
@ -155,10 +152,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,

pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
@ -247,7 +244,7 @@ drop_pages(struct drm_i915_gem_object *obj)
int ret;

drm_gem_object_reference(&obj->base);
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
if (i915_vma_unbind(vma))
break;

@ -653,6 +650,24 @@ unlock:
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
loff_t page_base, int page_offset,
char __user *user_data,
int length)
{
void __iomem *vaddr_atomic;
void *vaddr;
unsigned long unwritten;

vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
/* We can use the cpu mem copy function because this is X86. */
vaddr = (void __force*)vaddr_atomic + page_offset;
unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
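__copy_from_user_inatomic_nocache() returns the number of bytes it could not copy, so fast_user_write() yields zero on success and nonzero when the user page faults inside the atomic mapping. A hedged sketch of the intended caller pattern, with assumed variable names (the real call site appears in the pwrite hunk below):

    /* Split the GTT offset into the page to map and the offset within it. */
    loff_t page_base = offset & PAGE_MASK;
    int page_offset = offset_in_page(offset);

    if (fast_user_write(dev_priv->gtt.mappable, page_base,
                        page_offset, user_data, page_length)) {
            /* Fault while atomically mapped: fall back to the slow path. */
            ret = -EFAULT;
    }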

/**
 * This is the fast pwrite path, where we copy the data directly from the
@ -702,10 +717,15 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;

MapPage(dev_priv->gtt.mappable,
dev_priv->gtt.mappable_base+page_base, PG_WRITEC|PG_SW);

memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
/* If we get a fault while copying data, then (presumably) our
 * source page isn't available. Return the error and we'll
 * retry in the slow path.
 */
if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_flush;
}

remain -= page_length;
user_data += page_length;
@ -741,9 +761,8 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
memcpy(vaddr + shmem_page_offset,
user_data,
page_length);
ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
user_data, page_length);
if (needs_clflush_after)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
@ -1126,7 +1145,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
wait_queue_t wait;
unsigned long timeout_expire;
s64 before, now;
s64 before = 0; /* Only to silence a compiler warning. */
int ret;

WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@ -1146,14 +1165,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
return -ETIME;

timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);

/*
 * Record current time in case interrupted by signal, or wedged.
 */
before = ktime_get_raw_ns();
}

if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);

/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();

/* Optimistic spin for the next jiffie before touching IRQs */
ret = __i915_spin_request(req, state);
@ -1213,11 +1235,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
DestroyEvent(wait.evnt);

out:
now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(req);

if (timeout) {
s64 tres = *timeout - (now - before);
s64 tres = *timeout - (ktime_get_raw_ns() - before);

*timeout = tres < 0 ? 0 : tres;

@ -2053,7 +2074,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
i915_gem_request_assign(&obj->last_read_req[ring->id], req);

list_move_tail(&vma->mm_list, &vma->vm->active_list);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}

static void
@ -2091,9 +2112,9 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
list_move_tail(&obj->global_list,
&to_i915(obj->base.dev)->mm.bound_list);

list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!list_empty(&vma->vm_link))
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

i915_gem_request_assign(&obj->last_fenced_req, NULL);
@ -2250,7 +2271,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,

trace_i915_gem_request_add(request);

// i915_queue_hangcheck(ring->dev);
i915_queue_hangcheck(ring->dev);

queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
@ -2316,10 +2337,8 @@ void i915_gem_request_free(struct kref *req_ref)
i915_gem_request_remove_from_client(req);

if (ctx) {
if (i915.enable_execlists) {
if (ctx != req->ring->default_context)
intel_lr_context_unpin(req);
}
if (i915.enable_execlists && ctx != req->i915->kernel_context)
intel_lr_context_unpin(ctx, req->ring);

i915_gem_context_unreference(ctx);
}
@ -2327,9 +2346,10 @@ void i915_gem_request_free(struct kref *req_ref)
kfree(req);
}

int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
static inline int
__i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_gem_request *req;
@ -2393,6 +2413,31 @@ err:
return ret;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 * This can be NULL if the request is not directly related to
 * any specific user context, in which case this function will
 * choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx)
{
struct drm_i915_gem_request *req;
int err;

if (ctx == NULL)
ctx = to_i915(engine->dev)->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
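A sketch of the new calling convention, matching the conversions made throughout the rest of this commit: callers receive the request or an ERR_PTR() instead of filling an out-parameter, and may pass a NULL context to get the kernel context:

    struct drm_i915_gem_request *req;

    req = i915_gem_request_alloc(ring, NULL);  /* NULL => kernel_context */
    if (IS_ERR(req))
            return PTR_ERR(req);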

void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);
@ -2584,11 +2629,9 @@ i915_gem_retire_requests(struct drm_device *dev)
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
if (i915.enable_execlists) {
unsigned long flags;

spin_lock_irqsave(&ring->execlist_lock, flags);
spin_lock_irq(&ring->execlist_lock);
idle &= list_empty(&ring->execlist_queue);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
spin_unlock_irq(&ring->execlist_lock);

intel_execlists_retire_requests(ring);
}
@ -2810,9 +2853,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;

if (*to_req == NULL) {
ret = i915_gem_request_alloc(to, to->default_context, to_req);
if (ret)
return ret;
struct drm_i915_gem_request *req;

req = i915_gem_request_alloc(to, NULL);
if (IS_ERR(req))
return PTR_ERR(req);

*to_req = req;
}

trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@ -2929,7 +2976,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;

if (list_empty(&vma->vma_link))
if (list_empty(&vma->obj_link))
return 0;

if (!drm_mm_node_allocated(&vma->node)) {
@ -2948,8 +2995,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
return ret;
}

if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);

/* release the fence reg _after_ flushing */
@ -2963,8 +3009,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
vma->vm->unbind_vma(vma);
vma->bound = 0;

list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm)) {
list_del_init(&vma->vm_link);
if (vma->is_ggtt) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
obj->map_and_fenceable = false;
} else if (vma->ggtt_view.pages) {
@ -3012,9 +3058,9 @@ int i915_gpu_idle(struct drm_device *dev)
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;

ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req))
return PTR_ERR(req);

ret = i915_switch_context(req);
if (ret) {
@ -3179,25 +3225,25 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
if (ret)
goto err_free_vma;
} else {
if (flags & PIN_HIGH) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
} else {
search_flag = DRM_MM_SEARCH_DEFAULT;
alloc_flag = DRM_MM_CREATE_DEFAULT;
}
if (flags & PIN_HIGH) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
} else {
search_flag = DRM_MM_SEARCH_DEFAULT;
alloc_flag = DRM_MM_CREATE_DEFAULT;
}

search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level,
start, end,
search_flag,
alloc_flag);
if (ret) {
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level,
start, end,
search_flag,
alloc_flag);
if (ret) {

goto err_free_vma;
}
goto err_free_vma;
}
}
if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
ret = -EINVAL;
@ -3210,7 +3256,7 @@ search_free:
goto err_remove_node;

list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
list_add_tail(&vma->vm_link, &vm->inactive_list);

return vma;

@ -3375,7 +3421,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* And bump the LRU for this access */
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->mm_list,
list_move_tail(&vma->vm_link,
&to_i915(obj->base.dev)->gtt.base.inactive_list);

return 0;
@ -3410,7 +3456,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 * catch the issue of the CS prefetch crossing page boundaries and
 * reading an invalid PTE on older architectures.
 */
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;

@ -3473,7 +3519,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 */
}

list_for_each_entry(vma, &obj->vma_list, vma_link) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;

@ -3483,7 +3529,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
}

list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;

@ -3957,10 +4003,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (ret)
goto unref;

BUILD_BUG_ON(I915_NUM_RINGS > 16);
args->busy = obj->active << 16;
if (obj->last_write_req)
args->busy |= obj->last_write_req->ring->id;
args->busy = 0;
if (obj->active) {
int i;

for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_request *req;

req = obj->last_read_req[i];
if (req)
args->busy |= 1 << (16 + req->ring->exec_id);
}
if (obj->last_write_req)
args->busy |= obj->last_write_req->ring->exec_id;
}

unref:
drm_gem_object_unreference(&obj->base);
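The busy ioctl now reports engines rather than a single ring id: the low 16 bits carry the exec_id of the last writer, and each still-pending reader sets one bit in the high half. A hypothetical userspace-style decode, for illustration only:

    uint32_t busy = args.busy;              /* from DRM_IOCTL_I915_GEM_BUSY */
    uint32_t last_writer = busy & 0xffff;   /* exec_id of the write engine */
    uint32_t readers = busy >> 16;          /* one bit per reading engine */

    if (busy == 0)
            /* object is idle */;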
@ -4136,7 +4192,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)

trace_i915_gem_object_destroy(obj);

list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
int ret;

vma->pin_count = 0;
@ -4190,7 +4246,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
vma->vm == vm)
return vma;
@ -4207,7 +4263,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
if (WARN_ONCE(!view, "no view specified"))
return ERR_PTR(-EINVAL);

list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
@ -4216,19 +4272,16 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,

void i915_gem_vma_destroy(struct i915_vma *vma)
{
struct i915_address_space *vm = NULL;
WARN_ON(vma->node.allocated);

/* Keep the vma as a placeholder in the execbuffer reservation lists */
if (!list_empty(&vma->exec_list))
return;

vm = vma->vm;
if (!vma->is_ggtt)
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

if (!i915_is_ggtt(vm))
i915_ppgtt_put(i915_vm_to_ppgtt(vm));

list_del(&vma->vma_link);
list_del(&vma->obj_link);

kfree(vma);
}
@ -4450,7 +4503,7 @@ i915_gem_init_hw(struct drm_device *dev)
 */
init_unused_rings(dev);

BUG_ON(!dev_priv->ring[RCS].default_context);
BUG_ON(!dev_priv->kernel_context);

ret = i915_ppgtt_init_hw(dev);
if (ret) {
@ -4471,7 +4524,7 @@ i915_gem_init_hw(struct drm_device *dev)
if (ret) {
DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
ret = -EIO;
goto out;
goto out;
}
}

@ -4487,10 +4540,9 @@ i915_gem_init_hw(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_request *req;

WARN_ON(!ring->default_context);

ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret) {
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
@ -4595,6 +4647,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)

for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring);

if (i915.enable_execlists)
/*
 * Neither the BIOS, ourselves or any other kernel
 * expects the system to be in execlists mode on startup,
 * so we need to reset the GPU back to legacy mode.
 */
intel_gpu_reset(dev);
}

static void
@ -4605,7 +4665,7 @@ init_ring_lists(struct intel_engine_cs *ring)
}

void
i915_gem_load(struct drm_device *dev)
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
@ -4651,6 +4711,7 @@ i915_gem_load(struct drm_device *dev)
i915_gem_restore_fences(dev);

i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);

dev_priv->mm.interruptible = true;

@ -4703,6 +4764,8 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);

file_priv->bsd_ring = -1;

ret = i915_gem_context_open(dev, file);
if (ret)
kfree(file_priv);
@ -4745,8 +4808,8 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,

WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
@ -4764,7 +4827,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;

list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
@ -4778,8 +4841,8 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
{
struct i915_vma *vma;

list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@ -4795,7 +4858,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;

list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
@ -4808,7 +4871,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
struct i915_vma *vma;

list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, obj_link)
if (drm_mm_node_allocated(&vma->node))
return true;

@ -4825,8 +4888,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,

BUG_ON(list_empty(&o->vma_list));

list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
@ -4838,7 +4901,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->pin_count > 0)
return true;

@ -142,7 +142,7 @@ static void i915_gem_context_clean(struct intel_context *ctx)
return;

list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
mm_list) {
vm_link) {
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
break;
}
@ -321,6 +321,18 @@ err_destroy:
return ERR_PTR(ret);
}

static void i915_gem_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
if (i915.enable_execlists) {
intel_lr_context_unpin(ctx, engine);
} else {
if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(ctx);
}
}

void i915_gem_context_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -329,40 +341,31 @@ void i915_gem_context_reset(struct drm_device *dev)
if (i915.enable_execlists) {
struct intel_context *ctx;

list_for_each_entry(ctx, &dev_priv->context_list, link) {
list_for_each_entry(ctx, &dev_priv->context_list, link)
intel_lr_context_reset(dev, ctx);
}

return;
}

for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *lctx = ring->last_context;

if (lctx) {
if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

i915_gem_context_unreference(lctx);
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
}

/* Force the GPU state to be reinitialised on enabling */
if (ring->default_context)
ring->default_context->legacy_hw_ctx.initialized = false;
}

/* Force the GPU state to be reinitialised on enabling */
dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}

int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int i;

/* Init should only be called once per module load. Eventually the
 * restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->ring[RCS].default_context))
if (WARN_ON(dev_priv->kernel_context))
return 0;

if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@ -392,12 +395,7 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}

for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];

/* NB: RCS will hold a ref for all rings */
ring->default_context = ctx;
}
dev_priv->kernel_context = ctx;

DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
@ -408,7 +406,7 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
struct intel_context *dctx = dev_priv->kernel_context;
int i;

if (dctx->legacy_hw_ctx.rcs_state) {
@ -424,28 +422,21 @@ void i915_gem_context_fini(struct drm_device *dev)
 * to offset the do_switch part, so that i915_gem_context_unreference()
 * can then free the base object correctly. */
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}

i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}

for (i = 0; i < I915_NUM_RINGS; i++) {
for (i = I915_NUM_RINGS; --i >= 0;) {
struct intel_engine_cs *ring = &dev_priv->ring[i];

if (ring->last_context)
i915_gem_context_unreference(ring->last_context);

ring->default_context = NULL;
ring->last_context = NULL;
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
}
}

i915_gem_context_unreference(dctx);
dev_priv->kernel_context = NULL;
}

int i915_gem_context_enable(struct drm_i915_gem_request *req)
@ -864,6 +855,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (!contexts_enabled(dev))
return -ENODEV;

if (args->pad != 0)
return -EINVAL;

ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@ -887,6 +881,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct intel_context *ctx;
int ret;

if (args->pad != 0)
return -EINVAL;

if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;

@ -116,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,

search_again:
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
list_for_each_entry(vma, &vm->inactive_list, vm_link) {
if (mark_free(vma, &unwind_list))
goto found;
}
@ -125,7 +125,7 @@ search_again:
goto none;

/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(vma, &vm->active_list, mm_list) {
list_for_each_entry(vma, &vm->active_list, vm_link) {
if (mark_free(vma, &unwind_list))
goto found;
}
@ -270,7 +270,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
WARN_ON(!list_empty(&vm->active_list));
}

list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
if (vma->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));

@ -193,13 +193,10 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
return eb->lut[handle];
} else {
struct hlist_head *head;
struct hlist_node *node;
struct i915_vma *vma;

head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
struct i915_vma *vma;

vma = hlist_entry(node, struct i915_vma, exec_node);
hlist_for_each_entry(vma, head, exec_node) {
if (vma->exec_handle == handle)
return vma;
}
@ -333,12 +330,25 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset;
MapPage(dev_priv->gtt.mappable,dev_priv->gtt.mappable_base +
(offset & PAGE_MASK), PG_SW);
reloc_page = dev_priv->gtt.mappable;
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

// io_mapping_unmap_atomic(reloc_page);
if (INTEL_INFO(dev)->gen >= 8) {
offset += sizeof(uint32_t);

if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
reloc_page =
io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
offset);
}

iowrite32(upper_32_bits(delta),
reloc_page + offset_in_page(offset));
}

io_mapping_unmap_atomic(reloc_page);

return 0;
}
@ -476,7 +486,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
ret = relocate_entry_cpu(obj, reloc, target_offset);
else if (obj->map_and_fenceable)
ret = relocate_entry_gtt(obj, reloc, target_offset);
else if (1)
else if (1)
ret = relocate_entry_clflush(obj, reloc, target_offset);
else {
WARN_ONCE(1, "Impossible case in relocation handling\n");
@ -512,7 +522,8 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
count = ARRAY_SIZE(stack_reloc);
remain -= count;

memcpy(r, user_relocs, count*sizeof(r[0]));
if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
return -EFAULT;

do {
u64 offset = r->presumed_offset;
@ -521,12 +532,12 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
if (ret)
return ret;

if (r->presumed_offset != offset)
{
memcpy(&user_relocs->presumed_offset,
&r->presumed_offset,
sizeof(r->presumed_offset));
}
if (r->presumed_offset != offset &&
__copy_to_user_inatomic(&user_relocs->presumed_offset,
&r->presumed_offset,
sizeof(r->presumed_offset))) {
return -EFAULT;
}

user_relocs++;
r++;
@ -655,7 +666,7 @@ need_reloc_mappable(struct i915_vma *vma)
if (entry->relocation_count == 0)
return false;

if (!i915_is_ggtt(vma->vm))
if (!vma->is_ggtt)
return false;

/* See also use_cpu_reloc() */
@ -674,8 +685,7 @@ eb_vma_misplaced(struct i915_vma *vma)
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;

WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_is_ggtt(vma->vm));
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);

if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
@ -1286,6 +1296,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
exec_start = params->batch_obj_vm_offset +
params->args_batch_start_offset;

if (exec_len == 0)
exec_len = params->batch_obj->base.size;

ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
@ -1302,33 +1315,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The Ring ID is returned.
 * The ring index is returned.
 */
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
struct drm_file *file)
static unsigned int
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;

/* Check whether the file_priv is using one ring */
if (file_priv->bsd_ring)
return file_priv->bsd_ring->id;
else {
/* If no, use the ping-pong mechanism to select one ring */
int ring_id;

mutex_lock(&dev->struct_mutex);
if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
ring_id = VCS;
dev_priv->mm.bsd_ring_dispatch_index = 1;
} else {
ring_id = VCS2;
dev_priv->mm.bsd_ring_dispatch_index = 0;
}
file_priv->bsd_ring = &dev_priv->ring[ring_id];
mutex_unlock(&dev->struct_mutex);
return ring_id;
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_ring < 0) {
/* If not, use the ping-pong mechanism to select one. */
mutex_lock(&dev_priv->dev->struct_mutex);
file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
dev_priv->mm.bsd_ring_dispatch_index ^= 1;
mutex_unlock(&dev_priv->dev->struct_mutex);
}

return file_priv->bsd_ring;
}
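file_priv->bsd_ring is now a plain ring index initialised to -1 (see the i915_gem_open() hunk above), so the first BSD submission on a file latches either index via the global XOR ping-pong and later submissions reuse it. A small usage sketch under those assumptions:

    unsigned int bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file); /* 0 or 1 */
    struct intel_engine_cs *ring = &dev_priv->ring[_VCS(bsd_idx)];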

static struct drm_i915_gem_object *
@ -1346,11 +1349,69 @@ eb_get_batch(struct eb_vmas *eb)
 * paranoia do it everywhere.
 */
if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

return vma->obj;
}

#define I915_USER_RINGS (4)

static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_DEFAULT] = RCS,
[I915_EXEC_RENDER] = RCS,
[I915_EXEC_BLT] = BCS,
[I915_EXEC_BSD] = VCS,
[I915_EXEC_VEBOX] = VECS
};

static int
eb_select_ring(struct drm_i915_private *dev_priv,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct intel_engine_cs **ring)
{
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

if (user_ring_id > I915_USER_RINGS) {
DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
return -EINVAL;
}

if ((user_ring_id != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}

if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
bsd_idx <= I915_EXEC_BSD_RING2) {
bsd_idx >>= I915_EXEC_BSD_SHIFT;
bsd_idx--;
} else {
DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
bsd_idx);
return -EINVAL;
}

*ring = &dev_priv->ring[_VCS(bsd_idx)];
} else {
*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
}

if (!intel_ring_initialized(*ring)) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return -EINVAL;
}

return 0;
}
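To make the shift-and-decrement in the BSD branch concrete, here is a worked example; the constant values are assumed from the upstream uapi header (include/uapi/drm/i915_drm.h), where I915_EXEC_BSD_RING2 == 2 << I915_EXEC_BSD_SHIFT:

    unsigned int bsd_idx = I915_EXEC_BSD_RING2; /* 2 << I915_EXEC_BSD_SHIFT */

    bsd_idx >>= I915_EXEC_BSD_SHIFT;    /* -> 2 */
    bsd_idx--;                          /* -> 1, zero-based */
    /* &dev_priv->ring[_VCS(1)] is VCS2, the second BSD engine. */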

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
@ -1358,6 +1419,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
@ -1386,51 +1448,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;

if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}

if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}

if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) {
int ring_id;

switch (args->flags & I915_EXEC_BSD_MASK) {
case I915_EXEC_BSD_DEFAULT:
ring_id = gen8_dispatch_bsd_ring(dev, file);
ring = &dev_priv->ring[ring_id];
break;
case I915_EXEC_BSD_RING1:
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BSD_RING2:
ring = &dev_priv->ring[VCS2];
break;
default:
DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
(int)(args->flags & I915_EXEC_BSD_MASK));
return -EINVAL;
}
} else
ring = &dev_priv->ring[VCS];
} else
ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
ret = eb_select_ring(dev_priv, file, args, &ring);
if (ret)
return ret;

if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@ -1580,11 +1600,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);

/* Allocate a request for this batch buffer nice and early. */
ret = i915_gem_request_alloc(ring, ctx, &params->request);
if (ret)
req = i915_gem_request_alloc(ring, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
}

ret = i915_gem_request_add_to_client(params->request, file);
ret = i915_gem_request_add_to_client(req, file);
if (ret)
goto err_batch_unpin;

@ -1600,6 +1622,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
params->request = req;

ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);

@ -1623,8 +1646,8 @@ err:
 * must be freed again. If it was submitted then it is being tracked
 * on the active request list and no clean up is required here.
 */
if (ret && params->request)
i915_gem_request_cancel(params->request);
if (ret && !IS_ERR_OR_NULL(req))
i915_gem_request_cancel(req);

mutex_unlock(&dev->struct_mutex);

@ -34,8 +34,8 @@
 * set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interract with frontbuffer
 * conmpression. Furthermore on older platforms fences are required for tiled
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
@ -46,8 +46,8 @@
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore fence state is committed to
 * the hardware lazily to avoid unecessary stalls on gen2/3. Therefore code must
 * explictly call i915_gem_object_get_fence() to synchronize fencing status
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
 * explicitly call i915_gem_object_get_fence() to synchronize fencing status
 * for cpu access. Also note that some code wants an unfenced view, for those
 * cases the fence can be removed forcefully with i915_gem_object_put_fence().
 *
@ -527,7 +527,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
 * 17 is not just a page offset, so as we page an objet out and back in,
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
@ -95,9 +95,11 @@
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

const struct i915_ggtt_view i915_ggtt_view_normal;
const struct i915_ggtt_view i915_ggtt_view_normal = {
.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED
.type = I915_GGTT_VIEW_ROTATED,
};

static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
@ -2122,6 +2124,25 @@ static void i915_address_space_init(struct i915_address_space *vm,
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

static void gtt_write_workarounds(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

/* This function is for gtt related workarounds. This function is
 * called on driver load and after a GPU reset, so you can place
 * workarounds here even if they get overwritten by GPU reset.
 */
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
if (IS_BROADWELL(dev))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_SKYLAKE(dev))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
else if (IS_BROXTON(dev))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}

int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -2138,6 +2159,8 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)

int i915_ppgtt_init_hw(struct drm_device *dev)
{
gtt_write_workarounds(dev);

/* In the case of execlists, PPGTT is enabled by the context descriptor
 * and the PDPs are contained within the context itself. We don't
 * need to do anything here. */
@ -2727,7 +2750,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
}
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
}

/* Clear any non-preallocated blocks */
@ -2799,6 +2822,8 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
ppgtt->base.cleanup(&ppgtt->base);
}

i915_gem_cleanup_stolen(dev);

if (drm_mm_initialized(&vm->mm)) {
if (intel_vgpu_active(dev))
intel_vgt_deballoon();
@ -3016,9 +3041,6 @@ static int gen8_gmch_probe(struct drm_device *dev,
*mappable_base = pci_resource_start(dev->pdev, 2);
*mappable_end = pci_resource_len(dev->pdev, 2);

if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

if (INTEL_INFO(dev)->gen >= 9) {
@ -3075,8 +3097,6 @@ static int gen6_gmch_probe(struct drm_device *dev,
return -ENXIO;
}

if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

*stolen = gen6_get_stolen_size(snb_gmch_ctl);
@ -3165,12 +3185,21 @@ int i915_gem_gtt_init(struct drm_device *dev)
}

gtt->base.dev = dev;
gtt->base.is_ggtt = true;

ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
&gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;

/*
 * Initialise stolen early so that we may reserve preallocated
 * objects for the BIOS to KMS transition.
 */
ret = i915_gem_init_stolen(dev);
if (ret)
goto out_gtt_cleanup;

/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
gtt->base.total >> 20);
@ -3190,6 +3219,11 @@ int i915_gem_gtt_init(struct drm_device *dev)
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

return 0;

out_gtt_cleanup:
gtt->base.cleanup(&dev_priv->gtt.base);

return ret;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@ -3212,7 +3246,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != vm)
continue;

@ -3269,19 +3303,20 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);

INIT_LIST_HEAD(&vma->vma_link);
INIT_LIST_HEAD(&vma->mm_list);
INIT_LIST_HEAD(&vma->vm_link);
INIT_LIST_HEAD(&vma->obj_link);
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
vma->is_ggtt = i915_is_ggtt(vm);

if (i915_is_ggtt(vm))
vma->ggtt_view = *ggtt_view;

list_add_tail(&vma->vma_link, &obj->vma_list);
if (!i915_is_ggtt(vm))
else
i915_ppgtt_get(i915_vm_to_ppgtt(vm));

list_add_tail(&vma->obj_link, &obj->vma_list);

return vma;
}

@ -3322,8 +3357,9 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
}

static struct scatterlist *
rotate_pages(dma_addr_t *in, unsigned int offset,
rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int width, unsigned int height,
unsigned int stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int column, row;
@ -3335,7 +3371,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
}

for (column = 0; column < width; column++) {
src_idx = width * (height - 1) + column;
src_idx = stride * (height - 1) + column;
for (row = 0; row < height; row++) {
st->nents++;
/* We don't need the pages, but need to initialize
@ -3346,7 +3382,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
sg_dma_address(sg) = in[offset + src_idx];
sg_dma_len(sg) = PAGE_SIZE;
sg = sg_next(sg);
src_idx -= width;
src_idx -= stride;
}
}

@ -3354,10 +3390,9 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
}

static struct sg_table *
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter;
@ -3399,6 +3434,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
/* Rotate the pages. */
sg = rotate_pages(page_addr_list, 0,
rot_info->width_pages, rot_info->height_pages,
rot_info->width_pages,
st, NULL);

/* Append the UV plane if NV12. */
@ -3414,6 +3450,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
rotate_pages(page_addr_list, uv_start_page,
rot_info->width_pages_uv,
rot_info->height_pages_uv,
rot_info->width_pages_uv,
st, sg);
}

@ -3495,7 +3532,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
vma->ggtt_view.pages = vma->obj->pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->ggtt_view.pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
vma->ggtt_view.pages =
intel_partial_pages(&vma->ggtt_view, vma->obj);
@ -3551,11 +3588,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return 0;

if (vma->bound == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma->vm,
vma->node.start,
vma->node.size,
VM_TO_TRACE_NAME(vma->vm));
|
||||
/* XXX: i915_vma_pin() will fix this +- hack */
|
||||
vma->pin_count++;
|
||||
ret = vma->vm->allocate_va_range(vma->vm,
|
||||
@ -3589,7 +3621,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
|
||||
if (view->type == I915_GGTT_VIEW_NORMAL) {
|
||||
return obj->base.size;
|
||||
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
|
||||
return view->params.rotation_info.size;
|
||||
return view->params.rotated.size;
|
||||
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
|
||||
return view->params.partial.size << PAGE_SHIFT;
|
||||
} else {
|
||||
|
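The rotate_pages() hunks above thread an explicit stride through the rotation walk instead of reusing the plane width, so a padded source (stride greater than width) still rotates correctly. A minimal standalone sketch of the same index arithmetic, with made-up dimensions and plain C in place of the DRM scatterlist types:

    /* Sketch of the rotate_pages() walk: each output column starts at the
     * bottom row of the source and steps upwards by one full source row
     * (the stride), which is why stride rather than width must be used
     * when framebuffer rows are padded. Dimensions are invented. */
    #include <stdio.h>

    static void rotate_indices(unsigned int width, unsigned int height,
                               unsigned int stride)
    {
        unsigned int column, row, src_idx;

        for (column = 0; column < width; column++) {
            /* bottom-most page of this column */
            src_idx = stride * (height - 1) + column;
            for (row = 0; row < height; row++) {
                printf("out[%u] = in[%u]\n",
                       column * height + row, src_idx);
                src_idx -= stride;  /* move up one source row */
            }
        }
    }

    int main(void)
    {
        /* 3 pages visible per row, 2 rows, 4 pages allocated per row */
        rotate_indices(3, 2, 4);
        return 0;
    }

For a 3x2 view in a 4-page-wide allocation this prints out[0]=in[4], out[1]=in[0], out[2]=in[5], and so on: each output column is one source column read bottom-to-top.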
@@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;

 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

-
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
@@ -156,7 +155,7 @@ struct i915_ggtt_view {
 			u64 offset;
 			unsigned int size;
 		} partial;
-		struct intel_rotation_info rotation_info;
+		struct intel_rotation_info rotated;
 	} params;

 	struct sg_table *pages;
@@ -184,6 +183,7 @@ struct i915_vma {
 #define GLOBAL_BIND	(1<<0)
 #define LOCAL_BIND	(1<<1)
 	unsigned int bound : 4;
+	bool is_ggtt : 1;

 	/**
 	 * Support different GGTT views into the same object.
@@ -195,9 +195,9 @@ struct i915_vma {
 	struct i915_ggtt_view ggtt_view;

 	/** This object's place on the active/inactive lists */
-	struct list_head mm_list;
+	struct list_head vm_link;

-	struct list_head vma_link; /* Link in the object's VMA list */
+	struct list_head obj_link; /* Link in the object's VMA list */

 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
@@ -276,6 +276,8 @@ struct i915_address_space {
 	u64 start;		/* Start offset always 0 for dri2 */
 	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */

+	bool is_ggtt;
+
 	struct i915_page_scratch *scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -331,6 +333,8 @@ struct i915_address_space {
 			u32 flags);
 };

+#define i915_is_ggtt(V) ((V)->is_ggtt)
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
@@ -343,6 +347,8 @@ struct i915_gtt {

 	size_t stolen_size;		/* Total size of stolen memory */
 	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
+	size_t stolen_reserved_base;
+	size_t stolen_reserved_size;
 	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
@@ -417,7 +423,7 @@ static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
 static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
 				      uint32_t pde_shift)
 {
-	const uint64_t mask = ~((1 << pde_shift) - 1);
+	const uint64_t mask = ~((1ULL << pde_shift) - 1);
 	uint64_t end;

 	WARN_ON(length == 0);
@@ -456,7 +462,7 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
 * over every page directory entry in a page directory.
 */
 #define gen8_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen8_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
	     (pt = (pd)->page_table[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT);		\
@@ -464,7 +470,7 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
	     start += temp, length -= temp; }), ++iter)

 #define gen8_for_each_pdpe(pd, pdp, start, length, iter)		\
	for (iter = gen8_pdpe_index(start);				\
	     length > 0 && iter < I915_PDPES_PER_PDP(dev) &&		\
	     (pd = (pdp)->page_directory[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT);	\
@@ -472,7 +478,7 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
	     start += temp, length -= temp; }), ++iter)

 #define gen8_for_each_pml4e(pdp, pml4, start, length, iter)		\
	for (iter = gen8_pml4e_index(start);				\
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 &&		\
	     (pdp = (pml4)->pdps[iter], true);				\
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT);	\
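The only behavioural change in i915_pte_count() above is `1` becoming `1ULL`. The bare constant is an int, so the shift happens in 32-bit arithmetic before the result is widened; with a shift count of 31 or more the mask comes out wrong. The real PDE shifts here are small, so this is hardening rather than a live bug; a toy illustration with an exaggerated shift value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pde_shift = 31;  /* exaggerated; real PDE shifts are ~21 */

        /* 32-bit shift: the complement is taken before widening,
         * so the upper half of the mask is lost. */
        uint64_t bad  = ~((1u   << pde_shift) - 1u);
        /* 64-bit shift, as in the fixed i915_pte_count() */
        uint64_t good = ~((1ULL << pde_shift) - 1);

        printf("bad  = 0x%016llx\n", (unsigned long long)bad);  /* 0x0000000080000000 */
        printf("good = 0x%016llx\n", (unsigned long long)good); /* 0xffffffff80000000 */
        return 0;
    }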
@@ -392,6 +392,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
 		return 0;
 	}

+	dev_priv->gtt.stolen_reserved_base = reserved_base;
+	dev_priv->gtt.stolen_reserved_size = reserved_size;
+
 	/* It is possible for the reserved area to end before the end of stolen
 	 * memory, so just consider the start. */
 	reserved_total = stolen_top - reserved_base;
@@ -503,6 +506,9 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 	if (obj->pages == NULL)
 		goto cleanup;

+	obj->get_page.sg = obj->pages->sgl;
+	obj->get_page.last = 0;
+
 	i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;

@@ -566,6 +572,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return NULL;

+	lockdep_assert_held(&dev->struct_mutex);
+
 	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
 		      stolen_offset, gtt_offset, size);

@@ -623,7 +631,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,

 	vma->bound |= GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
-	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+	list_add_tail(&vma->vm_link, &ggtt->inactive_list);
 	}

 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
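The get_page.sg/get_page.last initialisation added above primes a cursor that later page lookups advance instead of rescanning the object's scatterlist from its head each time. A userspace analogue of the idea with a plain linked list (names and values invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int value;
        struct node *next;
    };

    struct cursor {
        struct node *head;  /* start of the list */
        struct node *sg;    /* last node visited */
        int last;           /* index of that node */
    };

    /* Sequential lookups resume from the cached position; only a
     * backwards jump pays for a rescan from the head. */
    static struct node *lookup(struct cursor *c, int n)
    {
        if (n < c->last) {
            c->sg = c->head;
            c->last = 0;
        }
        while (c->last < n && c->sg) {
            c->sg = c->sg->next;
            c->last++;
        }
        return c->sg;
    }

    int main(void)
    {
        struct node c2 = { 2, NULL }, c1 = { 1, &c2 }, c0 = { 0, &c1 };
        struct cursor cur = { &c0, &c0, 0 };  /* primed like get_page */

        printf("%d %d %d\n", lookup(&cur, 1)->value,
               lookup(&cur, 2)->value, lookup(&cur, 0)->value);
        return 0;
    }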
@@ -27,10 +27,9 @@
 *
 */

 #define UTS_RELEASE " 4.6.7 "
 #include "i915_drv.h"

 #if 0

 static const char *ring_str(int ring)
 {
 	switch (ring) {
@@ -366,6 +365,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
 	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+	err_printf(m, "PCI Subsystem: %04x:%04x\n",
+		   dev->pdev->subsystem_vendor,
+		   dev->pdev->subsystem_device);
 	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

 	if (HAS_CSR(dev)) {
@@ -511,8 +514,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		}
 	}

-	if (error->overlay)
-		intel_overlay_print_error_state(m, error->overlay);
+//	if (error->overlay)
+//		intel_overlay_print_error_state(m, error->overlay);

 	if (error->display)
 		intel_display_print_error_state(m, dev, error->display);
@@ -733,7 +736,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
 	struct i915_vma *vma;
 	int i = 0;

-	list_for_each_entry(vma, head, mm_list) {
+	list_for_each_entry(vma, head, vm_link) {
 		capture_bo(err++, vma);
 		if (++i == count)
 			break;
@@ -756,7 +759,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
 		if (err == last)
 			break;

-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				capture_bo(err++, vma);
 	}
@@ -1029,17 +1032,6 @@ static void i915_gem_record_rings(struct drm_device *dev,
 				i915_error_ggtt_object_create(dev_priv,
 						     ring->scratch.obj);

-		if (request->pid) {
-			struct task_struct *task;
-
-			rcu_read_lock();
-			task = pid_task(request->pid, PIDTYPE_PID);
-			if (task) {
-				strcpy(error->ring[i].comm, task->comm);
-				error->ring[i].pid = task->pid;
-			}
-			rcu_read_unlock();
-		}
 		}

 		if (i915.enable_execlists) {
@@ -1051,7 +1043,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 			if (request)
 				rbuf = request->ctx->engine[ring->id].ringbuf;
 			else
-				rbuf = ring->default_context->engine[ring->id].ringbuf;
+				rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
 		} else
 			rbuf = ring->buffer;

@@ -1124,12 +1116,12 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 	int i;

 	i = 0;
-	list_for_each_entry(vma, &vm->active_list, mm_list)
+	list_for_each_entry(vma, &vm->active_list, vm_link)
 		i++;
 	error->active_bo_count[ndx] = i;

 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				i++;
 	}
@@ -1338,9 +1330,9 @@ void i915_capture_error_state(struct drm_device *dev, bool wedged,
 	i915_gem_record_fences(dev, error);
 	i915_gem_record_rings(dev, error);

-	do_gettimeofday(&error->time);
+//	do_gettimeofday(&error->time);

-	error->overlay = intel_overlay_capture_error_state(dev);
+//	error->overlay = intel_overlay_capture_error_state(dev);
 	error->display = intel_display_capture_error_state(dev);

 	i915_error_capture_msg(dev, error, wedged, error_msg);
@@ -1400,7 +1392,6 @@ void i915_destroy_error_state(struct drm_device *dev)
 	if (error)
 		kref_put(&error->ref, i915_error_state_free);
 }
-
 #endif

 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 {
@@ -40,6 +40,7 @@
 #define   GS_MIA_CORE_STATE		  (1 << GS_MIA_SHIFT)

 #define SOFT_SCRATCH(n)			_MMIO(0xc180 + (n) * 4)
+#define SOFT_SCRATCH_COUNT		16

 #define UOS_RSA_SCRATCH(i)		_MMIO(0xc200 + (i) * 4)
 #define UOS_RSA_SCRATCH_MAX_COUNT	64
@@ -158,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,

 	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
 	/* WaRsDisableCoarsePowerGating:skl,bxt */
-	if (!intel_enable_rc6(dev_priv->dev) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
-	    (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
-	    (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
+	if (!intel_enable_rc6(dev) ||
+	    NEEDS_WaRsDisableCoarsePowerGating(dev))
 		data[1] = 0;
 	else
 		/* bit 0 and 1 are for Render and Media domain separately */
@@ -246,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 		db_exc.cookie = 1;
 	}

+	/* Finally, update the cached copy of the GuC's WQ head */
+	gc->wq_head = desc->head;
+
 	kunmap_atomic(base);
 	return ret;
 }
@@ -375,6 +376,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 static void guc_init_ctx_desc(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_engine_cs *ring;
 	struct intel_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
@@ -387,10 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;

-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct guc_execlist_context *lrc = &desc.lrc[i];
-		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
-		struct intel_engine_cs *ring;
+	for_each_ring(ring, dev_priv, i) {
+		struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
 		struct drm_i915_gem_object *obj;
 		uint64_t ctx_desc;

@@ -405,7 +406,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		if (!obj)
 			break;	/* XXX: continue? */

-		ring = ringbuf->ring;
 		ctx_desc = intel_lr_context_descriptor(ctx, ring);
 		lrc->context_desc = (u32)ctx_desc;

@@ -413,16 +413,16 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
 				LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-				(ring->id << GUC_ELC_ENGINE_OFFSET);
+				(ring->guc_id << GUC_ELC_ENGINE_OFFSET);

-		obj = ringbuf->obj;
+		obj = ctx->engine[i].ringbuf->obj;

 		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
 		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
 		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;

-		desc.engines_used |= (1 << ring->id);
+		desc.engines_used |= (1 << ring->guc_id);
 	}

 	WARN_ON(desc.engines_used == 0);
@@ -471,28 +471,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
 			     sizeof(desc) * client->ctx_index);
 }

-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
 	struct guc_process_desc *desc;
 	void *base;
 	u32 size = sizeof(struct guc_wq_item);
 	int ret = -ETIMEDOUT, timeout_counter = 200;

+	if (!gc)
+		return 0;
+
+	/* Quickly return if wq space is available since last time we cache the
+	 * head position. */
+	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
+		return 0;
+
 	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
 	desc = base + gc->proc_desc_offset;

 	while (timeout_counter-- > 0) {
-		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
-			*offset = gc->wq_tail;
+		gc->wq_head = desc->head;

-			/* advance the tail for next workqueue item */
-			gc->wq_tail += size;
-			gc->wq_tail &= gc->wq_size - 1;
-
-			/* this will break the loop */
-			timeout_counter = 0;
+		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
 			ret = 0;
+			break;
 		}

 		if (timeout_counter)
@@ -507,15 +509,18 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
 				  struct drm_i915_gem_request *rq)
 {
-	enum intel_ring_id ring_id = rq->ring->id;
 	struct guc_wq_item *wqi;
 	void *base;
-	u32 tail, wq_len, wq_off = 0;
-	int ret;
+	u32 tail, wq_len, wq_off, space;

-	ret = guc_get_workqueue_space(gc, &wq_off);
-	if (ret)
-		return ret;
+	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+	if (WARN_ON(space < sizeof(struct guc_wq_item)))
+		return -ENOSPC; /* shouldn't happen */
+
+	/* postincrement WQ tail for next time */
+	wq_off = gc->wq_tail;
+	gc->wq_tail += sizeof(struct guc_wq_item);
+	gc->wq_tail &= gc->wq_size - 1;

 	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
 	 * should not have the case where structure wqi is across page, neither
@@ -537,7 +542,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
 	wqi->header = WQ_TYPE_INORDER |
 			(wq_len << WQ_LEN_SHIFT) |
-			(ring_id << WQ_TARGET_SHIFT) |
+			(rq->ring->guc_id << WQ_TARGET_SHIFT) |
 			WQ_NO_WCFLUSH_WAIT;

 	/* The GuC wants only the low-order word of the context descriptor */
@@ -553,29 +558,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 	return 0;
 }

-#define CTX_RING_BUFFER_START 0x08
-
-/* Update the ringbuffer pointer in a saved context image */
-static void lr_context_update(struct drm_i915_gem_request *rq)
-{
-	enum intel_ring_id ring_id = rq->ring->id;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
-	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
-	struct page *page;
-	uint32_t *reg_state;
-
-	BUG_ON(!ctx_obj);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
-	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
-	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
-
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
-
-	kunmap_atomic(reg_state);
-}
-
 /**
 * i915_guc_submit() - Submit commands through GuC
 * @client: the guc client where commands will go through
@@ -587,18 +569,14 @@ int i915_guc_submit(struct i915_guc_client *client,
 		    struct drm_i915_gem_request *rq)
 {
 	struct intel_guc *guc = client->guc;
-	enum intel_ring_id ring_id = rq->ring->id;
+	unsigned int engine_id = rq->ring->guc_id;
 	int q_ret, b_ret;

-	/* Need this because of the deferred pin ctx and ring */
-	/* Shall we move this right after ring is pinned? */
-	lr_context_update(rq);
-
 	q_ret = guc_add_workqueue_item(client, rq);
 	if (q_ret == 0)
 		b_ret = guc_ring_doorbell(client);

-	client->submissions[ring_id] += 1;
+	client->submissions[engine_id] += 1;
 	if (q_ret) {
 		client->q_fail += 1;
 		client->retcode = q_ret;
@@ -608,8 +586,8 @@ int i915_guc_submit(struct i915_guc_client *client,
 	} else {
 		client->retcode = 0;
 	}
-	guc->submissions[ring_id] += 1;
-	guc->last_seqno[ring_id] = rq->seqno;
+	guc->submissions[engine_id] += 1;
+	guc->last_seqno[engine_id] = rq->seqno;

 	return q_ret;
 }
@@ -661,7 +639,7 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
 /**
 * gem_release_guc_obj() - Release gem object allocated for GuC usage
 * @obj:	gem obj to be released
 */
 static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
 {
 	if (!obj)
@@ -832,6 +810,96 @@ static void guc_create_log(struct intel_guc *guc)
 	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }

+static void init_guc_policies(struct guc_policies *policies)
+{
+	struct guc_policy *policy;
+	u32 p, i;
+
+	policies->dpc_promote_time = 500000;
+	policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+			policy = &policies->policy[p][i];
+
+			policy->execution_quantum = 1000000;
+			policy->preemption_time = 500000;
+			policy->fault_time = 250000;
+			policy->policy_flags = 0;
+		}
+	}
+
+	policies->is_valid = 1;
+}
+
+static void guc_create_ads(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_gem_object *obj;
+	struct guc_ads *ads;
+	struct guc_policies *policies;
+	struct guc_mmio_reg_state *reg_state;
+	struct intel_engine_cs *ring;
+	struct page *page;
+	u32 size, i;
+
+	/* The ads obj includes the struct itself and buffers passed to GuC */
+	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
+			sizeof(struct guc_mmio_reg_state) +
+			GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+
+	obj = guc->ads_obj;
+	if (!obj) {
+		obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+		if (!obj)
+			return;
+
+		guc->ads_obj = obj;
+	}
+
+	page = i915_gem_object_get_page(obj, 0);
+	ads = kmap(page);
+
+	/*
+	 * The GuC requires a "Golden Context" when it reinitialises
+	 * engines after a reset. Here we use the Render ring default
+	 * context, which must already exist and be pinned in the GGTT,
+	 * so its address won't change after we've told the GuC where
+	 * to find it.
+	 */
+	ring = &dev_priv->ring[RCS];
+	ads->golden_context_lrca = ring->status_page.gfx_addr;
+
+	for_each_ring(ring, dev_priv, i)
+		ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+
+	/* GuC scheduling policies */
+	policies = (void *)ads + sizeof(struct guc_ads);
+	init_guc_policies(policies);
+
+	ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
+			sizeof(struct guc_ads);
+
+	/* MMIO reg state */
+	reg_state = (void *)policies + sizeof(struct guc_policies);
+
+	for_each_ring(ring, dev_priv, i) {
+		reg_state->mmio_white_list[ring->guc_id].mmio_start =
+			ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+		/* Nothing to be saved or restored for now. */
+		reg_state->mmio_white_list[ring->guc_id].count = 0;
+	}
+
+	ads->reg_state_addr = ads->scheduler_policies +
+			sizeof(struct guc_policies);
+
+	ads->reg_state_buffer = ads->reg_state_addr +
+			sizeof(struct guc_mmio_reg_state);
+
+	kunmap(page);
+}
+
 /*
 * Set up the memory resources to be shared with the GuC. At this point,
 * we require just one object that can be mapped through the GGTT.
@@ -858,6 +926,8 @@ int i915_guc_submission_init(struct drm_device *dev)

 	guc_create_log(guc);

+	guc_create_ads(guc);
+
 	return 0;
 }

@@ -865,7 +935,7 @@ int i915_guc_submission_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *ctx = dev_priv->kernel_context;
 	struct i915_guc_client *client;

 	/* client for execbuf submission */
@@ -896,6 +966,9 @@ void i915_guc_submission_fini(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;

+	gem_release_guc_obj(dev_priv->guc.ads_obj);
+	guc->ads_obj = NULL;
+
 	gem_release_guc_obj(dev_priv->guc.log_obj);
 	guc->log_obj = NULL;

@@ -919,7 +992,7 @@ int intel_guc_suspend(struct drm_device *dev)
 	if (!i915.enable_guc_submission)
 		return 0;

-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;

 	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
 	/* any value greater than GUC_POWER_D0 */
@@ -945,7 +1018,7 @@ int intel_guc_resume(struct drm_device *dev)
 	if (!i915.enable_guc_submission)
 		return 0;

-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;

 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
|
||||
* @enabled_irq_mask: mask of interrupt bits to enable
|
||||
*/
|
||||
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
|
||||
uint32_t interrupt_mask,
|
||||
uint32_t enabled_irq_mask)
|
||||
uint32_t interrupt_mask,
|
||||
uint32_t enabled_irq_mask)
|
||||
{
|
||||
uint32_t new_val;
|
||||
|
||||
@ -288,11 +288,11 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
|
||||
}
|
||||
|
||||
/**
|
||||
* snb_update_pm_irq - update GEN6_PMIMR
|
||||
* @dev_priv: driver private
|
||||
* @interrupt_mask: mask of interrupt bits to update
|
||||
* @enabled_irq_mask: mask of interrupt bits to enable
|
||||
*/
|
||||
* snb_update_pm_irq - update GEN6_PMIMR
|
||||
* @dev_priv: driver private
|
||||
* @interrupt_mask: mask of interrupt bits to update
|
||||
* @enabled_irq_mask: mask of interrupt bits to enable
|
||||
*/
|
||||
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
|
||||
uint32_t interrupt_mask,
|
||||
uint32_t enabled_irq_mask)
|
||||
@ -401,14 +401,15 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
|
||||
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
synchronize_irq(dev->irq);
|
||||
}
|
||||
|
||||
/**
|
||||
* bdw_update_port_irq - update DE port interrupt
|
||||
* @dev_priv: driver private
|
||||
* @interrupt_mask: mask of interrupt bits to update
|
||||
* @enabled_irq_mask: mask of interrupt bits to enable
|
||||
*/
|
||||
* bdw_update_port_irq - update DE port interrupt
|
||||
* @dev_priv: driver private
|
||||
* @interrupt_mask: mask of interrupt bits to update
|
||||
* @enabled_irq_mask: mask of interrupt bits to enable
|
||||
*/
|
||||
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
|
||||
uint32_t interrupt_mask,
|
||||
uint32_t enabled_irq_mask)
|
||||
@ -1635,6 +1636,12 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
|
||||
int pipe;
|
||||
|
||||
spin_lock(&dev_priv->irq_lock);
|
||||
|
||||
if (!dev_priv->display_irqs_enabled) {
|
||||
spin_unlock(&dev_priv->irq_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
i915_reg_t reg;
|
||||
u32 mask, iir_bit = 0;
|
||||
@ -1880,7 +1887,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
|
||||
int pipe;
|
||||
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
|
||||
|
||||
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
|
||||
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
|
||||
|
||||
if (pch_iir & SDE_AUDIO_POWER_MASK) {
|
||||
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
|
||||
@ -1973,7 +1980,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
|
||||
int pipe;
|
||||
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
|
||||
|
||||
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
|
||||
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
|
||||
|
||||
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
|
||||
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
|
||||
@ -2172,10 +2179,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
|
||||
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
|
||||
disable_rpm_wakeref_asserts(dev_priv);
|
||||
|
||||
/* We get interrupts on unclaimed registers, so check for this before we
|
||||
* do any I915_{READ,WRITE}. */
|
||||
intel_uncore_check_errors(dev);
|
||||
|
||||
/* disable master interrupt before clearing iir */
|
||||
de_ier = I915_READ(DEIER);
|
||||
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
|
||||
@ -2252,43 +2255,20 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
|
||||
intel_hpd_irq_handler(dev, pin_mask, long_mask);
|
||||
}
|
||||
|
||||
static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
||||
static irqreturn_t
|
||||
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
|
||||
{
|
||||
struct drm_device *dev = arg;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 master_ctl;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
uint32_t tmp = 0;
|
||||
u32 iir;
|
||||
enum pipe pipe;
|
||||
u32 aux_mask = GEN8_AUX_CHANNEL_A;
|
||||
|
||||
if (!intel_irqs_enabled(dev_priv))
|
||||
return IRQ_NONE;
|
||||
|
||||
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
|
||||
disable_rpm_wakeref_asserts(dev_priv);
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
|
||||
GEN9_AUX_CHANNEL_D;
|
||||
|
||||
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
|
||||
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
|
||||
if (!master_ctl)
|
||||
goto out;
|
||||
|
||||
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
|
||||
|
||||
/* Find, clear, then process each source of interrupt */
|
||||
|
||||
ret = gen8_gt_irq_handler(dev_priv, master_ctl);
|
||||
|
||||
if (master_ctl & GEN8_DE_MISC_IRQ) {
|
||||
tmp = I915_READ(GEN8_DE_MISC_IIR);
|
||||
if (tmp) {
|
||||
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
|
||||
iir = I915_READ(GEN8_DE_MISC_IIR);
|
||||
if (iir) {
|
||||
I915_WRITE(GEN8_DE_MISC_IIR, iir);
|
||||
ret = IRQ_HANDLED;
|
||||
if (tmp & GEN8_DE_MISC_GSE)
|
||||
if (iir & GEN8_DE_MISC_GSE)
|
||||
intel_opregion_asle_intr(dev);
|
||||
else
|
||||
DRM_ERROR("Unexpected DE Misc interrupt\n");
|
||||
@ -2298,33 +2278,40 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
||||
}
|
||||
|
||||
if (master_ctl & GEN8_DE_PORT_IRQ) {
|
||||
tmp = I915_READ(GEN8_DE_PORT_IIR);
|
||||
if (tmp) {
|
||||
iir = I915_READ(GEN8_DE_PORT_IIR);
|
||||
if (iir) {
|
||||
u32 tmp_mask;
|
||||
bool found = false;
|
||||
u32 hotplug_trigger = 0;
|
||||
|
||||
if (IS_BROXTON(dev_priv))
|
||||
hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
|
||||
else if (IS_BROADWELL(dev_priv))
|
||||
hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
|
||||
|
||||
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
|
||||
I915_WRITE(GEN8_DE_PORT_IIR, iir);
|
||||
ret = IRQ_HANDLED;
|
||||
|
||||
if (tmp & aux_mask) {
|
||||
tmp_mask = GEN8_AUX_CHANNEL_A;
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
tmp_mask |= GEN9_AUX_CHANNEL_B |
|
||||
GEN9_AUX_CHANNEL_C |
|
||||
GEN9_AUX_CHANNEL_D;
|
||||
|
||||
if (iir & tmp_mask) {
|
||||
dp_aux_irq_handler(dev);
|
||||
found = true;
|
||||
}
|
||||
|
||||
if (hotplug_trigger) {
|
||||
if (IS_BROXTON(dev))
|
||||
bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
|
||||
else
|
||||
ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
|
||||
found = true;
|
||||
if (IS_BROXTON(dev_priv)) {
|
||||
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
|
||||
if (tmp_mask) {
|
||||
bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
|
||||
found = true;
|
||||
}
|
||||
} else if (IS_BROADWELL(dev_priv)) {
|
||||
tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
|
||||
if (tmp_mask) {
|
||||
ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
|
||||
if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
|
||||
gmbus_irq_handler(dev);
|
||||
found = true;
|
||||
}
|
||||
@ -2337,49 +2324,51 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
||||
}
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
|
||||
u32 flip_done, fault_errors;
|
||||
|
||||
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
|
||||
continue;
|
||||
|
||||
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
|
||||
if (pipe_iir) {
|
||||
ret = IRQ_HANDLED;
|
||||
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
|
||||
|
||||
if (pipe_iir & GEN8_PIPE_VBLANK &&
|
||||
intel_pipe_handle_vblank(dev, pipe))
|
||||
intel_check_page_flip(dev, pipe);
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
|
||||
else
|
||||
flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
|
||||
|
||||
if (flip_done) {
|
||||
intel_prepare_page_flip(dev, pipe);
|
||||
intel_finish_page_flip_plane(dev, pipe);
|
||||
}
|
||||
|
||||
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
|
||||
hsw_pipe_crc_irq_handler(dev, pipe);
|
||||
|
||||
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
|
||||
intel_cpu_fifo_underrun_irq_handler(dev_priv,
|
||||
pipe);
|
||||
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
else
|
||||
fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
|
||||
if (fault_errors)
|
||||
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
|
||||
pipe_name(pipe),
|
||||
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
|
||||
} else
|
||||
iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
|
||||
if (!iir) {
|
||||
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = IRQ_HANDLED;
|
||||
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
|
||||
|
||||
if (iir & GEN8_PIPE_VBLANK &&
|
||||
intel_pipe_handle_vblank(dev, pipe))
|
||||
intel_check_page_flip(dev, pipe);
|
||||
|
||||
flip_done = iir;
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
|
||||
else
|
||||
flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
|
||||
|
||||
if (flip_done) {
|
||||
intel_prepare_page_flip(dev, pipe);
|
||||
intel_finish_page_flip_plane(dev, pipe);
|
||||
}
|
||||
|
||||
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
|
||||
hsw_pipe_crc_irq_handler(dev, pipe);
|
||||
|
||||
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
|
||||
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
|
||||
|
||||
fault_errors = iir;
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
else
|
||||
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
|
||||
if (fault_errors)
|
||||
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
|
||||
pipe_name(pipe),
|
||||
fault_errors);
|
||||
}
|
||||
|
||||
if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
|
||||
@ -2389,15 +2378,15 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
||||
* scheme also closed the SDE interrupt handling race we've seen
|
||||
* on older pch-split platforms. But this needs testing.
|
||||
*/
|
||||
u32 pch_iir = I915_READ(SDEIIR);
|
||||
if (pch_iir) {
|
||||
I915_WRITE(SDEIIR, pch_iir);
|
||||
iir = I915_READ(SDEIIR);
|
||||
if (iir) {
|
||||
I915_WRITE(SDEIIR, iir);
|
||||
ret = IRQ_HANDLED;
|
||||
|
||||
if (HAS_PCH_SPT(dev_priv))
|
||||
spt_irq_handler(dev, pch_iir);
|
||||
spt_irq_handler(dev, iir);
|
||||
else
|
||||
cpt_irq_handler(dev, pch_iir);
|
||||
cpt_irq_handler(dev, iir);
|
||||
} else {
|
||||
/*
|
||||
* Like on previous PCH there seems to be something
|
||||
@ -2407,10 +2396,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 master_ctl;
+	irqreturn_t ret;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
+	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+	if (!master_ctl)
+		return IRQ_NONE;
+
+	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+
+	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
+	disable_rpm_wakeref_asserts(dev_priv);
+
+	/* Find, clear, then process each source of interrupt */
+	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+	ret |= gen8_de_irq_handler(dev_priv, master_ctl);
+
+	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+	POSTING_READ_FW(GEN8_MASTER_IRQ);
+
-out:
 	enable_rpm_wakeref_asserts(dev_priv);

 	return ret;
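The split above leaves gen8_irq_handler() as a thin top half around a fixed pattern: read and mask the master interrupt, ack and handle each source, then re-enable and post. A toy model of the pattern in plain C, with all register names and bits invented for the sketch:

    #include <stdio.h>

    /* Toy model: a master enable bit gates two interrupt sources. */
    #define MASTER_ENABLE   (1u << 31)
    #define GT_IRQ          (1u << 0)
    #define DE_IRQ          (1u << 1)

    static unsigned int master_reg = MASTER_ENABLE | GT_IRQ | DE_IRQ;

    int main(void)
    {
        unsigned int master_ctl;
        int handled = 0;

        /* 1. Read the pending sources, dropping the enable bit. */
        master_ctl = master_reg & ~MASTER_ENABLE;
        if (!master_ctl)
            return 0;

        /* 2. Disable the master interrupt while draining sources,
         *    so no new top-level interrupt fires mid-drain. */
        master_reg = 0;

        /* 3. Find, clear, then process each source. */
        if (master_ctl & GT_IRQ)
            handled = 1;
        if (master_ctl & DE_IRQ)
            handled |= 1;

        /* 4. Re-enable the master interrupt. */
        master_reg = MASTER_ENABLE;

        printf("handled=%d\n", handled);
        return 0;
    }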
@@ -2481,15 +2496,17 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 		 */
 		intel_runtime_pm_get(dev_priv);

+		intel_prepare_reset(dev);
+
 		/*
 		 * All state reset _must_ be completed before we update the
 		 * reset counter, for otherwise waiters might miss the reset
 		 * pending state and not properly drop locks, resulting in
 		 * deadlocks with the reset work.
 		 */
-//		ret = i915_reset(dev);
+		ret = i915_reset(dev);

-//		intel_finish_reset(dev);
+		intel_finish_reset(dev);

 		intel_runtime_pm_put(dev_priv);

@@ -2632,7 +2649,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
 	va_end(args);

-//	i915_capture_error_state(dev);
+	i915_capture_error_state(dev, wedged, error_msg);
 	i915_report_and_clear_eir(dev);

 	if (wedged) {
@@ -2924,14 +2941,44 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 		ring->hangcheck.deadlock = 0;
 }

-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+static bool subunits_stuck(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 tmp;
+	u32 instdone[I915_NUM_INSTDONE_REG];
+	bool stuck;
+	int i;
+
+	if (ring->id != RCS)
+		return true;
+
+	i915_get_extra_instdone(ring->dev, instdone);
+
+	/* There might be unstable subunit states even when
+	 * actual head is not moving. Filter out the unstable ones by
+	 * accumulating the undone -> done transitions and only
+	 * consider those as progress.
+	 */
+	stuck = true;
+	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
+		const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+
+		if (tmp != ring->hangcheck.instdone[i])
+			stuck = false;
+
+		ring->hangcheck.instdone[i] |= tmp;
+	}
+
+	return stuck;
+}
+
+static enum intel_ring_hangcheck_action
+head_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
 	if (acthd != ring->hangcheck.acthd) {
+
+		/* Clear subunit states on head movement */
+		memset(ring->hangcheck.instdone, 0,
+		       sizeof(ring->hangcheck.instdone));
+
 		if (acthd > ring->hangcheck.max_acthd) {
 			ring->hangcheck.max_acthd = acthd;
 			return HANGCHECK_ACTIVE;
@@ -2940,6 +2987,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
 	return HANGCHECK_ACTIVE_LOOP;
 }

+	if (!subunits_stuck(ring))
+		return HANGCHECK_ACTIVE;
+
+	return HANGCHECK_HUNG;
+}
+
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_ring_hangcheck_action ha;
+	u32 tmp;
+
+	ha = head_stuck(ring, acthd);
+	if (ha != HANGCHECK_HUNG)
+		return ha;
+
 	if (IS_GEN2(dev))
 		return HANGCHECK_HUNG;

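subunits_stuck() above declares the render engine hung only when no INSTDONE bit has made an undone-to-done transition since the last sample; momentary toggling still counts as progress. The accumulation trick in isolation, with invented register values:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_REGS 2

    /* Accumulate "done" bits across samples: any bit that has been seen
     * set since the last head movement counts as progress, so transient
     * subunit state cannot trigger a false hang. */
    static bool subunits_stuck(const unsigned int *sample, unsigned int *accum)
    {
        bool stuck = true;
        int i;

        for (i = 0; i < NUM_REGS; i++) {
            unsigned int tmp = sample[i] | accum[i];

            if (tmp != accum[i])
                stuck = false;  /* a new bit flipped to done */

            accum[i] |= tmp;
        }
        return stuck;
    }

    int main(void)
    {
        unsigned int accum[NUM_REGS] = { 0, 0 };
        unsigned int s1[NUM_REGS] = { 0x1, 0x0 };  /* invented samples */
        unsigned int s2[NUM_REGS] = { 0x1, 0x0 };

        printf("%d\n", subunits_stuck(s1, accum));  /* 0: progress seen */
        printf("%d\n", subunits_stuck(s2, accum));  /* 1: nothing new */
        return 0;
    }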
@@ -3007,6 +3072,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	 */
 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);

+	/* As enabling the GPU requires fairly extensive mmio access,
+	 * periodically arm the mmio checker to see if we are triggering
+	 * any invalid access.
+	 */
+	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
 	for_each_ring(ring, dev_priv, i) {
 		u64 acthd;
 		u32 seqno;
@@ -3081,7 +3152,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 			if (ring->hangcheck.score > 0)
 				ring->hangcheck.score--;

+			/* Clear head and subunit states on seqno movement */
 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+
+			memset(ring->hangcheck.instdone, 0,
+			       sizeof(ring->hangcheck.instdone));
 		}

 		ring->hangcheck.seqno = seqno;
@@ -3098,9 +3173,34 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		}
 	}

-//	if (rings_hung)
-//		return i915_handle_error(dev, true);
+	if (rings_hung) {
+		i915_handle_error(dev, true, "Ring hung");
+		goto out;
+	}

 	if (busy_count)
 		/* Reset timer case chip hangs without another request
 		 * being added */
 		i915_queue_hangcheck(dev);

+out:
+	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
+}
+
+void i915_queue_hangcheck(struct drm_device *dev)
+{
+	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
+
+	if (!i915.enable_hangcheck)
+		return;
+
+	/* Don't continually defer the hangcheck so that it is always run at
+	 * least once after work has been scheduled on any ring. Otherwise,
+	 * we will ignore a hung ring if a second ring is kept busy.
+	 */
+
+	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
+			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
+}
+
 static void ibx_irq_reset(struct drm_device *dev)
@@ -3227,23 +3327,30 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
 				     unsigned int pipe_mask)
 {
 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+	enum pipe pipe;

 	spin_lock_irq(&dev_priv->irq_lock);
-	if (pipe_mask & 1 << PIPE_A)
-		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
-				  dev_priv->de_irq_mask[PIPE_A],
-				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
-	if (pipe_mask & 1 << PIPE_B)
-		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
-				  dev_priv->de_irq_mask[PIPE_B],
-				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
-	if (pipe_mask & 1 << PIPE_C)
-		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
-				  dev_priv->de_irq_mask[PIPE_C],
-				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+				  dev_priv->de_irq_mask[pipe],
+				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }

+void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+				     unsigned int pipe_mask)
+{
+	enum pipe pipe;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/* make sure we're done processing display irqs */
+	synchronize_irq(dev_priv->dev->irq);
+}
+
 static void cherryview_irq_preinstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4569,6 +4676,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
 	dev_priv->pm.irqs_enabled = false;
+	synchronize_irq(dev_priv->dev->irq);
 }

 /**
@@ -22,6 +22,7 @@
 * IN THE SOFTWARE.
 */

+#include "i915_params.h"
 #include "i915_drv.h"

 struct i915_params i915 __read_mostly = {
@@ -35,7 +36,7 @@ struct i915_params i915 __read_mostly = {
 	.enable_dc = -1,
 	.enable_fbc = -1,
 	.enable_execlists = -1,
-	.enable_hangcheck = true,
+	.enable_hangcheck = false,
 	.enable_ppgtt = -1,
 	.enable_psr = 0,
 	.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
@@ -48,7 +49,6 @@ struct i915_params i915 __read_mostly = {
 	.invert_brightness = 0,
 	.disable_display = 0,
 	.enable_cmd_parser = 0,
-	.disable_vtd_wa = 1,
 	.use_mmio_flip = 0,
 	.mmio_debug = 0,
 	.verbose_state_checks = 1,
@@ -93,7 +93,7 @@ MODULE_PARM_DESC(enable_fbc,
 	"Enable frame buffer compression for power savings "
 	"(default: -1 (use per-chip default))");

-module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400);
 MODULE_PARM_DESC(lvds_channel_mode,
 	 "Specify LVDS channel mode "
 	 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(lvds_use_ssc,
 	"Use Spread Spectrum Clock with panels [LVDS/eDP] "
 	"(default: auto from VBT)");

-module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400);
 MODULE_PARM_DESC(vbt_sdvo_panel_type,
 	"Override/Ignore selection of SDVO panel mode in the VBT "
 	"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
@@ -128,9 +128,11 @@ MODULE_PARM_DESC(enable_execlists,
 	"(-1=auto [default], 0=disabled, 1=enabled)");

 module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR "
+		 "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+		 "Default: -1 (use per-chip default)");

-module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
 MODULE_PARM_DESC(preliminary_hw_support,
 	"Enable preliminary hardware support.");

@@ -164,12 +166,9 @@ MODULE_PARM_DESC(invert_brightness,
 	"to dri-devel@lists.freedesktop.org, if your machine needs it. "
 	"It will then be included in an upcoming module version.");

-module_param_named(disable_display, i915.disable_display, bool, 0600);
+module_param_named(disable_display, i915.disable_display, bool, 0400);
 MODULE_PARM_DESC(disable_display, "Disable display (default: false)");

-module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
-MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
-
 module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
 MODULE_PARM_DESC(enable_cmd_parser,
 	"Enable command parsing (1=enabled [default], 0=disabled)");
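The permission changes above (0600 becoming 0400) make these options settable at load time but read-only through sysfs afterwards. A toy module sketch of the pattern; the module name and parameter are invented for illustration:

    /* Toy module: a 0400 parameter can be set at load time
     * (insmod demo.ko answer=7) but /sys/module/demo/parameters/answer
     * is then read-only, while a 0600 parameter stays writable by root. */
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int answer = 42;
    module_param(answer, int, 0400);   /* read-only after load */
    MODULE_PARM_DESC(answer, "Demo parameter (default: 42)");

    static int __init demo_init(void)
    {
        pr_info("demo: answer=%d\n", answer);
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");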
 69	drivers/video/drm/i915/i915_params.h	Normal file
@@ -0,0 +1,69 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_PARAMS_H_
+#define _I915_PARAMS_H_
+
+#include <linux/cache.h> /* for __read_mostly */
+
+struct i915_params {
+	int modeset;
+	int panel_ignore_lid;
+	int semaphores;
+	int lvds_channel_mode;
+	int panel_use_ssc;
+	int vbt_sdvo_panel_type;
+	int enable_rc6;
+	int enable_dc;
+	int enable_fbc;
+	int enable_ppgtt;
+	int enable_execlists;
+	int enable_psr;
+	unsigned int preliminary_hw_support;
+	int disable_power_well;
+	int enable_ips;
+	int invert_brightness;
+	int enable_cmd_parser;
+	int guc_log_level;
+	int use_mmio_flip;
+	int mmio_debug;
+	int edp_vswing;
+	/* leave bools at the end to not create holes */
+	bool enable_hangcheck;
+	bool fastboot;
+	bool prefault_disable;
+	bool load_detect_test;
+	bool reset;
+	bool disable_display;
+	bool enable_guc_submission;
+	bool verbose_state_checks;
+	bool nuclear_pageflip;
+	char *log_file;
+	char *cmdline_mode;
+};
+
+extern struct i915_params i915 __read_mostly;
+
+#endif
@ -610,16 +610,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define IOSF_BYTE_ENABLES_SHIFT 4
|
||||
#define IOSF_BAR_SHIFT 1
|
||||
#define IOSF_SB_BUSY (1<<0)
|
||||
#define IOSF_PORT_BUNIT 0x3
|
||||
#define IOSF_PORT_PUNIT 0x4
|
||||
#define IOSF_PORT_BUNIT 0x03
|
||||
#define IOSF_PORT_PUNIT 0x04
|
||||
#define IOSF_PORT_NC 0x11
|
||||
#define IOSF_PORT_DPIO 0x12
|
||||
#define IOSF_PORT_DPIO_2 0x1a
|
||||
#define IOSF_PORT_GPIO_NC 0x13
|
||||
#define IOSF_PORT_CCK 0x14
|
||||
#define IOSF_PORT_CCU 0xA9
|
||||
#define IOSF_PORT_GPS_CORE 0x48
|
||||
#define IOSF_PORT_FLISDSI 0x1B
|
||||
#define IOSF_PORT_DPIO_2 0x1a
|
||||
#define IOSF_PORT_FLISDSI 0x1b
|
||||
#define IOSF_PORT_GPIO_SC 0x48
|
||||
#define IOSF_PORT_GPIO_SUS 0xa8
|
||||
#define IOSF_PORT_CCU 0xa9
|
||||
#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
|
||||
#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)
|
||||
|
||||
@ -857,29 +858,29 @@ enum skl_disp_power_wells {
|
||||
* digital port D (CHV) or port A (BXT).
|
||||
*
|
||||
*
|
||||
* Dual channel PHY (VLV/CHV/BXT)
|
||||
* ---------------------------------
|
||||
* | CH0 | CH1 |
|
||||
* | CMN/PLL/REF | CMN/PLL/REF |
|
||||
* |---------------|---------------| Display PHY
|
||||
* | PCS01 | PCS23 | PCS01 | PCS23 |
|
||||
* |-------|-------|-------|-------|
|
||||
* |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
|
||||
* ---------------------------------
|
||||
* | DDI0 | DDI1 | DP/HDMI ports
|
||||
* ---------------------------------
|
||||
* Dual channel PHY (VLV/CHV/BXT)
|
||||
* ---------------------------------
|
||||
* | CH0 | CH1 |
|
||||
* | CMN/PLL/REF | CMN/PLL/REF |
|
||||
* |---------------|---------------| Display PHY
|
||||
* | PCS01 | PCS23 | PCS01 | PCS23 |
|
||||
* |-------|-------|-------|-------|
|
||||
* |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
|
||||
* ---------------------------------
|
||||
* | DDI0 | DDI1 | DP/HDMI ports
|
||||
* ---------------------------------
|
||||
*
|
||||
* Single channel PHY (CHV/BXT)
|
||||
* -----------------
|
||||
* | CH0 |
|
||||
* | CMN/PLL/REF |
|
||||
* |---------------| Display PHY
|
||||
* | PCS01 | PCS23 |
|
||||
* |-------|-------|
|
||||
* |TX0|TX1|TX2|TX3|
|
||||
* -----------------
|
||||
* | DDI2 | DP/HDMI port
|
||||
* -----------------
|
||||
* Single channel PHY (CHV/BXT)
|
||||
* -----------------
|
||||
* | CH0 |
|
||||
* | CMN/PLL/REF |
|
||||
* |---------------| Display PHY
|
||||
* | PCS01 | PCS23 |
|
||||
* |-------|-------|
|
||||
* |TX0|TX1|TX2|TX3|
|
||||
* -----------------
|
||||
* | DDI2 | DP/HDMI port
|
||||
* -----------------
|
||||
*/
|
#define DPIO_DEVFN 0

@ -1541,13 +1542,13 @@ enum skl_disp_power_wells {
#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER _MMIO(0x02024)
#define PRB0_BASE (0x2030-0x30)
#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
#define PRB2_BASE (0x2050-0x30) /* gen3 */
#define SRB0_BASE (0x2100-0x30) /* gen2 */
#define SRB1_BASE (0x2110-0x30) /* gen2 */
#define SRB2_BASE (0x2120-0x30) /* 830 */
#define SRB3_BASE (0x2130-0x30) /* 830 */
#define PRB0_BASE (0x2030-0x30)
#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
#define PRB2_BASE (0x2050-0x30) /* gen3 */
#define SRB0_BASE (0x2100-0x30) /* gen2 */
#define SRB1_BASE (0x2110-0x30) /* gen2 */
#define SRB2_BASE (0x2120-0x30) /* 830 */
#define SRB3_BASE (0x2130-0x30) /* 830 */
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
@ -1635,6 +1636,9 @@ enum skl_disp_power_wells {
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */

#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
#define RING_MAX_NONPRIV_SLOTS 12

#define GEN7_TLB_RD_ADDR _MMIO(0x4700)

#if 0
@ -1711,6 +1715,11 @@ enum skl_disp_power_wells {
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1<<31)

#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
#define CLAIM_ER_CLR (1 << 31)
#define CLAIM_ER_OVERFLOW (1 << 16)
#define CLAIM_ER_CTR_MASK 0xffff

#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
#define DERRMR_PIPEA_SCANLINE (1<<0)
@ -2569,7 +2578,7 @@ enum skl_disp_power_wells {
#define PALETTE_B_OFFSET 0xa800
#define CHV_PALETTE_C_OFFSET 0xc000
#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \
dev_priv->info.display_mmio_offset + (i) * 4)
dev_priv->info.display_mmio_offset + (i) * 4)

/* MCH MMIO space */

@ -3622,17 +3631,17 @@ enum skl_disp_power_wells {
#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
_VLV_BLC_PWM_CTL2_B)
_VLV_BLC_PWM_CTL2_B)

#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
_VLV_BLC_PWM_CTL_B)
_VLV_BLC_PWM_CTL_B)

#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
_VLV_BLC_HIST_CTL_B)
_VLV_BLC_HIST_CTL_B)

/* Backlight control */
#define BLC_PWM_CTL2 _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
@ -5034,7 +5043,7 @@ enum skl_disp_power_wells {
#define DSPSIZE(plane) _MMIO_PIPE2(plane, _DSPASIZE)
#define DSPSURF(plane) _MMIO_PIPE2(plane, _DSPASURF)
#define DSPTILEOFF(plane) _MMIO_PIPE2(plane, _DSPATILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)

@ -5948,6 +5957,7 @@ enum skl_disp_power_wells {
#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
#define IVB_PIPE_C_DISABLE (1 << 28)
#define ILK_HDCP_DISABLE (1 << 25)
#define ILK_eDP_A_DISABLE (1 << 24)
#define HSW_CDCLK_LIMIT (1 << 24)
@ -5994,10 +6004,19 @@ enum skl_disp_power_wells {
#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)

#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)

#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)

#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)

/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
@ -6043,6 +6062,8 @@ enum skl_disp_power_wells {
#define HDC_FORCE_NON_COHERENT (1<<4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)

#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)

/* GEN9 chicken */
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
@ -6379,7 +6400,7 @@ enum skl_disp_power_wells {
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)

#define _HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1<<31)
#define S3D_ENABLE (1<<31)
#define _HSW_STEREO_3D_CTL_B 0x71020

#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
@ -6390,7 +6411,7 @@ enum skl_disp_power_wells {
#define _PCH_TRANS_VTOTAL_B 0xe100c
#define _PCH_TRANS_VBLANK_B 0xe1010
#define _PCH_TRANS_VSYNC_B 0xe1014
#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028

#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
@ -6467,8 +6488,8 @@ enum skl_disp_power_wells {
#define LPT_PWM_GRANULARITY (1<<5)
#define DPLS_EDP_PPS_FIX_DIS (1<<0)

#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
@ -6480,8 +6501,8 @@ enum skl_disp_power_wells {
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)

/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
#define _FDI_TXB_CTL 0x61100
#define _FDI_TXA_CTL 0x60100
#define _FDI_TXB_CTL 0x61100
#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
@ -6570,10 +6591,10 @@ enum skl_disp_power_wells {
#define FDI_RX_FDI_DELAY_90 (0x90<<0)
#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)

#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
#define FDI_RX_TUSIZE1(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)

@ -6590,10 +6611,10 @@ enum skl_disp_power_wells {
#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)

#define _FDI_RXA_IIR 0xf0014
#define _FDI_RXA_IMR 0xf0018
#define _FDI_RXB_IIR 0xf1014
#define _FDI_RXB_IMR 0xf1018
#define _FDI_RXA_IIR 0xf0014
#define _FDI_RXA_IMR 0xf0018
#define _FDI_RXB_IIR 0xf1014
#define _FDI_RXB_IMR 0xf1018
#define FDI_RX_IIR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
#define FDI_RX_IMR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)

@ -6773,6 +6794,16 @@ enum skl_disp_power_wells {

#define VLV_PMWGICZ _MMIO(0x1300a4)

#define RC6_LOCATION _MMIO(0xD40)
#define RC6_CTX_IN_DRAM (1 << 0)
#define RC6_CTX_BASE _MMIO(0xD48)
#define RC6_CTX_BASE_MASK 0xFFFFFFF0
#define PWRCTX_MAXCNT_RCSUNIT _MMIO(0x2054)
#define PWRCTX_MAXCNT_VCSUNIT0 _MMIO(0x12054)
#define PWRCTX_MAXCNT_BCSUNIT _MMIO(0x22054)
#define PWRCTX_MAXCNT_VECSUNIT _MMIO(0x1A054)
#define PWRCTX_MAXCNT_VCSUNIT1 _MMIO(0x1C054)
#define IDLE_TIME_MASK 0xFFFFF
#define FORCEWAKE _MMIO(0xA18C)
#define FORCEWAKE_VLV _MMIO(0x1300b0)
#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
@ -6911,6 +6942,7 @@ enum skl_disp_power_wells {
#define GEN6_RPDEUC _MMIO(0xA084)
#define GEN6_RPDEUCSW _MMIO(0xA088)
#define GEN6_RC_STATE _MMIO(0xA094)
#define RC6_STATE (1 << 18)
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
@ -7115,11 +7147,11 @@ enum skl_disp_power_wells {
#define _IBX_HDMIW_HDMIEDID_A 0xE2050
#define _IBX_HDMIW_HDMIEDID_B 0xE2150
#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
_IBX_HDMIW_HDMIEDID_B)
_IBX_HDMIW_HDMIEDID_B)
#define _IBX_AUD_CNTL_ST_A 0xE20B4
#define _IBX_AUD_CNTL_ST_B 0xE21B4
#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
_IBX_AUD_CNTL_ST_B)
_IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
@ -7545,6 +7577,7 @@ enum skl_disp_power_wells {
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3

#define DC_STATE_DEBUG _MMIO(0x45520)
#define DC_STATE_DEBUG_MASK_CORES (1<<0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)

/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
@ -8164,4 +8197,11 @@ enum skl_disp_power_wells {
#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */

/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */

#endif /* _I915_REG_H_ */

@ -4,6 +4,10 @@
//#include <linux/stringify.h>
#include <linux/types.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"

#define trace_i915_gem_object_create(x)
#define trace_i915_gem_object_destroy(x)

@ -98,6 +98,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;

return &crtc_state->base;
}
@ -309,5 +310,5 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
state->dpll_set = false;
state->dpll_set = state->modeset = false;
}

@ -152,9 +152,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
crtc_state->base.active ? crtc_state->pipe_src_w : 0;
crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 =
crtc_state->base.active ? crtc_state->pipe_src_h : 0;
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;

if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@ -194,8 +194,14 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;

intel_plane->commit_plane(plane, intel_state);
if (intel_state->visible)
intel_plane->update_plane(plane,
to_intel_crtc_state(crtc->state),
intel_state);
else
intel_plane->disable_plane(plane, crtc);
}

const struct drm_plane_helper_funcs intel_plane_helper_funcs = {

@ -571,7 +571,7 @@ void intel_init_audio(struct drm_device *dev)
if (IS_G4X(dev)) {
dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
} else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
@ -652,8 +652,8 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
/* HSW, BDW, SKL, KBL need this fix */
if (!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv) &&
!IS_BROADWELL(dev_priv) &&
!IS_HASWELL(dev_priv))
!IS_BROADWELL(dev_priv) &&
!IS_HASWELL(dev_priv))
return 0;

mutex_lock(&dev_priv->av_mutex);
@ -666,8 +666,8 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
err = -ENODEV;
goto unlock;
}
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
if (pipe == INVALID_PIPE) {
DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
err = -ENODEV;

@ -31,11 +31,49 @@
#include "i915_drv.h"
#include "intel_bios.h"

/**
* DOC: Video BIOS Table (VBT)
*
* The Video BIOS Table, or VBT, provides platform and board specific
* configuration information to the driver that is not discoverable or available
* through other means. The configuration is mostly related to display
* hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
* the PCI ROM.
*
* The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
* Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
* contain the actual configuration information. The VBT Header, and thus the
* VBT, begins with the "$VBT" signature. The VBT Header contains the offset of
* the BDB Header. The data blocks are concatenated after the BDB Header. The
* data blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes
* of data. (Block 53, the MIPI Sequence Block, is an exception.)
*
* The driver parses the VBT during load. The relevant information is stored in
* driver private data for ease of use, and the actual VBT is not read after
* that.
*/
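As a rough illustration of the block layout described above, a minimal stand-alone walker might look like the sketch below. It is hypothetical (the driver's real parser is find_section() further down) and deliberately ignores the MIPI Sequence Block special case:

/* Sketch of walking the BDB blocks described above: each block is a 1-byte
 * Block ID, a 2-byte little-endian Block Size, then that many bytes of data,
 * blocks concatenated back to back. Returns a pointer to the data of the
 * first block with the wanted id, or NULL. */
static const uint8_t *bdb_find_block(const uint8_t *blocks, size_t total,
				     uint8_t wanted_id, uint16_t *size_out)
{
	size_t index = 0;

	while (index + 3 <= total) {
		uint8_t id = blocks[index];
		uint16_t size = blocks[index + 1] | (blocks[index + 2] << 8);

		index += 3;
		if (index + size > total)
			return NULL;	/* truncated block */
		if (id == wanted_id) {
			*size_out = size;
			return &blocks[index];
		}
		index += size;
	}
	return NULL;
}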
|
||||
#define SLAVE_ADDR1 0x70
|
||||
#define SLAVE_ADDR2 0x72
|
||||
|
||||
static int panel_type;
|
||||
|
||||
/* Get BDB block size given a pointer to Block ID. */
|
||||
static u32 _get_blocksize(const u8 *block_base)
|
||||
{
|
||||
/* The MIPI Sequence Block v3+ has a separate size field. */
|
||||
if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
|
||||
return *((const u32 *)(block_base + 4));
|
||||
else
|
||||
return *((const u16 *)(block_base + 1));
|
||||
}
|
||||
|
||||
/* Get BDB block size give a pointer to data after Block ID and Block Size. */
|
||||
static u32 get_blocksize(const void *block_data)
|
||||
{
|
||||
return _get_blocksize(block_data - 3);
|
||||
}
|
||||
|
||||
static const void *
|
||||
find_section(const void *_bdb, int section_id)
|
||||
{
|
||||
@ -52,14 +90,8 @@ find_section(const void *_bdb, int section_id)
|
||||
/* walk the sections looking for section_id */
|
||||
while (index + 3 < total) {
|
||||
current_id = *(base + index);
|
||||
index++;
|
||||
|
||||
current_size = *((const u16 *)(base + index));
|
||||
index += 2;
|
||||
|
||||
/* The MIPI Sequence Block v3+ has a separate size field. */
|
||||
if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
|
||||
current_size = *((const u32 *)(base + index + 1));
|
||||
current_size = _get_blocksize(base + index);
|
||||
index += 3;
|
||||
|
||||
if (index + current_size > total)
|
||||
return NULL;
|
||||
@ -73,16 +105,6 @@ find_section(const void *_bdb, int section_id)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static u16
|
||||
get_blocksize(const void *p)
|
||||
{
|
||||
u16 *block_ptr, block_size;
|
||||
|
||||
block_ptr = (u16 *)((char *)p - 2);
|
||||
block_size = *block_ptr;
|
||||
return block_size;
|
||||
}
|
||||
|
||||
static void
|
||||
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
|
||||
const struct lvds_dvo_timing *dvo_timing)
|
||||
@ -356,24 +378,24 @@ parse_general_features(struct drm_i915_private *dev_priv,
|
||||
if (!general)
|
||||
return;
|
||||
|
||||
dev_priv->vbt.int_tv_support = general->int_tv_support;
|
||||
dev_priv->vbt.int_tv_support = general->int_tv_support;
|
||||
/* int_crt_support can't be trusted on earlier platforms */
|
||||
if (bdb->version >= 155 &&
|
||||
(HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
|
||||
dev_priv->vbt.int_crt_support = general->int_crt_support;
|
||||
dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
|
||||
dev_priv->vbt.lvds_ssc_freq =
|
||||
dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
|
||||
dev_priv->vbt.lvds_ssc_freq =
|
||||
intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
|
||||
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
|
||||
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
|
||||
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
|
||||
dev_priv->vbt.int_tv_support,
|
||||
dev_priv->vbt.int_crt_support,
|
||||
dev_priv->vbt.lvds_use_ssc,
|
||||
dev_priv->vbt.lvds_ssc_freq,
|
||||
dev_priv->vbt.display_clock_mode,
|
||||
dev_priv->vbt.fdi_rx_polarity_inverted);
|
||||
}
|
||||
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
|
||||
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
|
||||
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
|
||||
dev_priv->vbt.int_tv_support,
|
||||
dev_priv->vbt.int_crt_support,
|
||||
dev_priv->vbt.lvds_use_ssc,
|
||||
dev_priv->vbt.lvds_ssc_freq,
|
||||
dev_priv->vbt.display_clock_mode,
|
||||
dev_priv->vbt.fdi_rx_polarity_inverted);
|
||||
}
|
||||
|
||||
static void
|
||||
parse_general_definitions(struct drm_i915_private *dev_priv,
|
||||
@ -675,84 +697,13 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
|
||||
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
|
||||
}
|
||||
|
||||
static u8 *goto_next_sequence(u8 *data, int *size)
|
||||
{
|
||||
u16 len;
|
||||
int tmp = *size;
|
||||
|
||||
if (--tmp < 0)
|
||||
return NULL;
|
||||
|
||||
/* goto first element */
|
||||
data++;
|
||||
while (1) {
|
||||
switch (*data) {
|
||||
case MIPI_SEQ_ELEM_SEND_PKT:
|
||||
/*
|
||||
* skip by this element payload size
|
||||
* skip elem id, command flag and data type
|
||||
*/
|
||||
tmp -= 5;
|
||||
if (tmp < 0)
|
||||
return NULL;
|
||||
|
||||
data += 3;
|
||||
len = *((u16 *)data);
|
||||
|
||||
tmp -= len;
|
||||
if (tmp < 0)
|
||||
return NULL;
|
||||
|
||||
/* skip by len */
|
||||
data = data + 2 + len;
|
||||
break;
|
||||
case MIPI_SEQ_ELEM_DELAY:
|
||||
/* skip by elem id, and delay is 4 bytes */
|
||||
tmp -= 5;
|
||||
if (tmp < 0)
|
||||
return NULL;
|
||||
|
||||
data += 5;
|
||||
break;
|
||||
case MIPI_SEQ_ELEM_GPIO:
|
||||
tmp -= 3;
|
||||
if (tmp < 0)
|
||||
return NULL;
|
||||
|
||||
data += 3;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown element\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* end of sequence ? */
|
||||
if (*data == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
/* goto next sequence or end of block byte */
|
||||
if (--tmp < 0)
|
||||
return NULL;
|
||||
|
||||
data++;
|
||||
|
||||
/* update amount of data left for the sequence block to be parsed */
|
||||
*size = tmp;
|
||||
return data;
|
||||
}
|
||||
|
||||
static void
|
||||
parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
|
||||
parse_mipi_config(struct drm_i915_private *dev_priv,
|
||||
const struct bdb_header *bdb)
|
||||
{
|
||||
const struct bdb_mipi_config *start;
|
||||
const struct bdb_mipi_sequence *sequence;
|
||||
const struct mipi_config *config;
|
||||
const struct mipi_pps_data *pps;
|
||||
u8 *data;
|
||||
const u8 *seq_data;
|
||||
int i, panel_id, seq_size;
|
||||
u16 block_size;
|
||||
|
||||
/* parse MIPI blocks only if LFP type is MIPI */
|
||||
if (!dev_priv->vbt.has_mipi)
|
||||
@ -798,8 +749,178 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
|
||||
|
||||
/* We have mandatory mipi config blocks. Initialize as generic panel */
|
||||
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
|
||||
}
|
||||
|
||||
/* Find the sequence block and size for the given panel. */
|
||||
static const u8 *
|
||||
find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
|
||||
u16 panel_id, u32 *seq_size)
|
||||
{
|
||||
u32 total = get_blocksize(sequence);
|
||||
const u8 *data = &sequence->data[0];
|
||||
u8 current_id;
|
||||
u32 current_size;
|
||||
int header_size = sequence->version >= 3 ? 5 : 3;
|
||||
int index = 0;
|
||||
int i;
|
||||
|
||||
/* skip new block size */
|
||||
if (sequence->version >= 3)
|
||||
data += 4;
|
||||
|
||||
for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
|
||||
if (index + header_size > total) {
|
||||
DRM_ERROR("Invalid sequence block (header)\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
current_id = *(data + index);
|
||||
if (sequence->version >= 3)
|
||||
current_size = *((const u32 *)(data + index + 1));
|
||||
else
|
||||
current_size = *((const u16 *)(data + index + 1));
|
||||
|
||||
index += header_size;
|
||||
|
||||
if (index + current_size > total) {
|
||||
DRM_ERROR("Invalid sequence block\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (current_id == panel_id) {
|
||||
*seq_size = current_size;
|
||||
return data + index;
|
||||
}
|
||||
|
||||
index += current_size;
|
||||
}
|
||||
|
||||
DRM_ERROR("Sequence block detected but no valid configuration\n");
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int goto_next_sequence(const u8 *data, int index, int total)
|
||||
{
|
||||
u16 len;
|
||||
|
||||
/* Skip Sequence Byte. */
|
||||
for (index = index + 1; index < total; index += len) {
|
||||
u8 operation_byte = *(data + index);
|
||||
index++;
|
||||
|
||||
switch (operation_byte) {
|
||||
case MIPI_SEQ_ELEM_END:
|
||||
return index;
|
||||
case MIPI_SEQ_ELEM_SEND_PKT:
|
||||
if (index + 4 > total)
|
||||
return 0;
|
||||
|
||||
len = *((const u16 *)(data + index + 2)) + 4;
|
||||
break;
|
||||
case MIPI_SEQ_ELEM_DELAY:
|
||||
len = 4;
|
||||
break;
|
||||
case MIPI_SEQ_ELEM_GPIO:
|
||||
len = 2;
|
||||
break;
|
||||
case MIPI_SEQ_ELEM_I2C:
|
||||
if (index + 7 > total)
|
||||
return 0;
|
||||
len = *(data + index + 6) + 7;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown operation byte\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
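To make the v1/v2 element framing that goto_next_sequence() consumes concrete, here is a hand-made byte stream (purely illustrative, not taken from any real VBT). A SEND_PKT element carries a command flag, a data type, a 2-byte payload length and the payload; DELAY carries a 4-byte value; the END element byte terminates the sequence:

/* Illustrative v1/v2 stream: goto_next_sequence(example_seq, 0,
 * sizeof(example_seq)) walks it and returns 14, the index just past
 * the MIPI_SEQ_ELEM_END byte. */
static const u8 example_seq[] = {
	0x02,				/* sequence byte: MIPI_SEQ_INIT_OTP */
	0x01,				/* MIPI_SEQ_ELEM_SEND_PKT */
	0x00, 0x00,			/* command flag, data type */
	0x02, 0x00,			/* 2-byte payload length = 2 */
	0x11, 0x22,			/* payload */
	0x02,				/* MIPI_SEQ_ELEM_DELAY */
	0x10, 0x27, 0x00, 0x00,		/* 4-byte delay value */
	0x00,				/* MIPI_SEQ_ELEM_END */
};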

static int goto_next_sequence_v3(const u8 *data, int index, int total)
{
int seq_end;
u16 len;
u32 size_of_sequence;

/*
* Could skip sequence based on Size of Sequence alone, but also do some
* checking on the structure.
*/
if (total < 5) {
DRM_ERROR("Too small sequence size\n");
return 0;
}

/* Skip Sequence Byte. */
index++;

/*
* Size of Sequence. Excludes the Sequence Byte and the size itself,
* includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
* byte.
*/
size_of_sequence = *((const uint32_t *)(data + index));
index += 4;

seq_end = index + size_of_sequence;
if (seq_end > total) {
DRM_ERROR("Invalid sequence size\n");
return 0;
}

for (; index < total; index += len) {
u8 operation_byte = *(data + index);
index++;

if (operation_byte == MIPI_SEQ_ELEM_END) {
if (index != seq_end) {
DRM_ERROR("Invalid element structure\n");
return 0;
}
return index;
}

len = *(data + index);
index++;

/*
* FIXME: Would be nice to check elements like for v1/v2 in
* goto_next_sequence() above.
*/
switch (operation_byte) {
case MIPI_SEQ_ELEM_SEND_PKT:
case MIPI_SEQ_ELEM_DELAY:
case MIPI_SEQ_ELEM_GPIO:
case MIPI_SEQ_ELEM_I2C:
case MIPI_SEQ_ELEM_SPI:
case MIPI_SEQ_ELEM_PMIC:
break;
default:
DRM_ERROR("Unknown operation byte %u\n",
operation_byte);
break;
}
}

return 0;
}
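The v3 framing differs from v1/v2: a 4-byte Size of Sequence follows the sequence byte, and every element carries a 1-byte size after its operation byte, which is what lets goto_next_sequence_v3() skip elements it cannot interpret. A hand-made stream (illustrative only, not from a real VBT):

/* Illustrative v3 stream: Size of Sequence = 7 covers the DELAY element
 * (1 op byte + 1 size byte + 4 payload bytes) plus the END byte.
 * goto_next_sequence_v3(example_seq_v3, 0, sizeof(example_seq_v3))
 * returns 12, the index just past MIPI_SEQ_ELEM_END. */
static const u8 example_seq_v3[] = {
	0x03,				/* sequence byte: MIPI_SEQ_DISPLAY_ON */
	0x07, 0x00, 0x00, 0x00,		/* Size of Sequence = 7 */
	0x02,				/* MIPI_SEQ_ELEM_DELAY */
	0x04,				/* 1-byte element size */
	0x10, 0x27, 0x00, 0x00,		/* payload: delay value */
	0x00,				/* MIPI_SEQ_ELEM_END */
};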

static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;
u8 *data;
int index = 0;

/* Only our generic panel driver uses the sequence block. */
if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;

/* Check if we have sequence block as well */
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
if (!sequence) {
DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
@ -807,95 +928,54 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
}

/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 3) {
DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
if (sequence->version >= 4) {
DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
sequence->version);
return;
}

DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);

block_size = get_blocksize(sequence);
seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
if (!seq_data)
return;

/*
* parse the sequence block for individual sequences
*/
dev_priv->vbt.dsi.seq_version = sequence->version;
data = kmemdup(seq_data, seq_size, GFP_KERNEL);
if (!data)
return;

seq_data = &sequence->data[0];

/*
* sequence block is variable length and hence we need to parse and
* get the sequence data for specific panel id
*/
for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
panel_id = *seq_data;
seq_size = *((u16 *) (seq_data + 1));
if (panel_id == panel_type)
/* Parse the sequences, store pointers to each sequence. */
for (;;) {
u8 seq_id = *(data + index);
if (seq_id == MIPI_SEQ_END)
break;

/* skip the sequence including seq header of 3 bytes */
seq_data = seq_data + 3 + seq_size;
if ((seq_data - &sequence->data[0]) > block_size) {
DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
return;
if (seq_id >= MIPI_SEQ_MAX) {
DRM_ERROR("Unknown sequence %u\n", seq_id);
goto err;
}

dev_priv->vbt.dsi.sequence[seq_id] = data + index;

if (sequence->version >= 3)
index = goto_next_sequence_v3(data, index, seq_size);
else
index = goto_next_sequence(data, index, seq_size);
if (!index) {
DRM_ERROR("Invalid sequence %u\n", seq_id);
goto err;
}
}

if (i == MAX_MIPI_CONFIGURATIONS) {
DRM_ERROR("Sequence block detected but no valid configuration\n");
return;
}

/* check if found sequence is completely within the sequence block
* just being paranoid */
if (seq_size > block_size) {
DRM_ERROR("Corrupted sequence/size, bailing out\n");
return;
}

/* skip the panel id(1 byte) and seq size(2 bytes) */
dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
if (!dev_priv->vbt.dsi.data)
return;

/*
* loop into the sequence data and split into multiple sequences
* There are only 5 types of sequences as of now
*/
data = dev_priv->vbt.dsi.data;
dev_priv->vbt.dsi.data = data;
dev_priv->vbt.dsi.size = seq_size;
dev_priv->vbt.dsi.seq_version = sequence->version;

/* two consecutive 0x00 indicate end of all sequences */
while (1) {
int seq_id = *data;
if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
dev_priv->vbt.dsi.sequence[seq_id] = data;
DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
} else {
DRM_ERROR("undefined sequence\n");
goto err;
}

/* partial parsing to skip elements */
data = goto_next_sequence(data, &seq_size);

if (data == NULL) {
DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
goto err;
}

if (*data == 0)
break; /* end of sequence reached */
}

DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
return;
err:
kfree(dev_priv->vbt.dsi.data);
dev_priv->vbt.dsi.data = NULL;

/* error during parsing so set all pointers to null
* because of partial parsing */
err:
kfree(data);
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}

@ -1088,7 +1168,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
if (bdb->version < 195) {
if (bdb->version < 106) {
expected_size = 22;
} else if (bdb->version < 109) {
expected_size = 27;
} else if (bdb->version < 195) {
BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
expected_size = sizeof(struct old_child_dev_config);
} else if (bdb->version == 195) {
expected_size = 37;
@ -1101,18 +1186,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
bdb->version, expected_size);
}

/* The legacy sized child device config is the minimum we need. */
if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
DRM_ERROR("Child device config size %u is too small.\n",
p_defs->child_dev_size);
return;
}

/* Flag an error for unexpected size, but continue anyway. */
if (p_defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
p_defs->child_dev_size, expected_size, bdb->version);

/* The legacy sized child device config is the minimum we need. */
if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
DRM_DEBUG_KMS("Child device config size %u is too small.\n",
p_defs->child_dev_size);
return;
}

/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
/* get the number of child device */
@ -1227,7 +1312,7 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
* Returns true on valid VBT.
*/
bool intel_bios_is_valid_vbt(const void *buf, size_t size)
{
{
const struct vbt_header *vbt = buf;
const struct bdb_header *bdb;

@ -1269,23 +1354,23 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
if (ioread32(bios + i) != *((const u32 *) "$VBT"))
continue;

/*
/*
* This is the one place where we explicitly discard the address
* space (__iomem) of the BIOS/VBT.
*/
*/
vbt = (void __force *) bios + i;
if (intel_bios_is_valid_vbt(vbt, size - i))
return vbt;

break;
}
break;
}

return NULL;
}

/**
* intel_bios_init - find VBT and initialize settings from the BIOS
* @dev: DRM device
* @dev_priv: i915 device instance
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
@ -1337,7 +1422,8 @@ intel_bios_init(struct drm_i915_private *dev_priv)
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
parse_mipi_config(dev_priv, bdb);
parse_mipi_sequence(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);

if (bios)

@ -25,25 +25,43 @@
*
*/

#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_

/**
* struct vbt_header - VBT Header structure
* @signature: VBT signature, always starts with "$VBT"
* @version: Version of this structure
* @header_size: Size of this structure
* @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
* @vbt_checksum: Checksum
* @reserved0: Reserved
* @bdb_offset: Offset of &struct bdb_header from beginning of VBT
* @aim_offset: Offsets of add-in data blocks from beginning of VBT
*/
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 vbt_size; /**< in bytes */
u8 signature[20];
u16 version;
u16 header_size;
u16 vbt_size;
u8 vbt_checksum;
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
u32 bdb_offset;
u32 aim_offset[4];
} __packed;

/**
* struct bdb_header - BDB Header structure
* @signature: BDB signature "BIOS_DATA_BLOCK"
* @version: Version of the data block definitions
* @header_size: Size of this structure
* @bdb_size: Size of BDB (BDB Header and data blocks)
*/
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
u8 signature[16];
u16 version;
u16 header_size;
u16 bdb_size;
} __packed;
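Given these two headers, locating the BDB inside a validated VBT buffer is a one-line offset computation; a minimal sketch mirroring what get_bdb_header() in intel_bios.c does:

/* Locate the BDB header from a validated VBT buffer: bdb_offset is
 * relative to the start of the VBT, per the field documentation above. */
static inline const struct bdb_header *
bdb_from_vbt(const struct vbt_header *vbt)
{
	return (const struct bdb_header *)((const u8 *)vbt + vbt->bdb_offset);
}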
|
||||
/* strictly speaking, this is a "skip" block, but it has interesting info */
|
||||
@ -936,21 +954,29 @@ struct bdb_mipi_sequence {
|
||||
|
||||
/* MIPI Sequnece Block definitions */
|
||||
enum mipi_seq {
|
||||
MIPI_SEQ_UNDEFINED = 0,
|
||||
MIPI_SEQ_END = 0,
|
||||
MIPI_SEQ_ASSERT_RESET,
|
||||
MIPI_SEQ_INIT_OTP,
|
||||
MIPI_SEQ_DISPLAY_ON,
|
||||
MIPI_SEQ_DISPLAY_OFF,
|
||||
MIPI_SEQ_DEASSERT_RESET,
|
||||
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
|
||||
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
|
||||
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
|
||||
MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
|
||||
MIPI_SEQ_POWER_ON, /* sequence block v3+ */
|
||||
MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
|
||||
MIPI_SEQ_MAX
|
||||
};
|
||||
|
||||
enum mipi_seq_element {
|
||||
MIPI_SEQ_ELEM_UNDEFINED = 0,
|
||||
MIPI_SEQ_ELEM_END = 0,
|
||||
MIPI_SEQ_ELEM_SEND_PKT,
|
||||
MIPI_SEQ_ELEM_DELAY,
|
||||
MIPI_SEQ_ELEM_GPIO,
|
||||
MIPI_SEQ_ELEM_STATUS,
|
||||
MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
|
||||
MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
|
||||
MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
|
||||
MIPI_SEQ_ELEM_MAX
|
||||
};
|
||||
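After parse_mipi_sequence() has filled dev_priv->vbt.dsi.sequence[], a consumer indexes the table by these enum values; a rough sketch of such a lookup (hypothetical helper, not the panel driver's actual code):

/* Hypothetical lookup: return the raw element stream for one sequence,
 * or NULL if the VBT did not provide it. */
static const u8 *dsi_get_sequence(struct drm_i915_private *dev_priv,
				  enum mipi_seq seq_id)
{
	if (seq_id <= MIPI_SEQ_END || seq_id >= MIPI_SEQ_MAX)
		return NULL;
	return dev_priv->vbt.dsi.sequence[seq_id];
}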
|
||||
@ -965,4 +991,4 @@ enum mipi_gpio_pin_index {
|
||||
MIPI_GPIO_MAX
|
||||
};
|
||||
|
||||
#endif /* _I830_BIOS_H_ */
|
||||
#endif /* _INTEL_BIOS_H_ */
|
||||
|
@ -213,9 +213,7 @@ static void pch_post_disable_crt(struct intel_encoder *encoder)
|
||||
|
||||
static void intel_enable_crt(struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_crt *crt = intel_encoder_to_crt(encoder);
|
||||
|
||||
intel_crt_set_dpms(encoder, crt->connector->base.dpms);
|
||||
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
|
||||
}
|
||||
|
||||
static enum drm_mode_status
|
||||
@ -223,6 +221,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
int max_dotclk = to_i915(dev)->max_dotclk_freq;
|
||||
|
||||
int max_clock = 0;
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
@ -238,6 +237,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
|
||||
if (mode->clock > max_clock)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
if (mode->clock > max_dotclk)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
|
||||
if (HAS_PCH_LPT(dev) &&
|
||||
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
|
||||
@ -471,7 +473,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
|
||||
return true;
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
|
||||
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
|
||||
} else {
|
||||
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
|
||||
}
|
||||
@ -482,11 +484,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
|
||||
}
|
||||
|
||||
static enum drm_connector_status
|
||||
intel_crt_load_detect(struct intel_crt *crt)
|
||||
intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
|
||||
{
|
||||
struct drm_device *dev = crt->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
|
||||
uint32_t save_bclrpat;
|
||||
uint32_t save_vtotal;
|
||||
uint32_t vtotal, vactive;
|
||||
@ -655,7 +656,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
|
||||
if (intel_crt_detect_ddc(connector))
|
||||
status = connector_status_connected;
|
||||
else if (INTEL_INFO(dev)->gen < 4)
|
||||
status = intel_crt_load_detect(crt);
|
||||
status = intel_crt_load_detect(crt,
|
||||
to_intel_crtc(connector->state->crtc)->pipe);
|
||||
else
|
||||
status = connector_status_unknown;
|
||||
intel_release_load_detect_pipe(connector, &tmp, &ctx);
|
||||
|
@ -44,6 +44,8 @@
|
||||
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
|
||||
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
|
||||
|
||||
#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
|
||||
|
||||
MODULE_FIRMWARE(I915_CSR_SKL);
|
||||
MODULE_FIRMWARE(I915_CSR_BXT);
|
||||
|
||||
@ -175,10 +177,10 @@ static const struct stepping_info kbl_stepping_info[] = {
|
||||
};
|
||||
|
||||
static const struct stepping_info skl_stepping_info[] = {
|
||||
{'A', '0'}, {'B', '0'}, {'C', '0'},
|
||||
{'D', '0'}, {'E', '0'}, {'F', '0'},
|
||||
{'G', '0'}, {'H', '0'}, {'I', '0'},
|
||||
{'J', '0'}, {'K', '0'}
|
||||
{'A', '0'}, {'B', '0'}, {'C', '0'},
|
||||
{'D', '0'}, {'E', '0'}, {'F', '0'},
|
||||
{'G', '0'}, {'H', '0'}, {'I', '0'},
|
||||
{'J', '0'}, {'K', '0'}
|
||||
};
|
||||
|
||||
static const struct stepping_info bxt_stepping_info[] = {
|
||||
@ -218,19 +220,19 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de
|
||||
* Everytime display comes back from low power state this function is called to
|
||||
* copy the firmware from internal memory to registers.
|
||||
*/
|
||||
void intel_csr_load_program(struct drm_i915_private *dev_priv)
|
||||
bool intel_csr_load_program(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u32 *payload = dev_priv->csr.dmc_payload;
|
||||
uint32_t i, fw_size;
|
||||
|
||||
if (!IS_GEN9(dev_priv)) {
|
||||
DRM_ERROR("No CSR support available for this platform\n");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!dev_priv->csr.dmc_payload) {
|
||||
DRM_ERROR("Tried to program CSR with empty payload\n");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
fw_size = dev_priv->csr.dmc_fw_size;
|
||||
@ -239,10 +241,12 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
|
||||
|
||||
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
|
||||
I915_WRITE(dev_priv->csr.mmioaddr[i],
|
||||
dev_priv->csr.mmiodata[i]);
|
||||
dev_priv->csr.mmiodata[i]);
|
||||
}
|
||||
|
||||
dev_priv->csr.dc_state = 0;
|
||||
|
||||
return true;
|
||||
}
|
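With the return type changed from void to bool, a caller can now observe whether the DMC payload was actually programmed; a minimal illustrative call site (hypothetical, not part of this commit):

/* Hypothetical caller: skip DC-state bookkeeping if programming failed. */
if (!intel_csr_load_program(dev_priv))
	DRM_DEBUG_KMS("DMC firmware not programmed, DC states unavailable\n");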
||||
|
||||
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
||||
@ -273,18 +277,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
||||
/* Extract CSS Header information*/
|
||||
css_header = (struct intel_css_header *)fw->data;
|
||||
if (sizeof(struct intel_css_header) !=
|
||||
(css_header->header_len * 4)) {
|
||||
(css_header->header_len * 4)) {
|
||||
DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
|
||||
(css_header->header_len * 4));
|
||||
(css_header->header_len * 4));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
csr->version = css_header->version;
|
||||
|
||||
if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
|
||||
if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
|
||||
csr->version < SKL_CSR_VERSION_REQUIRED) {
|
||||
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
|
||||
" please upgrade to v%u.%u or later"
|
||||
" [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
|
||||
" [" FIRMWARE_URL "].\n",
|
||||
CSR_VERSION_MAJOR(csr->version),
|
||||
CSR_VERSION_MINOR(csr->version),
|
||||
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
|
||||
@ -296,11 +301,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
||||
|
||||
/* Extract Package Header information*/
|
||||
package_header = (struct intel_package_header *)
|
||||
&fw->data[readcount];
|
||||
&fw->data[readcount];
|
||||
if (sizeof(struct intel_package_header) !=
|
||||
(package_header->header_len * 4)) {
|
||||
(package_header->header_len * 4)) {
|
||||
DRM_ERROR("Firmware has wrong package header length %u bytes\n",
|
||||
(package_header->header_len * 4));
|
||||
(package_header->header_len * 4));
|
||||
return NULL;
|
||||
}
|
||||
readcount += sizeof(struct intel_package_header);
|
||||
@ -308,7 +313,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
||||
/* Search for dmc_offset to find firware binary. */
|
||||
for (i = 0; i < package_header->num_entries; i++) {
|
||||
if (package_header->fw_info[i].substepping == '*' &&
|
||||
stepping == package_header->fw_info[i].stepping) {
|
||||
stepping == package_header->fw_info[i].stepping) {
|
||||
dmc_offset = package_header->fw_info[i].offset;
|
||||
break;
|
||||
} else if (stepping == package_header->fw_info[i].stepping &&
|
||||
@ -316,7 +321,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
||||
dmc_offset = package_header->fw_info[i].offset;
|
||||
break;
|
||||
} else if (package_header->fw_info[i].stepping == '*' &&
|
||||
package_header->fw_info[i].substepping == '*')
|
||||
package_header->fw_info[i].substepping == '*')
|
||||
dmc_offset = package_header->fw_info[i].offset;
|
||||
}
|
||||
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
|
||||
@ -329,7 +334,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
||||
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
|
||||
if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
|
||||
DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
|
||||
(dmc_header->header_len));
|
||||
(dmc_header->header_len));
|
||||
return NULL;
|
||||
}
|
||||
readcount += sizeof(struct intel_dmc_header);
|
||||
@ -337,15 +342,15 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
||||
/* Cache the dmc header info. */
|
||||
if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
|
||||
DRM_ERROR("Firmware has wrong mmio count %u\n",
|
||||
dmc_header->mmio_count);
|
||||
dmc_header->mmio_count);
|
||||
return NULL;
|
||||
}
|
||||
csr->mmio_count = dmc_header->mmio_count;
|
||||
for (i = 0; i < dmc_header->mmio_count; i++) {
|
||||
if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
|
||||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
|
||||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
|
||||
DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
|
||||
dmc_header->mmioaddr[i]);
|
||||
dmc_header->mmioaddr[i]);
|
||||
return NULL;
|
||||
}
|
||||
csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
|
||||
@ -371,12 +376,14 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
||||
return dmc_payload;
|
||||
}
|
||||
|
||||
static void csr_load_work_fn(struct drm_i915_private *dev_priv)
|
||||
static void csr_load_work_fn(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv;
|
||||
struct intel_csr *csr;
|
||||
const struct firmware *fw;
|
||||
int ret;
|
||||
|
||||
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
|
||||
csr = &dev_priv->csr;
|
||||
|
||||
ret = request_firmware(&fw, dev_priv->csr.fw_path,
|
||||
@ -400,8 +407,11 @@ out:
|
||||
CSR_VERSION_MAJOR(csr->version),
|
||||
CSR_VERSION_MINOR(csr->version));
|
||||
} else {
|
||||
DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
|
||||
}
|
||||
dev_notice(dev_priv->dev->dev,
|
||||
"Failed to load DMC firmware"
|
||||
" [" FIRMWARE_URL "],"
|
||||
" disabling runtime power management.\n");
|
||||
}
|
||||
|
||||
release_firmware(fw);
|
||||
}
|
||||
@ -417,10 +427,12 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_csr *csr = &dev_priv->csr;
|
||||
|
||||
INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);
|
||||
|
||||
if (!HAS_CSR(dev_priv))
|
||||
return;
|
||||
|
||||
if (IS_SKYLAKE(dev_priv))
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
csr->fw_path = I915_CSR_SKL;
|
||||
else if (IS_BROXTON(dev_priv))
|
||||
csr->fw_path = I915_CSR_BXT;
|
||||
@ -437,7 +449,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
|
||||
|
||||
csr_load_work_fn(dev_priv);
|
||||
schedule_work(&dev_priv->csr.work);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -133,38 +133,38 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
|
||||
{ 0x00002016, 0x000000A0, 0x0 },
|
||||
{ 0x00005012, 0x0000009B, 0x0 },
|
||||
{ 0x00007011, 0x00000088, 0x0 },
|
||||
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
|
||||
{ 0x80009010, 0x000000C0, 0x1 },
|
||||
{ 0x00002016, 0x0000009B, 0x0 },
|
||||
{ 0x00005012, 0x00000088, 0x0 },
|
||||
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
|
||||
{ 0x80007011, 0x000000C0, 0x1 },
|
||||
{ 0x00002016, 0x000000DF, 0x0 },
|
||||
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
|
||||
{ 0x80005012, 0x000000C0, 0x1 },
|
||||
};
|
||||
|
||||
/* Skylake U */
|
||||
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
|
||||
{ 0x0000201B, 0x000000A2, 0x0 },
|
||||
{ 0x00005012, 0x00000088, 0x0 },
|
||||
{ 0x00007011, 0x00000087, 0x0 },
|
||||
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
|
||||
{ 0x80007011, 0x000000CD, 0x0 },
|
||||
{ 0x80009010, 0x000000C0, 0x1 },
|
||||
{ 0x0000201B, 0x0000009D, 0x0 },
|
||||
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
|
||||
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
|
||||
{ 0x80005012, 0x000000C0, 0x1 },
|
||||
{ 0x80007011, 0x000000C0, 0x1 },
|
||||
{ 0x00002016, 0x00000088, 0x0 },
|
||||
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
|
||||
{ 0x80005012, 0x000000C0, 0x1 },
|
||||
};
|
||||
|
||||
/* Skylake Y */
|
||||
static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
|
||||
{ 0x00000018, 0x000000A2, 0x0 },
|
||||
{ 0x00005012, 0x00000088, 0x0 },
|
||||
{ 0x00007011, 0x00000087, 0x0 },
|
||||
{ 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
|
||||
{ 0x80007011, 0x000000CD, 0x0 },
|
||||
{ 0x80009010, 0x000000C0, 0x3 },
|
||||
{ 0x00000018, 0x0000009D, 0x0 },
|
||||
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
|
||||
{ 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
|
||||
{ 0x80005012, 0x000000C0, 0x3 },
|
||||
{ 0x80007011, 0x000000C0, 0x3 },
|
||||
{ 0x00000018, 0x00000088, 0x0 },
|
||||
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
|
||||
{ 0x80005012, 0x000000C0, 0x3 },
|
||||
};
|
||||
|
||||
/*
|
||||
@ -226,26 +226,26 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
|
||||
{ 0x00000018, 0x000000A1, 0x0 },
|
||||
{ 0x00000018, 0x00000098, 0x0 },
|
||||
{ 0x00004013, 0x00000088, 0x0 },
|
||||
{ 0x00006012, 0x00000087, 0x0 },
|
||||
{ 0x80006012, 0x000000CD, 0x1 },
|
||||
{ 0x00000018, 0x000000DF, 0x0 },
|
||||
{ 0x00003015, 0x00000087, 0x0 }, /* Default */
|
||||
{ 0x00003015, 0x000000C7, 0x0 },
|
||||
{ 0x00000018, 0x000000C7, 0x0 },
|
||||
{ 0x80003015, 0x000000CD, 0x1 }, /* Default */
|
||||
{ 0x80003015, 0x000000C0, 0x1 },
|
||||
{ 0x80000018, 0x000000C0, 0x1 },
|
||||
};
|
||||
|
||||
/* Skylake Y */
|
||||
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
|
||||
{ 0x00000018, 0x000000A1, 0x0 },
|
||||
{ 0x00005012, 0x000000DF, 0x0 },
|
||||
{ 0x00007011, 0x00000084, 0x0 },
|
||||
{ 0x80007011, 0x000000CB, 0x3 },
|
||||
{ 0x00000018, 0x000000A4, 0x0 },
|
||||
{ 0x00000018, 0x0000009D, 0x0 },
|
||||
{ 0x00004013, 0x00000080, 0x0 },
|
||||
{ 0x00006013, 0x000000C7, 0x0 },
|
||||
{ 0x80006013, 0x000000C0, 0x3 },
|
||||
{ 0x00000018, 0x0000008A, 0x0 },
|
||||
{ 0x00003015, 0x000000C7, 0x0 }, /* Default */
|
||||
{ 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */
|
||||
{ 0x00000018, 0x000000C7, 0x0 },
|
||||
{ 0x80003015, 0x000000C0, 0x3 }, /* Default */
|
||||
{ 0x80003015, 0x000000C0, 0x3 },
|
||||
{ 0x80000018, 0x000000C0, 0x3 },
|
||||
};
|
||||
|
||||
struct bxt_ddi_buf_trans {
|
||||
@ -301,8 +301,8 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
|
||||
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
|
||||
};
|
||||
|
||||
static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
|
||||
enum port port, int type);
|
||||
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
|
||||
u32 level, enum port port, int type);
|
||||
|
||||
static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
|
||||
struct intel_digital_port **dig_port,
|
||||
@ -342,81 +342,50 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
|
||||
return port;
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
|
||||
static const struct ddi_buf_trans *
|
||||
skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
|
||||
{
|
||||
return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
|
||||
}
|
||||
|
||||
static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
|
||||
int *n_entries)
|
||||
{
|
||||
const struct ddi_buf_trans *ddi_translations;
|
||||
|
||||
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
|
||||
ddi_translations = skl_y_ddi_translations_dp;
|
||||
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
|
||||
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
|
||||
} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
|
||||
ddi_translations = skl_u_ddi_translations_dp;
|
||||
return skl_y_ddi_translations_dp;
|
||||
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
|
||||
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
|
||||
return skl_u_ddi_translations_dp;
|
||||
} else {
|
||||
ddi_translations = skl_ddi_translations_dp;
|
||||
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
|
||||
return skl_ddi_translations_dp;
|
||||
}
|
||||
|
||||
return ddi_translations;
|
||||
}
|
||||
|
||||
static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
|
||||
int *n_entries)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
const struct ddi_buf_trans *ddi_translations;
|
||||
|
||||
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
|
||||
if (dev_priv->edp_low_vswing) {
|
||||
ddi_translations = skl_y_ddi_translations_edp;
|
||||
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
|
||||
} else {
|
||||
ddi_translations = skl_y_ddi_translations_dp;
|
||||
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
|
||||
}
|
||||
} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
|
||||
if (dev_priv->edp_low_vswing) {
|
||||
ddi_translations = skl_u_ddi_translations_edp;
|
||||
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
|
||||
} else {
|
||||
ddi_translations = skl_u_ddi_translations_dp;
|
||||
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
|
||||
}
|
||||
} else {
|
||||
if (dev_priv->edp_low_vswing) {
|
||||
ddi_translations = skl_ddi_translations_edp;
|
||||
*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
|
||||
} else {
|
||||
ddi_translations = skl_ddi_translations_dp;
|
||||
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
|
||||
}
|
||||
}
|
||||
|
||||
return ddi_translations;
|
||||
}
|
||||
|
||||
static const struct ddi_buf_trans *
|
||||
skl_get_buf_trans_hdmi(struct drm_device *dev,
|
||||
int *n_entries)
|
||||
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
|
||||
{
|
||||
const struct ddi_buf_trans *ddi_translations;
|
||||
|
||||
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
|
||||
ddi_translations = skl_y_ddi_translations_hdmi;
|
||||
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
|
||||
} else {
|
||||
ddi_translations = skl_ddi_translations_hdmi;
|
||||
*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
|
||||
if (dev_priv->edp_low_vswing) {
|
||||
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
|
||||
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
|
||||
return skl_y_ddi_translations_edp;
|
||||
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
|
||||
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
|
||||
return skl_u_ddi_translations_edp;
|
||||
} else {
|
||||
*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
|
||||
return skl_ddi_translations_edp;
|
||||
}
|
||||
}
|
||||
|
||||
return ddi_translations;
|
||||
return skl_get_buf_trans_dp(dev_priv, n_entries);
|
||||
}
|
||||
|
||||
static const struct ddi_buf_trans *
|
||||
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
|
||||
{
|
||||
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
|
||||
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
|
||||
return skl_y_ddi_translations_hdmi;
|
||||
} else {
|
||||
*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
|
||||
return skl_ddi_translations_hdmi;
|
||||
}
|
||||
}
|
||||

/*
@ -426,47 +395,57 @@ skl_get_buf_trans_hdmi(struct drm_device *dev,
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool supports_hdmi)
void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
int hdmi_level;
enum port port;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
const struct ddi_buf_trans *ddi_translations_edp;
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;

if (IS_BROXTON(dev)) {
if (!supports_hdmi)
port = intel_ddi_get_encoder_port(encoder);
hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;

if (IS_BROXTON(dev_priv)) {
if (encoder->type != INTEL_OUTPUT_HDMI)
return;

/* Vswing programming for HDMI */
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
INTEL_OUTPUT_HDMI);
return;
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
}

if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev, &n_dp_entries);
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev, &n_edp_entries);
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
ddi_translations_hdmi =
skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
hdmi_default_entry = 8;
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
iboost_bit = 1<<31;
} else if (IS_BROADWELL(dev)) {

if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
port != PORT_A && port != PORT_E &&
n_edp_entries > 9))
n_edp_entries = 9;
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;

if (dev_priv->edp_low_vswing) {
ddi_translations_edp = bdw_ddi_translations_edp;
ddi_translations_edp = bdw_ddi_translations_edp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
} else {
ddi_translations_edp = bdw_ddi_translations_dp;
@ -478,7 +457,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
} else if (IS_HASWELL(dev)) {
} else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
@ -498,30 +477,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
hdmi_default_entry = 7;
}

switch (port) {
case PORT_A:
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
break;
case PORT_B:
case PORT_C:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
case PORT_D:
if (intel_dp_is_edp(dev, PORT_D)) {
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
} else {
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
}
break;
case PORT_E:
if (ddi_translations_fdi)
ddi_translations = ddi_translations_fdi;
else
ddi_translations = ddi_translations_dp;
case INTEL_OUTPUT_ANALOG:
ddi_translations = ddi_translations_fdi;
size = n_dp_entries;
break;
default:
@ -535,7 +502,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
ddi_translations[i].trans2);
}

if (!supports_hdmi)
if (encoder->type != INTEL_OUTPUT_HDMI)
return;

/* Choose a good default if VBT is badly populated */
@ -550,37 +517,6 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
ddi_translations_hdmi[hdmi_level].trans2);
}

/* Program DDI buffers translations for DP. By default, program ports A-D in DP
* mode and port E for FDI.
*/
void intel_prepare_ddi(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
bool visited[I915_MAX_PORTS] = { 0, };

if (!HAS_DDI(dev))
return;

for_each_intel_encoder(dev, intel_encoder) {
struct intel_digital_port *intel_dig_port;
enum port port;
bool supports_hdmi;

if (intel_encoder->type == INTEL_OUTPUT_DSI)
continue;

ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
if (visited[port])
continue;

supports_hdmi = intel_dig_port &&
intel_dig_port_supports_hdmi(intel_dig_port);

intel_prepare_ddi_buffers(dev, port, supports_hdmi);
visited[port] = true;
}
}

static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
@ -609,8 +545,14 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
u32 temp, i, rx_ctl_val;

for_each_encoder_on_crtc(dev, crtc, encoder) {
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
intel_prepare_ddi_buffer(encoder);
}

/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
* mode set "sequence for CRT port" document:
* - TP1 to TP2 time with the default value
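With the move from intel_prepare_ddi_buffers(dev, port, supports_hdmi) to intel_prepare_ddi_buffer(encoder), the translation table is selected by encoder type rather than by port number, as the hunks above show. A condensed restatement of the new selection logic (a sketch, not the literal committed code; default handling elided):

switch (encoder->type) {
case INTEL_OUTPUT_EDP:
	ddi_translations = ddi_translations_edp;
	size = n_edp_entries;
	break;
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
	ddi_translations = ddi_translations_dp;
	size = n_dp_entries;
	break;
case INTEL_OUTPUT_ANALOG:
	ddi_translations = ddi_translations_fdi; /* the CRT path drives FDI */
	size = n_dp_entries;
	break;
default:
	return; /* sketch: unknown types are not programmed */
}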
@ -1612,8 +1554,10 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
}

cfgcr1 = cfgcr2 = 0;
} else /* eDP */
} else if (intel_encoder->type == INTEL_OUTPUT_EDP) {
return true;
} else
return false;

memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@ -2070,15 +2014,15 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
goto out;
}

for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));

if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
TRANS_DDI_MODE_SELECT_DP_MST)
goto out;

*pipe = i;
*pipe = i;
ret = true;

goto out;
@ -2117,10 +2061,9 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
TRANS_CLK_SEL_DISABLED);
}

static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
enum port port, int type)
static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
uint8_t dp_iboost, hdmi_iboost;
@ -2135,21 +2078,26 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);

if (WARN_ON(port != PORT_A &&
port != PORT_E && n_entries > 9))
n_entries = 9;

iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_HDMI) {
if (hdmi_iboost) {
iboost = hdmi_iboost;
} else {
ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else {
@ -2174,10 +2122,9 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
}
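skl_ddi_set_iboost() takes the VBT-provided boost value when one exists and only otherwise falls back to the i_boost field of the selected translation entry. The precedence in miniature (a sketch assuming the surrounding declarations):

/* VBT override first, translation-table default second. */
if (dp_iboost) {
	iboost = dp_iboost;
} else {
	ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
	iboost = ddi_translations[level].i_boost;
}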

static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
enum port port, int type)
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
uint32_t val;
@ -2292,7 +2239,7 @@ static uint32_t translate_signal_level(int signal_levels)
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = dport->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
@ -2302,10 +2249,10 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)

level = translate_signal_level(signal_levels);

if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_set_iboost(dev, level, port, encoder->type);
else if (IS_BROXTON(dev))
bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
else if (IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);

return DDI_BUF_TRANS_SELECT(level);
}
@ -2357,12 +2304,18 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
int hdmi_level;

if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);

intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
}

intel_prepare_ddi_buffer(intel_encoder);

if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@ -2380,17 +2333,11 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)

intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);

if (IS_BROXTON(dev)) {
hdmi_level = dev_priv->vbt.
ddi_port_info[port].hdmi_level_shift;
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
INTEL_OUTPUT_HDMI);
}
intel_hdmi->set_infoframes(encoder,
crtc->config->has_hdmi_sink,
&crtc->config->base.adjusted_mode);
@ -2434,6 +2381,12 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);

if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);

intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
}

static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@ -3318,6 +3271,33 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp;
int max_lanes;

if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
switch (port) {
case PORT_A:
max_lanes = 4;
break;
case PORT_E:
max_lanes = 0;
break;
default:
max_lanes = 4;
break;
}
} else {
switch (port) {
case PORT_A:
max_lanes = 2;
break;
case PORT_E:
max_lanes = 2;
break;
default:
max_lanes = 4;
break;
}
}
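The nested switch above encodes a small port-to-lane-count policy keyed on DDI_A_4_LANES. An equivalent table-style restatement (hypothetical helper, not the committed form):

static int max_lanes_for_port(bool ddi_a_4_lanes, enum port port)
{
	switch (port) {
	case PORT_A:
		return ddi_a_4_lanes ? 4 : 2; /* A owns the shared lanes */
	case PORT_E:
		return ddi_a_4_lanes ? 0 : 2; /* E loses them to A */
	default:
		return 4;
	}
}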
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
@ -3363,9 +3343,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
}

intel_dig_port->max_lanes = max_lanes;

intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
File diff suppressed because it is too large
@ -156,14 +156,9 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
u8 source_max, sink_max;

source_max = 4;
if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
(intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
source_max = 2;

source_max = intel_dig_port->max_lanes;
sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

return min(source_max, sink_max);
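The source side now reads its limit from intel_dig_port->max_lanes instead of re-deriving it from DDI_A_4_LANES, and the link still trains at the minimum of what source and sink advertise. A worked illustration (the numbers are invented):

/* Port A with DDI_A_4_LANES clear: max_lanes was set to 2 at init. */
u8 source_max = intel_dig_port->max_lanes;           /* 2 in this example */
u8 sink_max = drm_dp_max_lane_count(intel_dp->dpcd); /* say the sink reports 4 */
u8 lane_count = min(source_max, sink_max);           /* link trains at 2 lanes */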
@ -207,6 +202,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

if (is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
@ -224,7 +220,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);

if (mode_rate > max_rate)
if (mode_rate > max_rate || target_clock > max_dotclk)
return MODE_CLOCK_HIGH;

if (mode->clock < 10000)
@ -339,8 +335,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = IS_CHERRYVIEW(dev) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);

vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll);
if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
return;
}
}

/*
@ -979,7 +979,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (WARN_ON(txsize > 20))
return -E2BIG;

memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
else
WARN_ON(msg->size);

ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
if (ret > 0) {
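The new guard lets intel_dp_aux_transfer() carry zero-length, buffer-less messages, which DRM uses for address-only AUX transactions. A sketch of such a message (struct drm_dp_aux_msg fields; the address value is arbitrary):

/* Address-only AUX write: no payload, so buffer stays NULL and size is 0.
 * The added check skips the memcpy for exactly this case. */
struct drm_dp_aux_msg msg = {
	.address = 0x0,
	.request = DP_AUX_NATIVE_WRITE,
	.buffer = NULL,
	.size = 0,
};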
@ -1094,14 +1097,14 @@ static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[PORT_E];

switch (info->alternate_aux_channel) {
switch (info->alternate_aux_channel) {
case DP_AUX_A:
return PORT_A;
case DP_AUX_B:
case DP_AUX_B:
return PORT_B;
case DP_AUX_C:
case DP_AUX_C:
return PORT_C;
case DP_AUX_D:
case DP_AUX_D:
return PORT_D;
default:
MISSING_CASE(info->alternate_aux_channel);
@ -1121,11 +1124,11 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
case PORT_C:
case PORT_D:
return DP_AUX_CH_CTL(port);
default:
default:
MISSING_CASE(port);
return DP_AUX_CH_CTL(PORT_A);
}
}
}

static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
@ -1188,7 +1191,6 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
int ret;
@ -1199,13 +1201,9 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
if (!intel_dp->aux.name)
return -ENOMEM;

intel_dp->aux.dev = dev->dev;
intel_dp->aux.dev = connector->base.kdev;
intel_dp->aux.transfer = intel_dp_aux_transfer;

DRM_DEBUG_KMS("registering %s bus for %s\n",
intel_dp->aux.name,
connector->base.kdev->kobj.name);

ret = drm_dp_aux_register(&intel_dp->aux);
if (ret < 0) {
DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
@ -1214,16 +1212,6 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
return ret;
}

ret = sysfs_create_link(&connector->base.kdev->kobj,
&intel_dp->aux.ddc.dev.kobj,
intel_dp->aux.ddc.dev.kobj.name);
if (ret < 0) {
DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
intel_dp->aux.name, ret);
intel_dp_aux_fini(intel_dp);
return ret;
}

return 0;
}

@ -1232,9 +1220,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

if (!intel_connector->mst_port)
sysfs_remove_link(&intel_connector->base.kdev->kobj,
intel_dp->aux.ddc.dev.kobj.name);
intel_dp_aux_fini(intel_dp);
intel_connector_unregister(intel_connector);
}

@ -1496,7 +1482,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select)
uint8_t *link_bw, uint8_t *rate_select)
{
if (intel_dp->num_sink_rates) {
*link_bw = 0;
@ -1811,12 +1797,21 @@ static void wait_panel_off(struct intel_dp *intel_dp)

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
ktime_t panel_power_on_time;
s64 panel_power_off_duration;

DRM_DEBUG_KMS("Wait for panel power cycle\n");

/* take the difference of current time and panel power off time
* and then make panel wait for t11_t12 if needed. */
panel_power_on_time = ktime_get();
panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

/* When we disable the VDD override bit last we have to do the manual
* wait. */
wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
intel_dp->panel_power_cycle_delay);
if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
wait_remaining_ms_from_jiffies(jiffies,
intel_dp->panel_power_cycle_delay - panel_power_off_duration);

wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
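The wait is now derived from a ktime timestamp taken when the panel was powered off, so only the unexpired part of the T11/T12 power-cycle delay is slept. Worked example with invented numbers:

/* panel_power_cycle_delay = 500 ms; the panel went off 320 ms ago,
 * so ktime_ms_delta() yields 320 and only ~180 ms remain to wait. */
s64 off_ms = ktime_ms_delta(ktime_get(), intel_dp->panel_power_off_time);

if (off_ms < (s64)intel_dp->panel_power_cycle_delay)
	wait_remaining_ms_from_jiffies(jiffies,
			intel_dp->panel_power_cycle_delay - off_ms);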
@ -1874,7 +1869,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return false;

// cancel_delayed_work(&intel_dp->panel_vdd_work);
cancel_delayed_work(&intel_dp->panel_vdd_work);
intel_dp->want_panel_vdd = true;

if (edp_have_panel_vdd(intel_dp))
@ -1968,7 +1963,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

if ((pp & POWER_TARGET_ON) == 0)
intel_dp->last_power_cycle = jiffies;
intel_dp->panel_power_off_time = ktime_get();

power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
@ -2117,7 +2112,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);

intel_dp->last_power_cycle = jiffies;
intel_dp->panel_power_off_time = ktime_get();
wait_panel_off(intel_dp);

/* We got a reference when we enabled the VDD. */
@ -2242,11 +2237,6 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
_intel_edp_backlight_off(intel_dp);
}

static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
}

static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@ -2256,7 +2246,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
port_name(dig_port->port),
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)

@ -2266,7 +2256,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)

I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
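The local state_string() helper is removed in favor of the core kernel's onoff(), which performs the same bool-to-"on"/"off" mapping. Usage sketch (the message text is illustrative):

/* onoff() maps a bool to the literal strings "on" / "off". */
DRM_DEBUG_KMS("eDP PLL is %s, expected %s\n",
	      onoff(cur_state), onoff(state));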
@ -2819,7 +2809,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
/* Only ilk+ has port A */
if (port == PORT_A)
ironlake_edp_pll_on(intel_dp);
}
}

static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
@ -3740,11 +3730,11 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)

I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
}
}

void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
uint8_t dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
@ -4023,7 +4013,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
} while (--attempts && count);

if (attempts == 0) {
DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
ret = -ETIMEDOUT;
}

@ -4093,10 +4083,10 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
} while (--attempts && count == 0);

if (attempts == 0) {
DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
ret = -ETIMEDOUT;
goto stop;
}
DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
ret = -ETIMEDOUT;
goto stop;
}

if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
ret = -EIO;
@ -4503,7 +4493,7 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
}

static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
struct intel_digital_port *port)
{
u32 bit;

@ -4563,7 +4553,7 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
{
if (HAS_PCH_IBX(dev_priv))
return ibx_digital_port_connected(dev_priv, port);
if (HAS_PCH_SPLIT(dev_priv))
else if (HAS_PCH_SPLIT(dev_priv))
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
@ -4883,10 +4873,9 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;

intel_dp_aux_fini(intel_dp);
intel_dp_mst_encoder_cleanup(intel_dig_port);
if (is_edp(intel_dp)) {
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
@ -4914,7 +4903,7 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
@ -4947,13 +4936,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)

void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);

if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;

intel_dp = enc_to_intel_dp(encoder);

pps_lock(intel_dp);

/*
@ -5025,9 +5016,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_display_power_get(dev_priv, power_domain);

if (long_hpd) {
/* indicate that we need to restart link training */
intel_dp->train_set_valid = false;

if (!intel_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;

@ -5130,7 +5118,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect

static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
intel_dp->last_power_cycle = jiffies;
intel_dp->panel_power_off_time = ktime_get();
intel_dp->last_power_on = jiffies;
intel_dp->last_backlight_off = jiffies;
}
@ -5513,7 +5501,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);

// cancel_delayed_work_sync(&dev_priv->drrs.work);
cancel_delayed_work_sync(&dev_priv->drrs.work);
}

static void intel_edp_drrs_downclock_work(struct work_struct *work)
@ -5566,7 +5554,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;

// cancel_delayed_work(&dev_priv->drrs.work);
cancel_delayed_work(&dev_priv->drrs.work);

mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
@ -5611,7 +5599,7 @@ void intel_edp_drrs_flush(struct drm_device *dev,
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;

// cancel_delayed_work(&dev_priv->drrs.work);
cancel_delayed_work(&dev_priv->drrs.work);

mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
@ -5847,6 +5835,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
int type, ret;

if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
return false;

intel_dp->pps_pipe = INVALID_PIPE;

/* intel_dp vfuncs */
@ -5979,7 +5972,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,

fail:
if (is_edp(intel_dp)) {
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
@ -5987,7 +5980,7 @@ fail:
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
}
}
drm_connector_unregister(connector);
drm_connector_cleanup(connector);

@ -6044,6 +6037,7 @@ intel_dp_init(struct drm_device *dev,

intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;

intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (IS_CHERRYVIEW(dev)) {
@ -85,8 +85,7 @@ static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
if (!intel_dp->train_set_valid)
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
return intel_dp_set_link_train(intel_dp, dp_train_pat);
}
@ -161,22 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
break;
}

/*
* if we used previously trained voltage and pre-emphasis values
* and we don't get clock recovery, reset link training values
*/
if (intel_dp->train_set_valid) {
DRM_DEBUG_KMS("clock recovery not ok, reset");
/* clear the flag as we are not reusing train set */
intel_dp->train_set_valid = false;
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
continue;
}

/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
@ -284,7 +267,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
@ -301,7 +283,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)

/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
@ -322,10 +303,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)

intel_dp_set_idle_link_train(intel_dp);

if (channel_eq) {
intel_dp->train_set_valid = true;
if (channel_eq)
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}
}

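With train_set_valid removed, cached voltage-swing/pre-emphasis values are never reused across retrains; the reset helper clears them unconditionally. A sketch mirroring the new body of intel_dp_reset_link_train() (renamed here to mark it as an illustration):

static bool my_reset_link_train(struct intel_dp *intel_dp,
				uint8_t dp_train_pat)
{
	/* always start training from zeroed vswing/pre-emphasis */
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp);
	return intel_dp_set_link_train(intel_dp, dp_train_pat);
}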
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
@ -173,6 +173,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
intel_mst->port = found->port;

if (intel_dp->active_mst_links == 0) {
intel_prepare_ddi_buffer(&intel_dig_port->base);

intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);

intel_dp_set_link_params(intel_dp, intel_crtc->config);
@ -347,6 +349,8 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@ -354,6 +358,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;

if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;

return MODE_OK;
}
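After this hunk the MST connector applies the same dotclock ceiling as the SST path (max_dotclk_freq and mode->clock are both in kHz). Condensed sketch of the resulting validation order:

static enum drm_mode_status my_mst_mode_valid(struct drm_connector *connector,
					      struct drm_display_mode *mode)
{
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	if (mode->clock < 10000)              /* implausibly slow */
		return MODE_CLOCK_LOW;
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;
	if (mode->clock > max_dotclk)         /* beyond the platform ceiling */
		return MODE_CLOCK_HIGH;
	return MODE_OK;
}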

@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
@ -247,7 +248,18 @@ struct intel_atomic_state {
struct drm_atomic_state base;

unsigned int cdclk;
bool dpll_set;

/*
* Calculated device cdclk, can be different from cdclk
* only when all crtc's are DPMS off.
*/
unsigned int dev_cdclk;

bool dpll_set, modeset;

unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];

struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
};
@ -369,6 +381,7 @@ struct intel_crtc_state {
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fb_changed; /* fb on any of the planes is changed */

/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
@ -482,6 +495,8 @@ struct intel_crtc_state {

bool ips_enabled;

bool enable_fbc;

bool double_wide;

bool dp_encoder_is_mst;
@ -532,16 +547,13 @@ struct intel_mmio_flip {
*/
struct intel_crtc_atomic_commit {
/* Sleepable operations to perform before commit */
bool disable_fbc;
bool disable_ips;
bool pre_disable_primary;

/* Sleepable operations to perform after commit */
unsigned fb_bits;
bool wait_vblank;
bool update_fbc;
bool post_enable_primary;
unsigned update_sprite_watermarks;

/* Sleepable operations to perform before and after commit */
bool update_fbc;
};

struct intel_crtc {
@ -565,7 +577,7 @@ struct intel_crtc {
/* Display surface base address adjustment for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
unsigned long dspaddr_offset;
u32 dspaddr_offset;
int adjusted_x;
int adjusted_y;

@ -648,23 +660,17 @@ struct intel_plane {
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
* the intel_plane_state structure and accessed via drm_plane->state.
* the intel_plane_state structure and accessed via plane_state.
*/

void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
};

struct intel_watermark_params {
@ -699,6 +705,10 @@ struct cxsr_latency {
struct intel_hdmi {
i915_reg_t hdmi_reg;
int ddc_bus;
struct {
enum drm_dp_dual_mode_type type;
int max_tmds_clock;
} dp_dual_mode;
bool limited_color_range;
bool color_range_auto;
bool has_hdmi_sink;
@ -766,9 +776,9 @@ struct intel_dp {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
ktime_t panel_power_off_time;

struct notifier_block edp_notifier;

@ -802,8 +812,6 @@ struct intel_dp {
/* This is called before a link training is started */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);

bool train_set_valid;

/* Displayport compliance testing */
unsigned long compliance_test_type;
unsigned long compliance_test_data;
@ -818,6 +826,7 @@ struct intel_digital_port {
struct intel_hdmi hdmi;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
@ -904,9 +913,7 @@ struct intel_unpin_work {
};

struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
bool load_detect_temp;
int dpms_mode;
struct drm_atomic_state *restore_state;
};

static inline struct intel_encoder *
@ -989,6 +996,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);

/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@ -997,7 +1006,7 @@ void intel_crt_init(struct drm_device *dev);
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
void intel_prepare_ddi(struct drm_device *dev);
void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
@ -1040,8 +1049,8 @@ unsigned int intel_fb_align_height(struct drm_device *dev,
uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
enum fb_op_origin origin);
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format);
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format);

/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
@ -1125,9 +1134,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);

unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
uint64_t fb_format_modifier, unsigned int plane);
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);

static inline bool
intel_rotation_90_or_270(unsigned int rotation)
@ -1148,8 +1156,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);

void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);

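vlv_force_pll_on() changing from void to int is what enables the error handling added in the vlv_power_sequencer_kick() hunk earlier. Minimal checked-call sketch (wrapped in a hypothetical helper for completeness):

static void my_kick_pll(struct drm_device *dev, enum pipe pipe,
			const struct dpll *dpll)
{
	/* a nonzero return now aborts instead of running with a dead PLL */
	if (vlv_force_pll_on(dev, pipe, dpll)) {
		DRM_ERROR("Failed to force on pll for pipe %c!\n",
			  pipe_name(pipe));
		return;
	}
	/* ... proceed with the power-sequencer kick ... */
}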
/* modesetting asserts */
@ -1166,11 +1174,11 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@ -1206,14 +1214,13 @@ enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);

int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);

u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj,
unsigned int plane);
struct drm_i915_gem_object *obj,
unsigned int plane);

u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
@ -1221,7 +1228,7 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);

/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
bool intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);

/* intel_dp.c */
@ -1324,13 +1331,16 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
#endif

/* intel_fbc.c */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
void intel_fbc_deactivate(struct intel_crtc *crtc);
void intel_fbc_update(struct intel_crtc *crtc);
void intel_fbc_pre_update(struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc);
void intel_fbc_disable(struct drm_i915_private *dev_priv);
void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
@ -1345,6 +1355,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);


/* intel_lvds.c */
@ -1559,6 +1570,7 @@ void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);

/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev,
@ -1617,5 +1629,6 @@ void shmem_file_delete(struct file *filep);
void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
#define synchronize_irq(x)

#endif /* __INTEL_DRV_H__ */

@ -384,7 +384,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
enum port port;

if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp;
u32 temp;

temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
@ -426,7 +426,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
u32 temp;

/* de-assert ip_tg_enable signal */
temp = I915_READ(port_ctrl);
@ -478,8 +478,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)

DRM_DEBUG_KMS("\n");

intel_dsi_prepare(encoder);
intel_enable_dsi_pll(encoder);
intel_dsi_prepare(encoder);

/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
@ -634,7 +634,6 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 val;

DRM_DEBUG_KMS("\n");

@ -642,9 +641,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)

intel_dsi_clear_device_ready(encoder);

val = I915_READ(DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
if (!IS_BROXTON(dev_priv)) {
u32 val;

val = I915_READ(DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
}

drm_panel_unprepare(intel_dsi->panel);

@ -709,7 +712,7 @@ out:
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
u32 pclk = 0;
u32 pclk;
DRM_DEBUG_KMS("\n");

pipe_config->has_dsi_encoder = true;
@ -720,12 +723,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
*/
pipe_config->dpll_hw_state.dpll_md = 0;

if (IS_BROXTON(encoder->base.dev))
pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
else if (IS_VALLEYVIEW(encoder->base.dev) ||
IS_CHERRYVIEW(encoder->base.dev))
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);

pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
if (!pclk)
return;

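The per-platform pclk helpers collapse into a single intel_dsi_get_pclk(), which dispatches internally. A plausible shape for the consolidated helper (a sketch; the committed body in intel_dsi_pll.c may differ, and only the symbol names appear in this diff):

u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
	/* dispatch to the per-platform routines, now private to the PLL file */
	if (IS_BROXTON(encoder->base.dev))
		return bxt_get_dsi_pclk(encoder, pipe_bpp);

	return vlv_get_dsi_pclk(encoder, pipe_bpp);
}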
@ -787,10 +785,9 @@ static void set_dsi_timings(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = intel_crtc->config->pipe_bpp;
unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;

u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@ -861,7 +858,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum port port;
unsigned int bpp = intel_crtc->config->pipe_bpp;
unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;

@ -34,6 +34,8 @@
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2

int dsi_pixel_format_bpp(int pixel_format);
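Exporting dsi_pixel_format_bpp() lets callers map the VBT pixel-format code to bits per pixel instead of open-coding the table; the RGB666 -> 18 and RGB565 -> 16 cases can be seen being deleted from vbt_panel_init() further down. Illustrative use:

/* e.g. an RGB666 panel: the shared helper returns 18 bpp */
unsigned int bpp = dsi_pixel_format_bpp(VID_MODE_FORMAT_RGB666);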

struct intel_dsi_host;

struct intel_dsi {
@ -126,8 +128,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)

extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);

@ -234,28 +234,33 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
if (!gtable[gpio].init) {
/* program the function */
/* FIXME: remove constant below */
vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
0x2000CC00);
gtable[gpio].init = 1;
}

val = 0x4 | action;

/* pull up/down */
vlv_gpio_nc_write(dev_priv, pad, val);
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
mutex_unlock(&dev_priv->sb_lock);

out:
return data;
}

static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
{
return data + *(data + 6) + 7;
}

typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
NULL, /* reserved */
mipi_exec_send_packet,
mipi_exec_delay,
mipi_exec_gpio,
NULL, /* status read; later */
[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
[MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
};

/*
@ -265,107 +270,114 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/

static const char * const seq_name[] = {
"UNDEFINED",
"MIPI_SEQ_ASSERT_RESET",
"MIPI_SEQ_INIT_OTP",
"MIPI_SEQ_DISPLAY_ON",
"MIPI_SEQ_DISPLAY_OFF",
"MIPI_SEQ_DEASSERT_RESET"
[MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
[MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF",
[MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON",
[MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF",
};

static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
static const char *sequence_name(enum mipi_seq seq_id)
{
fn_mipi_elem_exec mipi_elem_exec;
int index;
if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
return seq_name[seq_id];
else
return "(unknown)";
}

if (!data)
static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;

if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
return;

DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
data = dev_priv->vbt.dsi.sequence[seq_id];
if (!data) {
DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
seq_id, sequence_name(seq_id));
return;
}

/* go to the first element of the sequence */
WARN_ON(*data != seq_id);

DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
seq_id, sequence_name(seq_id));

/* Skip Sequence Byte. */
data++;

/* parse each byte till we reach end of sequence byte - 0x00 */
/* Skip Size of Sequence. */
if (dev_priv->vbt.dsi.seq_version >= 3)
data += 4;

while (1) {
index = *data;
mipi_elem_exec = exec_elem[index];
if (!mipi_elem_exec) {
DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
u8 operation_byte = *data++;
u8 operation_size = 0;

if (operation_byte == MIPI_SEQ_ELEM_END)
break;

if (operation_byte < ARRAY_SIZE(exec_elem))
mipi_elem_exec = exec_elem[operation_byte];
else
mipi_elem_exec = NULL;

/* Size of Operation. */
if (dev_priv->vbt.dsi.seq_version >= 3)
operation_size = *data++;

if (mipi_elem_exec) {
data = mipi_elem_exec(intel_dsi, data);
} else if (operation_size) {
/* We have size, skip. */
DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
DRM_ERROR("Unsupported MIPI operation byte %u\n",
operation_byte);
return;
}

/* goto element payload */
data++;

/* execute the element-specific routines */
data = mipi_elem_exec(intel_dsi, data);

/*
* After processing the element, data should point to
* next element or end of sequence
* check if we have reached end of sequence
*/
if (*data == 0x00)
break;
}
}

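The rewritten walker relies on version-3 VBT sequence blocks carrying explicit element sizes, which is what lets it skip unknown operations. The byte layout, as implied by the parsing code above (an inference from this diff, not a quotation of the VBT spec):

/*
 * v3 MIPI sequence block, as parsed by generic_exec_sequence():
 *
 *   u8  sequence id                 (cross-checked against seq_id)
 *   u32 total size of the sequence  (skipped: data += 4)
 *   elements, repeated:
 *     u8 operation byte             (MIPI_SEQ_ELEM_*; END terminates)
 *     u8 operation size             (v3+ only; allows skipping unknown ops)
 *     u8 payload[operation size]
 */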
static int vbt_panel_prepare(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;

sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);

sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
generic_exec_sequence(intel_dsi, sequence);
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);

return 0;
}

static int vbt_panel_unprepare(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;

sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);
generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);

return 0;
}

static int vbt_panel_enable(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;

sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
generic_exec_sequence(intel_dsi, sequence);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);

return 0;
}

static int vbt_panel_disable(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;

sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
generic_exec_sequence(intel_dsi, sequence);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);

return 0;
}
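These four callbacks are what the panel framework invokes around modesets; a sketch of how they would be collected into the panel's function table (the struct layout follows the drm_panel API of this kernel era; the real driver also wires up a .get_modes hook, omitted here):

static const struct drm_panel_funcs vbt_panel_funcs_sketch = {
	.disable = vbt_panel_disable,
	.unprepare = vbt_panel_unprepare,
	.prepare = vbt_panel_prepare,
	.enable = vbt_panel_enable,
	/* .get_modes is required as well in practice; omitted in this sketch */
};
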
@ -428,10 +440,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;

if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
bits_per_pixel = 16;
bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);

intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
@ -685,6 +694,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)

/* This is cheating a bit with the cleanup. */
vbt_panel = kzalloc(sizeof(*vbt_panel), GFP_KERNEL);
if (!vbt_panel)
return NULL;

vbt_panel->intel_dsi = intel_dsi;
drm_panel_init(&vbt_panel->panel);
@ -30,15 +30,7 @@
#include "i915_drv.h"
#include "intel_dsi.h"

#define DSI_HSS_PACKET_SIZE 4
#define DSI_HSE_PACKET_SIZE 4
#define DSI_HSA_PACKET_EXTRA_SIZE 6
#define DSI_HBP_PACKET_EXTRA_SIZE 6
#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
#define DSI_HFP_PACKET_EXTRA_SIZE 6
#define DSI_EOTP_PACKET_SIZE 4

static int dsi_pixel_format_bpp(int pixel_format)
int dsi_pixel_format_bpp(int pixel_format)
{
int bpp;

@ -71,77 +63,6 @@ static const u32 lfsr_converts[] = {
71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
};

#ifdef DSI_CLK_FROM_RR

static u32 dsi_rr_formula(const struct drm_display_mode *mode,
int pixel_format, int video_mode_format,
int lane_count, bool eotp)
{
u32 bpp;
u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
u32 bytes_per_line, bytes_per_frame;
u32 num_frames;
u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
u32 dsi_bit_clock_hz;
u32 dsi_clk;

bpp = dsi_pixel_format_bpp(pixel_format);

hactive = mode->hdisplay;
vactive = mode->vdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;

vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;

hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);

bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;

/*
* XXX: Need to accurately calculate LP to HS transition timeout and add
* it to bytes_per_line/bytes_per_frame.
*/

if (eotp && video_mode_format == VIDEO_MODE_BURST)
bytes_per_line += DSI_EOTP_PACKET_SIZE;

bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
vactive * bytes_per_line + vfp * bytes_per_line;

if (eotp &&
(video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
bytes_per_frame += DSI_EOTP_PACKET_SIZE;

num_frames = drm_mode_vrefresh(mode);
bytes_per_x_frames = num_frames * bytes_per_frame;

bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;

/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
dsi_clk = dsi_bit_clock_hz / 1000;

if (eotp && video_mode_format == VIDEO_MODE_BURST)
dsi_clk *= 2;

return dsi_clk;
}

#else

/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
@ -155,8 +76,6 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
return dsi_clk_khz;
}

#endif

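The body of dsi_clk_from_pclk() is elided by the hunk above; the quantity it returns follows the usual DSI bandwidth relation, sketched here on the assumption that bpp comes from dsi_pixel_format_bpp() and both clocks are in kHz:

/* Sketch: total link bandwidth is pclk * bpp, spread across the lanes. */
static u32 dsi_clk_from_pclk_sketch(u32 pclk, int bpp, int lane_count)
{
	/* DIV_ROUND_CLOSEST is the kernel's rounding-divide helper */
	return DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
}
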
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct dsi_mnp *dsi_mnp, int target_dsi_clk)
{
@ -322,7 +241,7 @@ static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
bpp, pipe_bpp);
}

u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@ -384,7 +303,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}

u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
u32 pclk;
u32 dsi_clk;
@ -419,6 +338,14 @@ u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}

u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
if (IS_BROXTON(encoder->base.dev))
return bxt_dsi_get_pclk(encoder, pipe_bpp);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp);
}

static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
File diff suppressed because it is too large
@ -35,7 +35,7 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
//#include <linux/vga_switcheroo.h>
#include <linux/vga_switcheroo.h>

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@ -170,8 +170,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper,

out:
mutex_unlock(&dev->struct_mutex);
if (!IS_ERR_OR_NULL(fb))
drm_framebuffer_unreference(fb);
return ret;
}

@ -392,8 +390,8 @@ retry:
continue;
}

encoder = connector->encoder;
if (!encoder || WARN_ON(!encoder->crtc)) {
encoder = connector->state->best_encoder;
if (!encoder || WARN_ON(!connector->state->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;

@ -406,7 +404,7 @@ retry:

num_connectors_enabled++;

new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);

/*
* Make sure we're not trying to drive multiple connectors
@ -452,17 +450,22 @@ retry:
* usually contains. But since our current
* code puts a mode derived from the post-pfit timings
* into crtc->mode this works out correctly.
*
* This is crtc->mode and not crtc->state->mode for the
* fastboot check to work correctly. crtc_state->mode has
* I915_MODE_FLAG_INHERITED, which we clear to force check
* state.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
modes[i] = &encoder->crtc->mode;
modes[i] = &connector->state->crtc->mode;
}
crtcs[i] = new_crtc;

DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
connector->state->crtc->base.id,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");

@ -521,9 +524,9 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ifbdev->helper);

if (ifbdev->fb) {
drm_framebuffer_unregister_private(&ifbdev->fb->base);
drm_framebuffer_remove(&ifbdev->fb->base);
}
drm_framebuffer_unregister_private(&ifbdev->fb->base);
drm_framebuffer_remove(&ifbdev->fb->base);
}
}

/*
@ -682,6 +685,7 @@ int intel_fbdev_init(struct drm_device *dev)
ifbdev->helper.atomic = true;

dev_priv->fbdev = ifbdev;

drm_fb_helper_single_add_all_connectors(&ifbdev->helper);

return 0;
@ -92,14 +92,14 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)

assert_spin_locked(&dev_priv->irq_lock);

if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;

I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);

DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
@ -43,9 +43,10 @@ struct i915_guc_client {
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
uint32_t wq_head;

/* GuC submission statistics & status */
uint64_t submissions[I915_NUM_RINGS];
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
@ -88,6 +89,8 @@ struct intel_guc {
uint32_t log_flags;
struct drm_i915_gem_object *log_obj;

struct drm_i915_gem_object *ads_obj;

struct drm_i915_gem_object *ctx_pool_obj;
struct ida ctx_ids;

@ -103,8 +106,8 @@ struct intel_guc {
uint32_t action_fail; /* Total number of failures */
int32_t action_err; /* Last error code */

uint64_t submissions[I915_NUM_RINGS];
uint32_t last_seqno[I915_NUM_RINGS];
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
};

/* intel_guc_loader.c */
@ -122,5 +125,6 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
int i915_guc_wq_check_space(struct i915_guc_client *client);

#endif
@ -39,10 +39,18 @@
#define GUC_CTX_PRIORITY_HIGH 1
#define GUC_CTX_PRIORITY_KMD_NORMAL 2
#define GUC_CTX_PRIORITY_NORMAL 3
#define GUC_CTX_PRIORITY_NUM 4

#define GUC_MAX_GPU_CONTEXTS 1024
#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS

#define GUC_RENDER_ENGINE 0
#define GUC_VIDEO_ENGINE 1
#define GUC_BLITTER_ENGINE 2
#define GUC_VIDEOENHANCE_ENGINE 3
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)

/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
@ -81,11 +89,14 @@
#define GUC_CTL_CTXINFO 0
#define GUC_CTL_CTXNUM_IN16_SHIFT 0
#define GUC_CTL_BASE_ADDR_SHIFT 12

#define GUC_CTL_ARAT_HIGH 1
#define GUC_CTL_ARAT_LOW 2

#define GUC_CTL_DEVICE_INFO 3
#define GUC_CTL_GTTYPE_SHIFT 0
#define GUC_CTL_COREFAMILY_SHIFT 7

#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
@ -97,9 +108,12 @@
#define GUC_LOG_ISR_PAGES 3
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12

#define GUC_CTL_PAGE_FAULT_CONTROL 5

#define GUC_CTL_WA 6
#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)

#define GUC_CTL_FEATURE 7
#define GUC_CTL_VCS2_ENABLED (1 << 0)
#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
@ -109,6 +123,7 @@
#define GUC_CTL_PREEMPTION_LOG (1 << 5)
#define GUC_CTL_ENABLE_SLPC (1 << 7)
#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)

#define GUC_CTL_DEBUG 8
#define GUC_LOG_VERBOSITY_SHIFT 0
#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
@ -118,9 +133,19 @@
/* Verbosity range-check limits, without the shift */
#define GUC_LOG_VERBOSITY_MIN 0
#define GUC_LOG_VERBOSITY_MAX 3
#define GUC_LOG_VERBOSITY_MASK 0x0000000f
#define GUC_LOG_DESTINATION_MASK (3 << 4)
#define GUC_LOG_DISABLED (1 << 6)
#define GUC_PROFILE_ENABLED (1 << 7)
#define GUC_WQ_TRACK_ENABLED (1 << 8)
#define GUC_ADS_ENABLED (1 << 9)
#define GUC_DEBUG_RESERVED (1 << 10)
#define GUC_ADS_ADDR_SHIFT 11
#define GUC_ADS_ADDR_MASK 0xfffff800

#define GUC_CTL_RSRVD 9

#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */

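The log-verbosity and ADS fields above are all packed into the single GUC_CTL_DEBUG parameter dword; an illustrative sketch of that packing (mirroring, not quoting, the loader code further down):

/* Illustrative only: assembling the GUC_CTL_DEBUG boot parameter. */
static u32 guc_ctl_debug_sketch(u32 verbosity, bool ads_enabled, u32 ads_offset)
{
	u32 val = verbosity << GUC_LOG_VERBOSITY_SHIFT;

	if (ads_enabled) {
		/* the ADS address field is stored in whole pages */
		val |= (ads_offset >> PAGE_SHIFT) << GUC_ADS_ADDR_SHIFT;
		val |= GUC_ADS_ENABLED;
	}
	return val;
}
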
/**
* DOC: GuC Firmware Layout
@ -267,7 +292,7 @@ struct guc_context_desc {
u64 db_trigger_phy;
u16 db_id;

struct guc_execlist_context lrc[I915_NUM_RINGS];
struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];

u8 attribute;

@ -299,6 +324,99 @@ struct guc_context_desc {
#define GUC_POWER_D2 3
#define GUC_POWER_D3 4

/* Scheduling policy settings */

/* Reset engine upon preempt failure */
#define POLICY_RESET_ENGINE (1<<0)
/* Preempt to idle on quantum expiry */
#define POLICY_PREEMPT_TO_IDLE (1<<1)

#define POLICY_MAX_NUM_WI 15

struct guc_policy {
/* Time for one workload to execute. (in micro seconds) */
u32 execution_quantum;
u32 reserved1;

/* Time to wait for a preemption request to complete before issuing a
* reset. (in micro seconds). */
u32 preemption_time;

/* How much time to allow to run after the first fault is observed.
* Then preempt afterwards. (in micro seconds) */
u32 fault_time;

u32 policy_flags;
u32 reserved[2];
} __packed;

struct guc_policies {
struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];

/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
* Typically 1000s of micro seconds (example only, not granularity). */
u32 dpc_promote_time;

/* Must be set to take these new values. */
u32 is_valid;

/* Max number of WIs to process per call. A large value may keep CS
* idle. */
u32 max_num_work_items;

u32 reserved[19];
} __packed;

/* GuC MMIO reg state struct */

#define GUC_REGSET_FLAGS_NONE 0x0
#define GUC_REGSET_POWERCYCLE 0x1
#define GUC_REGSET_MASKED 0x2
#define GUC_REGSET_ENGINERESET 0x4
#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8
#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10

#define GUC_REGSET_MAX_REGISTERS 25
#define GUC_MMIO_WHITE_LIST_START 0x24d0
#define GUC_MMIO_WHITE_LIST_MAX 12
#define GUC_S3_SAVE_SPACE_PAGES 10

struct guc_mmio_regset {
struct __packed {
u32 offset;
u32 value;
u32 flags;
} registers[GUC_REGSET_MAX_REGISTERS];

u32 values_valid;
u32 number_of_registers;
} __packed;

struct guc_mmio_reg_state {
struct guc_mmio_regset global_reg;
struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];

/* MMIO registers that are set as non privileged */
struct __packed {
u32 mmio_start;
u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
u32 count;
} mmio_white_list[GUC_MAX_ENGINES_NUM];
} __packed;

/* GuC Additional Data Struct */

struct guc_ads {
u32 reg_state_addr;
u32 reg_state_buffer;
u32 golden_context_lrca;
u32 scheduler_policies;
u32 reserved0[3];
u32 eng_state_size[GUC_MAX_ENGINES_NUM];
u32 reserved2[4];
} __packed;

/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,

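A sketch of how a guc_policies block could be populated, one default policy per priority/engine slot; the loop bounds and is_valid handshake come from the definitions above, while the timing values are placeholders:

static void guc_policies_init_sketch(struct guc_policies *policies)
{
	struct guc_policy *policy;
	u32 p, i;

	policies->dpc_promote_time = 500000;   /* placeholder, in us */
	policies->max_num_work_items = POLICY_MAX_NUM_WI;

	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
		for (i = 0; i < GUC_MAX_ENGINES_NUM; i++) {
			policy = &policies->policy[p][i];
			policy->execution_quantum = 1000000; /* placeholder */
			policy->preemption_time = 500000;    /* placeholder */
			policy->fault_time = 250000;         /* placeholder */
			policy->policy_flags = 0;
		}
	}

	policies->is_valid = 1;   /* must be set for the GuC to accept them */
}
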
@ -165,6 +165,13 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
}

if (guc->ads_obj) {
u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
>> PAGE_SHIFT;
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
}

/* If GuC submission is enabled, set up additional parameters here */
if (i915.enable_guc_submission) {
u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
@ -192,7 +199,7 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
* the value matches either of two values representing completion
* of the GuC boot process.
*
* This is used for polling the GuC status in a wait_for_atomic()
* This is used for polling the GuC status in a wait_for()
* loop below.
*/
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
@ -252,14 +259,14 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

/*
* Spin-wait for the DMA to complete & the GuC to start up.
* Wait for the DMA to complete & the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver will attempt to fall back to
* execlist mode if this happens.)
*/
ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);
ret = wait_for(guc_ucode_response(dev_priv, &status), 100);

DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
I915_READ(DMA_CTRL), status);
@ -438,6 +445,7 @@ fail:

direct_interrupts_to_host(dev_priv);
i915_guc_submission_disable(dev);
i915_guc_submission_fini(dev);

return err;
}
@ -554,10 +562,12 @@ fail:
DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
guc_fw->guc_fw_path, err);

mutex_lock(&dev->struct_mutex);
obj = guc_fw->guc_fw_obj;
if (obj)
drm_gem_object_unreference(&obj->base);
guc_fw->guc_fw_obj = NULL;
mutex_unlock(&dev->struct_mutex);

release_firmware(fw); /* OK even if fw is NULL */
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
@ -624,10 +634,11 @@ void intel_guc_ucode_fini(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;

mutex_lock(&dev->struct_mutex);
direct_interrupts_to_host(dev_priv);
i915_guc_submission_disable(dev);
i915_guc_submission_fini(dev);

mutex_lock(&dev->struct_mutex);
if (guc_fw->guc_fw_obj)
drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
guc_fw->guc_fw_obj = NULL;
@ -115,9 +115,9 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)

static i915_reg_t
hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder,
enum hdmi_infoframe_type type,
int i)
enum transcoder cpu_transcoder,
enum hdmi_infoframe_type type,
int i)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
@ -836,6 +836,22 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}

void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);

if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;

DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
enable ? "Enabling" : "Disabling");

drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
adapter, enable);
}

static void intel_hdmi_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@ -845,6 +861,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 hdmi_val;

intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);

hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
@ -1143,6 +1161,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}

intel_hdmi->set_infoframes(&encoder->base, false, NULL);

intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}

static void g4x_disable_hdmi(struct intel_encoder *encoder)
@ -1168,27 +1188,42 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder)
intel_disable_hdmi(encoder);
}

static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);

if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
if (IS_G4X(dev_priv))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
return 300000;
else
return 225000;
}

static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));

if (respect_downstream_limits) {
if (hdmi->dp_dual_mode.max_tmds_clock)
max_tmds_clock = min(max_tmds_clock,
hdmi->dp_dual_mode.max_tmds_clock);
if (!hdmi->has_hdmi_sink)
max_tmds_clock = min(max_tmds_clock, 165000);
}

return max_tmds_clock;
}

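Worked through, the clamping above gives min(300000, 165000) = 165000 kHz for a HSW/BDW source behind a type 1 DVI adaptor that reports a 165000 kHz TMDS limit. The arithmetic in isolation (illustrative, not the driver's own function):

static int effective_tmds_limit(int source_khz, int adaptor_khz, bool hdmi_sink)
{
	int limit = source_khz;

	if (adaptor_khz)
		limit = min(limit, adaptor_khz);  /* dongle limit, if known */
	if (!hdmi_sink)
		limit = min(limit, 165000);       /* DVI ceiling */
	return limit;
}
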
static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_dvi_limit)
int clock, bool respect_downstream_limits)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);

if (clock < 25000)
return MODE_CLOCK_LOW;
if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits))
return MODE_CLOCK_HIGH;

/* BXT DPLL can't generate 223-240 MHz */
@ -1210,11 +1245,19 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
enum drm_mode_status status;
int clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;

clock = mode->clock;

if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;

if (clock > max_dotclk)
return MODE_CLOCK_HIGH;

if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;

@ -1304,7 +1347,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK &&
hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@ -1344,10 +1387,35 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;

intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;

kfree(to_intel_connector(connector)->detect_edid);
to_intel_connector(connector)->detect_edid = NULL;
}

static void
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);

if (type == DRM_DP_DUAL_MODE_NONE ||
type == DRM_DP_DUAL_MODE_UNKNOWN)
return;

hdmi->dp_dual_mode.type = type;
hdmi->dp_dual_mode.max_tmds_clock =
drm_dp_dual_mode_max_tmds_clock(type, adapter);

DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
drm_dp_get_dual_mode_type_name(type),
hdmi->dp_dual_mode.max_tmds_clock);
}

static bool
intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{
@ -1357,13 +1425,15 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
bool connected = false;

if (force) {
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);

edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));

intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
intel_hdmi_dp_dual_mode_detect(connector);

intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
}

to_intel_connector(connector)->detect_edid = edid;
@ -2049,6 +2119,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;

if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
return;

drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
@ -2232,6 +2307,7 @@ void intel_hdmi_init(struct drm_device *dev,
intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;

intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
@ -598,7 +598,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
mutex_lock(&dev_priv->gmbus_mutex);

if (bus->force_bit)
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
else
ret = do_gmbus_xfer(adapter, msgs, num);

File diff suppressed because it is too large
@ -25,8 +25,6 @@
#define _INTEL_LRC_H_

#define GEN8_LR_CONTEXT_ALIGN 4096
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x07

/* Execlists regs */
#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
@ -40,6 +38,22 @@
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)

/* The docs specify that the write pointer wraps around after 5h, "After status
* is written out to the last available status QW at offset 5h, this pointer
* wraps to 0."
*
* Therefore, one must infer that even though there are 3 bits available, 6 and
* 7 appear to be reserved.
*/
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x7
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
#define GEN8_CSB_WRITE_PTR(csb_status) \
(((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
#define GEN8_CSB_READ_PTR(csb_status) \
(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)

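Both pointers come out of one status dword via the masks above; a usage sketch, with the number of new CSB entries computed modulo the six-entry buffer (for example, status 0x0503 gives read pointer 5 and write pointer 3, hence 4 new entries after the wrap):

static u32 csb_new_entries_sketch(u32 status)
{
	u32 read_ptr = GEN8_CSB_READ_PTR(status);    /* 0x0503 -> 5 */
	u32 write_ptr = GEN8_CSB_WRITE_PTR(status);  /* 0x0503 -> 3 */

	/* distance from read to write pointer, wrapping at GEN8_CSB_ENTRIES */
	return (write_ptr + GEN8_CSB_ENTRIES - read_ptr) % GEN8_CSB_ENTRIES;
}
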
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
@ -84,21 +98,25 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)

void intel_lr_context_free(struct intel_context *ctx);
uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct drm_i915_gem_request *req);
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine);
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring);

u32 intel_execlists_ctx_id(struct intel_context *ctx,
struct intel_engine_cs *ring);

/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);

void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);

@ -31,6 +31,7 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@ -122,6 +123,10 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,

pipe_config->base.adjusted_mode.flags |= flags;

if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;

/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
tmp = I915_READ(PFIT_CONTROL);
@ -478,11 +483,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
if (!HAS_PCH_SPLIT(dev)) {
drm_modeset_lock_all(dev);
if (!HAS_PCH_SPLIT(dev))
intel_display_resume(dev);
drm_modeset_unlock_all(dev);
}

dev_priv->modeset_restore = MODESET_DONE;

@ -330,8 +330,8 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
/* Program the control registers */
for_each_ring(ring, dev_priv, ring_id) {
ret = emit_mocs_control_table(req, &t, ring_id);
if (ret)
return ret;
if (ret)
return ret;
}

/* Now program the l3cc registers */

@ -37,6 +37,8 @@
void getrawmonotonic(struct timespec *ts);

/**
* DOC: RC6
*
* RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while at this stage. This
* stage is entered automatically when the GPU is idle when RC6 support is
@ -551,7 +553,7 @@ static const struct intel_watermark_params i845_wm_info = {
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
* @wm: chip FIFO params
* @pixel_size: display pixel size
* @cpp: bytes per pixel
* @latency_ns: memory latency for the platform
*
* Calculate the watermark level (the level at which the display plane will
@ -567,8 +569,7 @@ static const struct intel_watermark_params i845_wm_info = {
*/
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
const struct intel_watermark_params *wm,
int fifo_size,
int pixel_size,
int fifo_size, int cpp,
unsigned long latency_ns)
{
long entries_required, wm_size;
@ -579,7 +580,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
* clocks go from a few thousand to several hundred thousand.
* latency is usually a few thousand
*/
entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
1000;
entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

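As a concrete instance of the formula above (illustrative numbers: 148500 kHz pixel clock, 4 bytes per pixel, 5000 ns latency, 64-byte cachelines):

static long wm_entries_example(void)
{
	long entries = ((148500 / 1000) * 4 * 5000) / 1000;  /* = 2960 bytes */

	return DIV_ROUND_UP(entries, 64);                    /* = 47 lines */
}
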
@ -643,13 +644,13 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
int clock = adjusted_mode->crtc_clock;

/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->display_sr);
cpp, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
@ -659,7 +660,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
/* cursor SR */
wm = intel_calculate_wm(clock, &pineview_cursor_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->cursor_sr);
cpp, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= FW_WM(wm, CURSOR_SR);
@ -668,7 +669,7 @@
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->display_hpll_disable);
cpp, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= FW_WM(wm, HPLL_SR);
@ -677,7 +678,7 @@
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->cursor_hpll_disable);
cpp, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
@ -701,7 +702,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
{
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
int htotal, hdisplay, clock, pixel_size;
int htotal, hdisplay, clock, cpp;
int line_time_us, line_count;
int entries, tlb_miss;

@ -716,10 +717,10 @@
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@ -731,7 +732,7 @@
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
entries = line_count * crtc->cursor->state->crtc_w * cpp;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@ -787,7 +788,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
{
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
int hdisplay, htotal, pixel_size, clock;
int hdisplay, htotal, cpp, clock;
unsigned long line_time_us;
int line_count, line_size;
int small, large;
@ -803,21 +804,21 @@
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
line_size = hdisplay * cpp;

/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
small = ((clock * cpp / 1000) * latency_ns) / 1000;
large = line_count * line_size;

entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*display_wm = entries + display->guard_size;

/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
entries = line_count * cpp * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;

@ -909,13 +910,13 @@ enum vlv_wm_level {
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
unsigned int pipe_htotal,
unsigned int horiz_pixels,
unsigned int bytes_per_pixel,
unsigned int cpp,
unsigned int latency)
{
unsigned int ret;

ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
ret = (ret + 1) * horiz_pixels * cpp;
ret = DIV_ROUND_UP(ret, 64);

return ret;
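vlv_wm_method2() is the classic "method 2" calculation: the number of lines that scan out during the latency window, times the bytes fetched per line, in 64-byte blocks. The same arithmetic standalone, with a worked call in the comment:

/* e.g. 148500 kHz, htotal 2200, 1920 wide, cpp 4, latency 300 -> 360 blocks */
static unsigned int wm_method2_sketch(unsigned int pixel_rate,
				      unsigned int pipe_htotal,
				      unsigned int horiz_pixels,
				      unsigned int cpp,
				      unsigned int latency)
{
	unsigned int lines = (latency * pixel_rate) / (pipe_htotal * 10000);

	return DIV_ROUND_UP((lines + 1) * horiz_pixels * cpp, 64);
}
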
@ -944,7 +945,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
int level)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int clock, htotal, pixel_size, width, wm;
int clock, htotal, cpp, width, wm;

if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
@ -952,7 +953,7 @@
if (!state->visible)
return 0;

pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
clock = crtc->config->base.adjusted_mode.crtc_clock;
htotal = crtc->config->base.adjusted_mode.crtc_htotal;
width = crtc->config->pipe_src_w;
@ -968,7 +969,7 @@
*/
wm = 63;
} else {
wm = vlv_wm_method2(clock, htotal, width, pixel_size,
wm = vlv_wm_method2(clock, htotal, width, cpp,
dev_priv->wm.pri_latency[level] * 10);
}

@ -1442,7 +1443,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
unsigned long line_time_us;
int entries;

@ -1450,7 +1451,7 @@

/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
cpp * hdisplay;
entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
@ -1460,7 +1461,7 @@
entries, srwm);

entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * crtc->cursor->state->crtc_w;
cpp * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@ -1521,7 +1522,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
if (IS_GEN2(dev))
cpp = 4;

@ -1543,7 +1544,7 @@
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
if (IS_GEN2(dev))
cpp = 4;

@ -1589,7 +1590,7 @@
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
unsigned long line_time_us;
int entries;

@ -1597,7 +1598,7 @@

/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
cpp * hdisplay;
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
@ -1677,6 +1678,9 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
if (pipe_h < pfit_h)
pipe_h = pfit_h;

if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate;

pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
pfit_w * pfit_h);
}
@ -1685,15 +1689,14 @@
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
uint32_t latency)
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
uint64_t ret;

if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;

ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
ret = (uint64_t) pixel_rate * cpp * latency;
ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

return ret;
@ -1701,24 +1704,37 @@

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t horiz_pixels, uint8_t cpp,
uint32_t latency)
{
uint32_t ret;

if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;
if (WARN_ON(!pipe_htotal))
return UINT_MAX;

ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
ret = (ret + 1) * horiz_pixels * cpp;
ret = DIV_ROUND_UP(ret, 64) + 2;
return ret;
}
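ilk_wm_method1() above is the small-buffer variant: bytes fetched during the latency window, in 64-byte units, with latency in 0.1 us steps. Worked through with illustrative numbers in the comment:

/* e.g. 148500 kHz * 4 cpp * 50 (5.0 us) / (64 * 10000), rounded up, + 2 = 49 */
static uint32_t wm_method1_sketch(uint32_t pixel_rate, uint8_t cpp,
				  uint32_t latency)
{
	uint64_t bytes = (uint64_t)pixel_rate * cpp * latency;

	return DIV_ROUND_UP_ULL(bytes, 64 * 10000) + 2;
}
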
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
uint8_t bytes_per_pixel)
uint8_t cpp)
{
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
/*
* Neither of these should be possible since this function shouldn't be
* called if the CRTC is off or the plane is invisible. But let's be
* extra paranoid to avoid a potential divide-by-zero if we screw up
* elsewhere in the driver.
*/
if (WARN_ON(!cpp))
return 0;
if (WARN_ON(!horiz_pixels))
return 0;

return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}

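The FBC watermark above rescales the primary watermark from 64-byte blocks to line widths, now guarded against zero operands; for instance pri_val = 47 on a 1920-wide, 4-cpp plane:

static uint32_t fbc_wm_example(void)
{
	return DIV_ROUND_UP(47 * 64, 1920 * 4) + 2;   /* == 3 */
}
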
struct ilk_wm_maximums {
@ -1737,13 +1753,14 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
uint32_t mem_value,
bool is_lp)
{
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
int cpp = pstate->base.fb ?
drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;

if (!cstate->base.active || !pstate->visible)
return 0;

method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

if (!is_lp)
return method1;
@ -1751,8 +1768,7 @@
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
bpp,
mem_value);
cpp, mem_value);

return min(method1, method2);
}
@ -1765,18 +1781,18 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
int cpp = pstate->base.fb ?
drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;

if (!cstate->base.active || !pstate->visible)
return 0;

method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
bpp,
mem_value);
cpp, mem_value);
return min(method1, method2);
}

@ -1809,12 +1825,13 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t pri_val)
{
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
int cpp = pstate->base.fb ?
drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

if (!cstate->base.active || !pstate->visible)
return 0;

return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
}

static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
@ -2007,14 +2024,19 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}

static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
hsw_compute_linetime_wm(struct drm_device *dev,
struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode =
&cstate->base.adjusted_mode;
u32 linetime, ips_linetime;

if (!intel_crtc->active)
if (!cstate->base.active)
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
if (WARN_ON(dev_priv->cdclk_freq == 0))
return 0;

/* The WM are computed with base on how long it takes to fill a single
@ -2104,7 +2126,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
for (i = level + 1; i <= max_level; i++)
wm[i] = 0;

break;
break;
}
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
@ -2323,8 +2345,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
pristate, sprstate, curstate, &pipe_wm->wm[0]);

if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev,
&intel_crtc->base);
pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);

/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
@ -2860,25 +2881,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
int y)
{
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
uint32_t width = 0, height = 0;

width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;

if (intel_rotation_90_or_270(pstate->rotation))
swap(width, height);

/* for planar format */
if (fb->pixel_format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
return intel_crtc->config->pipe_src_w *
intel_crtc->config->pipe_src_h *
return width * height *
drm_format_plane_cpp(fb->pixel_format, 0);
else /* uv-plane data rate */
return (intel_crtc->config->pipe_src_w/2) *
(intel_crtc->config->pipe_src_h/2) *
return (width / 2) * (height / 2) *
drm_format_plane_cpp(fb->pixel_format, 1);
}

/* for packed formats */
return intel_crtc->config->pipe_src_w *
intel_crtc->config->pipe_src_h *
drm_format_plane_cpp(fb->pixel_format, 0);
return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
}

/*
@ -2957,8 +2981,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_framebuffer *fb = plane->state->fb;
int id = skl_wm_plane_id(intel_plane);

if (fb == NULL)
if (!to_intel_plane_state(plane->state)->visible)
continue;

if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;

@ -2984,7 +3009,7 @@
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);

if (pstate->fb == NULL)
if (!to_intel_plane_state(pstate)->visible)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
@ -3034,26 +3059,25 @@ static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)

/*
* The max latency should be 257 (max the punit can code is 255 and we add 2us
* for the read latency) and bytes_per_pixel should always be <= 8, so that
* for the read latency) and cpp should always be <= 8, so that
* should allow pixel_rate up to ~2 GHz which seems sufficient since max
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
uint32_t latency)
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
uint32_t wm_intermediate_val, ret;

if (latency == 0)
return UINT_MAX;

wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
wm_intermediate_val = latency * pixel_rate * cpp / 512;
ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

return ret;
}

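skl_wm_method1() keeps the same shape as the ILK version but works in 512-byte blocks with latency in whole microseconds; a sketch with a worked call in the comment:

/* e.g. 540000 kHz, cpp 4, latency 15 us -> DIV_ROUND_UP(63281, 1000) = 64 */
static uint32_t skl_wm_method1_sketch(uint32_t pixel_rate, uint8_t cpp,
				      uint32_t latency)
{
	uint32_t val = latency * pixel_rate * cpp / 512;

	return DIV_ROUND_UP(val, 1000);
}
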
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t horiz_pixels, uint8_t cpp,
uint64_t tiling, uint32_t latency)
{
uint32_t ret;
@ -3063,7 +3087,7 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
if (latency == 0)
return UINT_MAX;

plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
plane_bytes_per_line = horiz_pixels * cpp;

if (tiling == I915_FORMAT_MOD_Y_TILED ||
tiling == I915_FORMAT_MOD_Yf_TILED) {
@ -3108,28 +3132,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
{
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
struct intel_plane_state *intel_pstate =
to_intel_plane_state(plane->state);
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines;
uint32_t selected_result;
uint8_t bytes_per_pixel;
uint8_t cpp;
uint32_t width = 0, height = 0;

if (latency == 0 || !cstate->base.active || !fb)
if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
return false;

bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;

if (intel_rotation_90_or_270(plane->state->rotation))
swap(width, height);

cpp = drm_format_plane_cpp(fb->pixel_format, 0);
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
bytes_per_pixel,
latency);
cpp, latency);
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
cstate->pipe_src_w,
bytes_per_pixel,
width,
cpp,
fb->modifier[0],
latency);

plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
plane_bytes_per_line = width * cpp;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);

if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@ -3137,11 +3169,11 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t min_scanlines = 4;
uint32_t y_tile_minimum;
if (intel_rotation_90_or_270(plane->state->rotation)) {
int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);

switch (bpp) {
switch (cpp) {
case 1:
min_scanlines = 16;
break;
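For 90/270 rotation on Y-tiled surfaces the minimum scanline count is derived from the bytes per pixel of the plane actually fetched; NV12 uses its second plane (the chroma plane, 2 bytes per sample) rather than plane 0. Only the cpp == 1 bucket is visible in the hunk above, so the full mapping below is an assumption extrapolated from it:

/* Illustrative: pick the rotated Y-tile minimum scanlines from cpp.
 * The 2 and default buckets are assumed, not shown in this diff. */
static unsigned int min_scanlines_for_cpp(int cpp)
{
	switch (cpp) {
	case 1:
		return 16;
	case 2:
		return 8;
	default:
		return 4;
	}
}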
@ -3630,9 +3662,11 @@ static void ilk_compute_wm_config(struct drm_device *dev,
}
}

static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
static void ilk_program_watermarks(struct intel_crtc_state *cstate)
{
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
@ -3665,7 +3699,6 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)

static void ilk_update_wm(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);

@ -3685,7 +3718,7 @@ static void ilk_update_wm(struct drm_crtc *crtc)

intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;

ilk_program_watermarks(dev_priv);
ilk_program_watermarks(cstate);
}

static void skl_pipe_wm_active_state(uint32_t val,
@ -4073,7 +4106,7 @@ void intel_update_watermarks(struct drm_crtc *crtc)
dev_priv->display.update_wm(crtc);
}

/**
/*
* Lock protecting IPS related data structures
*/
DEFINE_SPINLOCK(mchdev_lock);
@ -4109,11 +4142,13 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
static void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;

spin_lock_irq(&mchdev_lock);

rgvmodectl = I915_READ(MEMMODECTL);

/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@ -4546,21 +4581,71 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
}
if (HAS_RC6p(dev))
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));

else
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}

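onoff() is the small kernel string helper that replaces the repeated ternaries above; it is equivalent to:

/* Equivalent of the kernel's onoff() helper. */
static inline const char *onoff(int v)
{
	return v ? "on" : "off";
}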
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;

if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
enable_rc6 = false;
}

/*
* The exact context size is not known for BXT, so assume a page size
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
(rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
dev_priv->gtt.stolen_reserved_size))) {
DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
enable_rc6 = false;
}

if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
enable_rc6 = false;
}

if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_HW_ENABLE)) &&
((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
!(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
enable_rc6 = false;
}

return enable_rc6;
}

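The address check above boils down to a single interval test: the RC6 context base must sit inside the stolen-memory range the BIOS reserved, with at least a page of headroom since the exact context size on BXT is unknown. As a standalone sketch:

#include <stdbool.h>

#define PAGE_SIZE 4096UL

/* Illustrative: is [ctx_base, ctx_base + PAGE_SIZE) fully inside the
 * reserved stolen range [res_base, res_base + res_size)? */
static bool rc6_ctx_in_reserved(unsigned long ctx_base,
				unsigned long res_base,
				unsigned long res_size)
{
	return ctx_base >= res_base &&
	       ctx_base + PAGE_SIZE <= res_base + res_size;
}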
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
/* No RC6 before Ironlake and code is gone for ilk. */
if (INTEL_INFO(dev)->gen < 6)
return 0;

if (!enable_rc6)
return 0;

if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
DRM_INFO("RC6 disabled by BIOS\n");
return 0;
}

/* Respect the kernel parameter if it is set */
if (enable_rc6 >= 0) {
int mask;
@ -4730,8 +4815,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off");
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
@ -4750,8 +4834,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
if (NEEDS_WaRsDisableCoarsePowerGating(dev))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@ -5169,8 +5252,6 @@ static void cherryview_setup_pctx(struct drm_device *dev)
u32 pcbr;
int pctx_size = 32*1024;

WARN_ON(!mutex_is_locked(&dev->struct_mutex));

pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
@ -5192,7 +5273,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
u32 pcbr;
int pctx_size = 24*1024;

WARN_ON(!mutex_is_locked(&dev->struct_mutex));
mutex_lock(&dev->struct_mutex);

pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
@ -5220,7 +5301,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
pctx = i915_gem_object_create_stolen(dev, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
return;
goto out;
}

pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
@ -5229,6 +5310,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
mutex_unlock(&dev->struct_mutex);
}

static void valleyview_cleanup_pctx(struct drm_device *dev)
@ -5238,7 +5320,7 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
if (WARN_ON(!dev_priv->vlv_pctx))
return;

drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
dev_priv->vlv_pctx = NULL;
}

@ -6047,7 +6129,6 @@ void intel_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
/*
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
@ -6182,8 +6263,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
return;

if (IS_IRONLAKE_M(dev)) {
mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
mutex_lock(&dev->struct_mutex);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
@ -7019,6 +7100,7 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.program_watermarks = ilk_program_watermarks;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
@ -7184,9 +7266,10 @@ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

div = vlv_gpu_freq_div(czclk_freq) / 2;
div = vlv_gpu_freq_div(czclk_freq);
if (div < 0)
return div;
div /= 2;

return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
@ -7195,9 +7278,10 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

mul = vlv_gpu_freq_div(czclk_freq) / 2;
mul = vlv_gpu_freq_div(czclk_freq);
if (mul < 0)
return mul;
mul /= 2;

/* CHV needs even values */
return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
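Both hunks above fix the same bug: vlv_gpu_freq_div() can return a negative errno, and halving it before the sign check turns, say, -22 into -11, which no longer propagates as a recognizable error code. Checking first and dividing after keeps the errno intact; schematically:

extern int vlv_gpu_freq_div(int czclk_freq); /* may return -EINVAL */

/* Illustrative: test for an error before scaling the divider. */
static int scaled_freq_div(int czclk_freq)
{
	int div = vlv_gpu_freq_div(czclk_freq);

	if (div < 0)
		return div;	/* still a valid -EINVAL */
	return div / 2;
}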

@ -225,7 +225,12 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}

drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
if (dev_priv->psr.link_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE);
}

static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@ -283,6 +288,9 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
if (IS_HASWELL(dev))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;

if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
val |= EDP_PSR_TP1_TIME_2500us;
else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
@ -343,8 +351,15 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)

dev_priv->psr.source_ok = false;

if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
/*
* HSW spec explicitly says PSR is tied to port A.
* BDW+ platforms with DDI implementation of PSR have different
* PSR registers per transcoder and we only implement transcoder EDP
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on the port A.
*/
if (HAS_DDI(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return false;
}

@ -353,6 +368,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}

if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
!dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
return false;
}

if (IS_HASWELL(dev) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
@ -366,12 +387,6 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}

if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
return false;
}

dev_priv->psr.source_ok = true;
return true;
}
@ -778,8 +793,8 @@ void intel_psr_flush(struct drm_device *dev,
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

/* By definition flush = invalidate + flush */
if (frontbuffer_bits)
intel_psr_exit(dev);
if (frontbuffer_bits)
intel_psr_exit(dev);

// if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
// if (!work_busy(&dev_priv->psr.work.work))
@ -802,6 +817,36 @@ void intel_psr_init(struct drm_device *dev)
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

/* Per platform default */
if (i915.enable_psr == -1) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
i915.enable_psr = 1;
else
i915.enable_psr = 0;
}

/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
/* On VLV and CHV only standby mode is supported. */
dev_priv->psr.link_standby = true;
else
/* For new platforms let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

/* Override link_standby x link_off defaults */
if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
DRM_DEBUG_KMS("PSR: Forcing link standby\n");
dev_priv->psr.link_standby = true;
}
if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
DRM_DEBUG_KMS("PSR: Forcing main link off\n");
dev_priv->psr.link_standby = false;
}

INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
}

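Read together, the i915.enable_psr values act as a switch layered on the per-platform default: -1 picks the platform default, 0 disables PSR, 1 enables it with the default link behaviour, 2 forces main-link standby and 3 forces the main link off. A compact restatement of the override step (illustrative, not driver code):

#include <stdbool.h>

/* Illustrative: resolve the effective link_standby setting from the
 * module parameter, mirroring the override logic in intel_psr_init(). */
static bool resolve_link_standby(int enable_psr, bool platform_default)
{
	if (enable_psr == 2)
		return true;	/* force main link standby */
	if (enable_psr == 3)
		return false;	/* force main link off */
	return platform_default;
}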
@ -746,9 +746,9 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)

ret = i915_gem_render_state_init(req);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
return ret;

return ret;
return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
@ -789,6 +789,22 @@ static int wa_add(struct drm_i915_private *dev_priv,

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_workarounds *wa = &dev_priv->workarounds;
const uint32_t index = wa->hw_whitelist_count[ring->id];

if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
return -EINVAL;

WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
i915_mmio_reg_offset(reg));
wa->hw_whitelist_count[ring->id]++;

return 0;
}

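Each successful call consumes one of the engine's RING_MAX_NONPRIV_SLOTS hardware whitelist slots, so callers must check the return value and bail out on -EINVAL. The call pattern, as it appears in the gen9/skl/bxt paths later in this diff:

/* Whitelist a register for userspace access; fails once the
 * engine's non-privileged slots are exhausted. */
ret = wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
if (ret)
	return ret;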
static int gen8_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@ -894,6 +910,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
int ret;

/* WaEnableLbsSlaRetryTimerDecrement:skl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
@ -911,13 +928,13 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);

/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
@ -929,7 +946,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
*/
}

/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX);
@ -964,6 +981,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
/* WaDisableSTUnitPowerOptimization:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

/* WaOCLCoherentLineFlush:skl,bxt */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));

/* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
ret= wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
if (ret)
return ret;

/* WaAllowUMDToModifyHDCChicken1:skl,bxt */
ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
if (ret)
return ret;

return 0;
}

@ -1019,6 +1050,16 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
if (ret)
return ret;

/*
* Actual WA is to disable percontext preemption granularity control
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
}

if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
@ -1060,7 +1101,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
BDW_DISABLE_HDC_INVALIDATION);
}

/* WaBarrierPerformanceFixDisable:skl */
/* WaBarrierPerformanceFixDisable:skl */
if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
@ -1072,6 +1113,11 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
if (ret)
return ret;

return skl_tune_iz_hashing(ring);
}

@ -1107,6 +1153,20 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;

ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
if (ret)
return ret;
}

return 0;
}

@ -1118,6 +1178,7 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
WARN_ON(ring->id != RCS);

dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[RCS] = 0;

if (IS_BROADWELL(dev))
return bdw_init_workarounds(ring);
@ -1868,15 +1929,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
offset = cs_offset;
}

ret = intel_ring_begin(req, 4);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;

intel_ring_emit(ring, MI_BATCH_BUFFER);
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);

return 0;
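The dword count passed to intel_ring_begin() must match what is subsequently emitted: the old MI_BATCH_BUFFER sequence reserved and wrote four dwords (command, start, end, MI_NOOP), while MI_BATCH_BUFFER_START only needs the command and the start address, hence begin(req, 2). A self-contained model of that contract (types and names are illustrative):

#include <assert.h>
#include <stdint.h>

/* Illustrative model: a reservation that each emit consumes, with
 * advance checking the space was used exactly. */
struct ring_model { int reserved, emitted; };

static void begin(struct ring_model *r, int dwords)
{
	r->reserved = dwords;
	r->emitted = 0;
}

static void emit(struct ring_model *r, uint32_t dw)
{
	(void)dw;
	r->emitted++;
}

static void advance(struct ring_model *r)
{
	assert(r->emitted == r->reserved);
}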
@ -1997,11 +2056,36 @@ static int init_phys_status_page(struct intel_engine_cs *ring)

void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
iounmap(ringbuf->virtual_start);
if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
vunmap(ringbuf->virtual_start);
else
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
ringbuf->vma = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
}

static u32 *vmap_obj(struct drm_i915_gem_object *obj)
{
struct sg_page_iter sg_iter;
struct page **pages;
void *addr;
int i;

pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
return NULL;

i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
pages[i++] = sg_page_iter_page(&sg_iter);

addr = vmap(pages, i, 0, PAGE_KERNEL);
drm_free_large(pages);

return addr;
}

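vmap_obj() builds a kernel virtual mapping over the object's backing pages, which enables the LLC path below: on LLC platforms the CPU caches are coherent with the GPU, so the ring can live in an ordinary cacheable mapping instead of a write-combined GTT mapping and no longer consumes mappable aperture. The decision, stripped to its core (the stubs are placeholders, not driver API):

#include <stdbool.h>

/* Hypothetical stubs standing in for the two mapping strategies. */
extern void *map_cacheable_cpu(void *obj);   /* vmap of object pages */
extern void *map_wc_through_gtt(void *obj);  /* ioremap_wc aperture  */

/* Illustrative: use a cacheable CPU mapping when coherent (LLC) and
 * not backed by stolen memory; otherwise go through the GTT aperture. */
static void *map_ringbuffer(void *obj, bool has_llc, bool is_stolen)
{
	if (has_llc && !is_stolen)
		return map_cacheable_cpu(obj);
	return map_wc_through_gtt(obj);
}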
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
@ -2011,22 +2095,46 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
unsigned flags = PIN_OFFSET_BIAS | 4096;
int ret;

ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
return ret;
if (HAS_LLC(dev_priv) && !obj->stolen) {
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
if (ret)
return ret;

ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret) {
i915_gem_object_ggtt_unpin(obj);
return ret;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret) {
i915_gem_object_ggtt_unpin(obj);
return ret;
}

ringbuf->virtual_start = vmap_obj(obj);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);
return -ENOMEM;
}
} else {
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
flags | PIN_MAPPABLE);
if (ret)
return ret;

ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret) {
i915_gem_object_ggtt_unpin(obj);
return ret;
}

/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);

ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);
return -EINVAL;
}
}

ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);
return -EINVAL;
}
ringbuf->vma = i915_gem_obj_to_ggtt(obj);

return 0;
}
@ -2089,7 +2197,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
engine->name, ret);
engine->name, ret);
list_del(&ring->link);
kfree(ring);
return ERR_PTR(ret);
@ -2171,19 +2279,19 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
dev_priv = to_i915(ring->dev);

if (ring->buffer) {
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);

intel_unpin_ringbuffer_obj(ring->buffer);
intel_ringbuffer_free(ring->buffer);
ring->buffer = NULL;
intel_unpin_ringbuffer_obj(ring->buffer);
intel_ringbuffer_free(ring->buffer);
ring->buffer = NULL;
}

if (ring->cleanup)
ring->cleanup(ring);

if (I915_NEED_GFX_HWS(ring->dev)) {
cleanup_status_page(ring);
cleanup_status_page(ring);
} else {
WARN_ON(ring->id != RCS);
cleanup_phys_status_page(ring);
@ -2643,6 +2751,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)

ring->name = "render ring";
ring->id = RCS;
ring->exec_id = I915_EXEC_RENDER;
ring->mmio_base = RENDER_RING_BASE;

if (INTEL_INFO(dev)->gen >= 8) {
@ -2791,6 +2900,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)

ring->name = "bsd ring";
ring->id = VCS;
ring->exec_id = I915_EXEC_BSD;

ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
@ -2867,6 +2977,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)

ring->name = "bsd2 ring";
ring->id = VCS2;
ring->exec_id = I915_EXEC_BSD;

ring->write_tail = ring_write_tail;
ring->mmio_base = GEN8_BSD2_RING_BASE;
@ -2897,6 +3008,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)

ring->name = "blitter ring";
ring->id = BCS;
ring->exec_id = I915_EXEC_BLT;

ring->mmio_base = BLT_RING_BASE;
ring->write_tail = ring_write_tail;
@ -2954,6 +3066,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)

ring->name = "video enhancement ring";
ring->id = VECS;
ring->exec_id = I915_EXEC_VEBOX;

ring->mmio_base = VEBOX_RING_BASE;
ring->write_tail = ring_write_tail;

@ -93,11 +93,13 @@ struct intel_ring_hangcheck {
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
u32 instdone[I915_NUM_INSTDONE_REG];
};

struct intel_ringbuffer {
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
struct i915_vma *vma;

struct intel_engine_cs *ring;
struct list_head link;
@ -147,14 +149,16 @@ struct i915_ctx_workarounds {
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
RCS = 0x0,
VCS,
RCS = 0,
BCS,
VECS,
VCS2
VCS,
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
unsigned int guc_id;
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
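The enum reorder keeps the two VCS instances adjacent so _VCS(n) can address them arithmetically; a standalone check of that invariant:

#include <assert.h>

/* Illustrative copy of the reordered engine ids and the _VCS() rule. */
enum ring_id_example { RCS = 0, BCS, VCS, VCS2, VECS };
#define _VCS(n) (VCS + (n))

int main(void)
{
	assert(_VCS(0) == VCS);
	assert(_VCS(1) == VCS2);	/* only holds if instances are adjacent */
	return 0;
}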
@ -268,6 +272,8 @@ struct intel_engine_cs {
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct drm_i915_gem_request *request);
int (*emit_flush)(struct drm_i915_gem_request *request,
@ -305,7 +311,6 @@ struct intel_engine_cs {

wait_queue_head_t irq_queue;

struct intel_context *default_context;
struct intel_context *last_context;

struct intel_ring_hangcheck hangcheck;
@ -406,7 +411,7 @@ intel_write_status_page(struct intel_engine_cs *ring,
ring->status_page.page_addr[reg] = value;
}

/**
/*
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
@ -423,6 +428,7 @@ intel_write_status_page(struct intel_engine_cs *ring,
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

@ -284,6 +284,13 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
gen8_irq_power_well_pre_disable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@ -309,6 +316,14 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (power_well->data == SKL_DISP_PW_2)
gen8_irq_power_well_pre_disable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
@ -334,6 +349,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,

} else {
if (enable_requested) {
hsw_power_well_pre_disable(dev_priv);
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
@ -456,15 +472,19 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
*/
}

static void gen9_set_dc_state_debugmask_memory_up(
struct drm_i915_private *dev_priv)
static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
uint32_t val;
uint32_t val, mask;

mask = DC_STATE_DEBUG_MASK_MEMORY_UP;

if (IS_BROXTON(dev_priv))
mask |= DC_STATE_DEBUG_MASK_CORES;

/* The below bit doesn't need to be cleared ever afterwards */
val = I915_READ(DC_STATE_DEBUG);
if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
if ((val & mask) != mask) {
val |= mask;
I915_WRITE(DC_STATE_DEBUG, val);
POSTING_READ(DC_STATE_DEBUG);
}
@ -525,9 +545,6 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
state = DC_STATE_EN_UPTO_DC5;

if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
gen9_set_dc_state_debugmask_memory_up(dev_priv);

val = I915_READ(DC_STATE_EN);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
@ -577,7 +594,8 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);

WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
"Platform doesn't support DC5.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

@ -613,7 +631,8 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;

WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
"Platform doesn't support DC6.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
@ -640,7 +659,8 @@ static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc5(dev_priv);

if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 && i915.enable_dc != 1)
assert_can_disable_dc6(dev_priv);

gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@ -668,7 +688,6 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
struct drm_device *dev = dev_priv->dev;
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
bool is_enabled, enable_requested, check_fuse_status = false;
@ -706,23 +725,15 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
state_mask = SKL_POWER_WELL_STATE(power_well->data);
is_enabled = tmp & state_mask;

if (!enable && enable_requested)
skl_power_well_pre_disable(dev_priv, power_well);

if (enable) {
if (!enable_requested) {
WARN((tmp & state_mask) &&
!I915_READ(HSW_PWR_WELL_BIOS),
"Invalid for power well status to be enabled, unless done by the BIOS, \
when request is to disable!\n");
if (power_well->data == SKL_DISP_PW_2) {
/*
* DDI buffer programming unnecessary during
* driver-load/resume as it's already done
* during modeset initialization then. It's
* also invalid here as encoder list is still
* uninitialized.
*/
if (!dev_priv->power_domains.initializing)
intel_prepare_ddi(dev);
}
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}

@ -736,11 +747,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
}
} else {
if (enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
}
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
}
}

if (check_fuse_status) {
if (power_well->data == SKL_DISP_PW_1) {
@ -828,7 +839,8 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 && i915.enable_dc != 1)
skl_enable_dc6(dev_priv);
else
gen9_enable_dc5(dev_priv);
@ -840,7 +852,8 @@ static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
if (power_well->count > 0) {
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
} else {
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 &&
i915.enable_dc != 1)
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
else
@ -993,6 +1006,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);

/* make sure we're done processing display irqs */
synchronize_irq(dev_priv->dev->irq);

vlv_power_sequencer_reset(dev_priv);
}

@ -1065,7 +1081,7 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
int i;

for (i = 0; i < power_domains->power_well_count; i++) {
struct i915_power_well *power_well;
struct i915_power_well *power_well;

power_well = &power_domains->power_wells[i];
if (power_well->data == power_well_id)
@ -1941,7 +1957,7 @@ void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;

if (!IS_SKYLAKE(dev_priv))
if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;

well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
@ -1955,7 +1971,7 @@ void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;

if (!IS_SKYLAKE(dev_priv))
if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;

well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
@ -2125,8 +2141,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,

skl_init_cdclk(dev_priv);

if (dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
gen9_set_dc_state_debugmask(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)

@ -1527,6 +1527,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@ -1537,6 +1538,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
if (intel_sdvo->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;

if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;

if (intel_sdvo->is_lvds) {
if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
return MODE_PANEL;

@ -24,8 +24,8 @@
* Eric Anholt <eric@anholt.net>
*/

/**
* @file SDVO command definitions and structures.
/*
* SDVO command definitions and structures.
*/

#define SDVO_OUTPUT_FIRST (0)
@ -66,39 +66,39 @@ struct intel_sdvo_caps {
#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
#define DTD_FLAG_INTERLACE (1 << 7)

/** This matches the EDID DTD structure, more or less */
/* This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
u16 clock; /**< pixel clock, in 10kHz units */
u8 h_active; /**< lower 8 bits (pixels) */
u8 h_blank; /**< lower 8 bits (pixels) */
u8 h_high; /**< upper 4 bits each h_active, h_blank */
u8 v_active; /**< lower 8 bits (lines) */
u8 v_blank; /**< lower 8 bits (lines) */
u8 v_high; /**< upper 4 bits each v_active, v_blank */
u16 clock; /* pixel clock, in 10kHz units */
u8 h_active; /* lower 8 bits (pixels) */
u8 h_blank; /* lower 8 bits (pixels) */
u8 h_high; /* upper 4 bits each h_active, h_blank */
u8 v_active; /* lower 8 bits (lines) */
u8 v_blank; /* lower 8 bits (lines) */
u8 v_high; /* upper 4 bits each v_active, v_blank */
} part1;

struct {
u8 h_sync_off; /**< lower 8 bits, from hblank start */
u8 h_sync_width; /**< lower 8 bits (pixels) */
/** lower 4 bits each vsync offset, vsync width */
u8 h_sync_off; /* lower 8 bits, from hblank start */
u8 h_sync_width; /* lower 8 bits (pixels) */
/* lower 4 bits each vsync offset, vsync width */
u8 v_sync_off_width;
/**
/*
* 2 high bits of hsync offset, 2 high bits of hsync width,
* bits 4-5 of vsync offset, and 2 high bits of vsync width.
*/
u8 sync_off_width_high;
u8 dtd_flags;
u8 sdvo_flags;
/** bits 6-7 of vsync offset at bits 6-7 */
/* bits 6-7 of vsync offset at bits 6-7 */
u8 v_sync_off_high;
u8 reserved;
} part2;
} __packed;

struct intel_sdvo_pixel_clock_range {
u16 min; /**< pixel clock, in 10kHz units */
u16 max; /**< pixel clock, in 10kHz units */
u16 min; /* pixel clock, in 10kHz units */
u16 max; /* pixel clock, in 10kHz units */
} __packed;

struct intel_sdvo_preferred_input_timing_args {
@ -144,7 +144,7 @@ struct intel_sdvo_preferred_input_timing_args {

#define SDVO_CMD_RESET 0x01

/** Returns a struct intel_sdvo_caps */
/* Returns a struct intel_sdvo_caps */
#define SDVO_CMD_GET_DEVICE_CAPS 0x02

#define SDVO_CMD_GET_FIRMWARE_REV 0x86
@ -152,7 +152,7 @@ struct intel_sdvo_preferred_input_timing_args {
# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2

/**
/*
* Reports which inputs are trained (managed to sync).
*
* Devices must have trained within 2 vsyncs of a mode change.
@ -164,10 +164,10 @@ struct intel_sdvo_get_trained_inputs_response {
unsigned int pad:6;
} __packed;

/** Returns a struct intel_sdvo_output_flags of active outputs. */
/* Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04

/**
/*
* Sets the current set of active outputs.
*
* Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
@ -175,7 +175,7 @@ struct intel_sdvo_get_trained_inputs_response {
*/
#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05

/**
/*
* Returns the current mapping of SDVO inputs to outputs on the device.
*
* Returns two struct intel_sdvo_output_flags structures.
@ -185,29 +185,29 @@ struct intel_sdvo_in_out_map {
u16 in0, in1;
};

/**
/*
* Sets the current mapping of SDVO inputs to outputs on the device.
*
* Takes two struct i380_sdvo_output_flags structures.
*/
#define SDVO_CMD_SET_IN_OUT_MAP 0x07

/**
/*
* Returns a struct intel_sdvo_output_flags of attached displays.
*/
#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b

/**
/*
* Returns a struct intel_sdvo_ouptut_flags of displays supporting hot plugging.
*/
#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c

/**
/*
* Takes a struct intel_sdvo_output_flags.
*/
#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d

/**
/*
* Returns a struct intel_sdvo_output_flags of displays with hot plug
* interrupts enabled.
*/
@ -221,7 +221,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
unsigned int pad:6;
} __packed;

/**
/*
* Selects which input is affected by future input commands.
*
* Commands affected include SET_INPUT_TIMINGS_PART[12],
@ -234,7 +234,7 @@ struct intel_sdvo_set_target_input_args {
unsigned int pad:7;
} __packed;

/**
/*
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
@ -280,7 +280,7 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6

/**
/*
* Generates a DTD based on the given width, height, and flags.
*
* This will be supported by any device supporting scaling or interlaced
@ -300,24 +300,24 @@ struct intel_sdvo_set_target_input_args {
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c

/** Returns a struct intel_sdvo_pixel_clock_range */
/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
/** Returns a struct intel_sdvo_pixel_clock_range */
/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e

/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f

/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)

#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
/** 6 bytes of bit flags for TV formats shared by all TV format functions */
/* 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
@ -376,7 +376,7 @@ struct intel_sdvo_tv_format {

#define SDVO_CMD_SET_TV_FORMAT 0x29

/** Returns the resolutiosn that can be used with the given TV format */
/* Returns the resolutiosn that can be used with the given TV format */
#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
struct intel_sdvo_sdtv_resolution_request {
unsigned int ntsc_m:1;
@ -539,7 +539,7 @@ struct intel_sdvo_hdtv_resolution_reply {
#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
/**
/*
* The panel power sequencing parameters are in units of milliseconds.
* The high fields are bits 8:9 of the 10-bit values.
*/

@ -129,17 +129,18 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
return val;
}

u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
SB_CRRDDA_NP, reg, &val);
return val;
}

void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv,
u8 port, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
SB_CRWRDA_NP, reg, &val);
}

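With the IOSF port folded into a parameter, the per-port wrappers (GPIO NC, GPS core, and the rest) collapse into two entry points. A former vlv_gps_core_read()/write() pair becomes, for example:

/* Illustrative: the generic sideband helpers take the IOSF port. */
val = vlv_iosf_sb_read(dev_priv, IOSF_PORT_GPS_CORE, reg);
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPS_CORE, reg, val);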
@ -171,20 +172,6 @@ void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
SB_CRWRDA_NP, reg, &val);
}

u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRRDDA_NP, reg, &val);
return val;
}

void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRWRDA_NP, reg, &val);
}

u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
{
u32 val = 0;

@ -183,28 +183,33 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
}

static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
skl_update_plane(struct drm_plane *drm_plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(drm_plane->state)->ckey;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
int scaler_id;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
const struct intel_scaler *scaler =
&crtc_state->scaler_state.scalers[plane_state->scaler_id];

plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
@ -213,14 +218,12 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);

rotation = drm_plane->state->rotation;
rotation = plane_state->base.rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);

stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);

scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;

/* Sizes are 0 based */
src_w--;
src_h--;
@ -241,9 +244,10 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);

if (intel_rotation_90_or_270(rotation)) {
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

/* stride: Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0], 0);
tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
stride = DIV_ROUND_UP(fb->height, tile_height);
plane_size = (src_w << 16) | src_h;
x_offset = stride * tile_height - y - (src_h + 1);
@ -261,13 +265,13 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);

/* program plane scaler */
if (scaler_id >= 0) {
if (plane_state->scaler_id >= 0) {
uint32_t ps_ctrl = 0;
int scaler_id = plane_state->scaler_id;

DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
crtc_state->scaler_state.scalers[scaler_id].mode;
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
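Throughout these refactored update hooks, plane_state->src is kept in 16.16 fixed point while dst is in whole pixels, which is why every src coordinate above is shifted right by 16 and the dst coordinates are used as-is. A minimal sketch of the conversion:

#include <stdint.h>

/* Illustrative: drm_rect-style src coordinates in 16.16 fixed point. */
struct rect { int32_t x1, y1, x2, y2; };

static void src_to_pixels(const struct rect *src,
			  uint32_t *x, uint32_t *y,
			  uint32_t *w, uint32_t *h)
{
	*x = src->x1 >> 16;
	*y = src->y1 >> 16;
	*w = (src->x2 - src->x1) >> 16;
	*h = (src->y2 - src->y1) >> 16;
}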
@ -339,24 +343,29 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
|
||||
}
|
||||
|
||||
static void
|
||||
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
int crtc_x, int crtc_y,
|
||||
unsigned int crtc_w, unsigned int crtc_h,
|
||||
uint32_t x, uint32_t y,
|
||||
uint32_t src_w, uint32_t src_h)
|
||||
vlv_update_plane(struct drm_plane *dplane,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_plane_state *plane_state)
|
||||
{
|
||||
struct drm_device *dev = dplane->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_plane *intel_plane = to_intel_plane(dplane);
|
||||
struct drm_framebuffer *fb = plane_state->base.fb;
|
||||
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
|
||||
int pipe = intel_plane->pipe;
|
||||
int plane = intel_plane->plane;
|
||||
u32 sprctl;
|
||||
unsigned long sprsurf_offset, linear_offset;
|
||||
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
const struct drm_intel_sprite_colorkey *key =
|
||||
&to_intel_plane_state(dplane->state)->ckey;
|
||||
u32 sprsurf_offset, linear_offset;
|
||||
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
|
||||
int crtc_x = plane_state->dst.x1;
|
||||
int crtc_y = plane_state->dst.y1;
|
||||
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
|
||||
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
|
||||
uint32_t x = plane_state->src.x1 >> 16;
|
||||
uint32_t y = plane_state->src.y1 >> 16;
|
||||
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
|
||||
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
|
||||
|
||||
sprctl = SP_ENABLE;
|
||||
|
||||
@ -418,20 +427,18 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
|
||||
crtc_w--;
|
||||
crtc_h--;
|
||||
|
||||
linear_offset = y * fb->pitches[0] + x * pixel_size;
|
||||
sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
|
||||
&x, &y,
|
||||
obj->tiling_mode,
|
||||
pixel_size,
|
||||
fb->pitches[0]);
|
||||
linear_offset = y * fb->pitches[0] + x * cpp;
|
||||
sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
|
||||
fb->modifier[0], cpp,
|
||||
fb->pitches[0]);
|
||||
linear_offset -= sprsurf_offset;
|
||||
|
||||
if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
|
||||
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
|
||||
sprctl |= SP_ROTATE_180;
|
||||
|
||||
x += src_w;
|
||||
y += src_h;
|
||||
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
|
||||
linear_offset += src_h * fb->pitches[0] + src_w * cpp;
|
||||
}

if (key->flags) {
@ -479,23 +486,28 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
}

static void
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
ivb_update_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(plane->state)->ckey;
u32 sprsurf_offset, linear_offset;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;

sprctl = SPRITE_ENABLE;

@ -548,22 +560,20 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;

linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset = y * fb->pitches[0] + x * cpp;
sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
linear_offset -= sprsurf_offset;

if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;

/* HSW and BDW do this automagically in hardware */
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
x += src_w;
y += src_h;
linear_offset += src_h * fb->pitches[0] +
src_w * pixel_size;
linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
}

@ -617,23 +627,28 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
}

static void
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
ilk_update_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(plane->state)->ckey;
u32 dvssurf_offset, linear_offset;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;

dvscntr = DVS_ENABLE;

@ -682,19 +697,18 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;

linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset = y * fb->pitches[0] + x * cpp;
dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
linear_offset -= dvssurf_offset;

if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;

x += src_w;
y += src_h;
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}

if (key->flags) {
@ -759,7 +773,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
int hscale, vscale;
int max_scale, min_scale;
bool can_scale;
int pixel_size;

if (!fb) {
state->visible = false;
@ -881,6 +894,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
/* Check size restrictions when scaling */
if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

WARN_ON(!can_scale);

@ -892,9 +906,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
if (src_w < 3 || src_h < 3)
state->visible = false;

pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
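Note: the display engine fetches whole 64-byte units, so the check above charges the fetch not just for src_w pixels but also for the start position's misalignment inside its 64-byte segment. A self-contained worked example:

/* Sketch: bytes the hardware must fetch per scanline, 64-byte granular. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cpp = 4, src_x = 13, src_w = 1024;

	/* 13 * 4 = 52 -> 52 bytes of misalignment, plus 4096 of payload */
	uint32_t width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
	printf("width_bytes = %u\n", width_bytes); /* 4148 */
	return 0;
}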

if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
@ -918,30 +930,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
return 0;
}

static void
intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->base.crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;

crtc = crtc ? crtc : plane->crtc;

if (state->visible) {
intel_plane->update_plane(plane, crtc, fb,
state->dst.x1, state->dst.y1,
drm_rect_width(&state->dst),
drm_rect_height(&state->dst),
state->src.x1 >> 16,
state->src.y1 >> 16,
drm_rect_width(&state->src) >> 16,
drm_rect_height(&state->src) >> 16);
} else {
intel_plane->disable_plane(plane, crtc);
}
}

int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@ -1123,7 +1111,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,

@ -329,13 +329,54 @@ static void intel_uncore_ellc_detect(struct drm_device *dev)
}
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
u32 dbg;

dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
return false;

__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
u32 cer;

cer = __raw_i915_read32(dev_priv, CLAIM_ER);
if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
return false;

__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
return fpga_check_for_unclaimed_mmio(dev_priv);

if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_check_for_unclaimed_mmio(dev_priv);

return false;
}
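Note: all three helpers above share one shape: read a status register, take a cheap early exit when nothing is flagged, otherwise clear the sticky bit and report. A self-contained sketch of that test-and-clear pattern (the register and bit are simulated stand-ins, not i915 definitions):

/* Sketch: sticky error bit that is read, then cleared by writing it back. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_dbg_reg = 1u << 31;   /* pretend hw latched an error */

static uint32_t reg_read(void) { return fake_dbg_reg; }
static void reg_write(uint32_t v) { fake_dbg_reg &= ~v; } /* write-1-to-clear */

static bool check_for_unclaimed(void)
{
	uint32_t dbg = reg_read();

	if (!(dbg & (1u << 31)))
		return false;          /* fast path: nothing latched */

	reg_write(1u << 31);           /* clear the sticky flag */
	return true;
}

int main(void)
{
	printf("first check: %d, second check: %d\n",
	       check_for_unclaimed(), check_for_unclaimed()); /* 1, 0 */
	return 0;
}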

static void __intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
@ -361,6 +402,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)

void intel_uncore_sanitize(struct drm_device *dev)
{
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
}
@ -571,7 +614,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
((reg) < 0x40000 &&\
((reg) < 0x40000 && \
!FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
@ -587,38 +630,38 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
i915_reg_t reg, bool read, bool before)
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const i915_reg_t reg,
const bool read,
const bool before)
{
const char *op = read ? "reading" : "writing to";
const char *when = before ? "before" : "after";

if (!i915.mmio_debug)
/* XXX. We limit the auto arming traces for mmio
* debugs on these platforms. There are just too many
* revealed by these and CI/Bat suffers from the noise.
* Please fix and then re-enable the automatic traces.
*/
if (i915.mmio_debug < 2 &&
(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;

if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, i915_mmio_reg_offset(reg));
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
if (WARN(check_for_unclaimed_mmio(dev_priv),
"Unclaimed register detected %s %s register 0x%x\n",
before ? "before" : "after",
read ? "reading" : "writing to",
i915_mmio_reg_offset(reg)))
i915.mmio_debug--; /* Only report the first N failures */
}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const i915_reg_t reg,
const bool read,
const bool before)
{
static bool mmio_debug_once = true;

if (i915.mmio_debug || !mmio_debug_once)
if (likely(!i915.mmio_debug))
return;

if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
i915.mmio_debug = mmio_debug_once--;
}
__unclaimed_reg_debug(dev_priv, reg, read, before);
}
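Note: the refactor splits the old single function into a tiny inline gate that costs one well-predicted branch when i915.mmio_debug is off, and an out-of-line __unclaimed_reg_debug() that does the expensive work. A self-contained sketch of the shape (names here are illustrative, not the driver's):

/* Sketch: inline fast path guarding an out-of-line slow path. */
#include <stdbool.h>

static bool debug_enabled;        /* stand-in for i915.mmio_debug */

static void __debug_slow_path(void)
{
	/* expensive checks live out of line, keeping callers small */
}

static inline void debug_fast_path(void)
{
	if (!debug_enabled)       /* likely() in the kernel version */
		return;
	__debug_slow_path();
}

int main(void)
{
	debug_fast_path();        /* no-op unless debugging is enabled */
	return 0;
}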

#define GEN2_READ_HEADER(x) \
@ -666,9 +709,11 @@ __gen2_read(64)
unsigned long irqflags; \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
unclaimed_reg_debug(dev_priv, reg, true, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
@ -701,11 +746,9 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (NEEDS_FORCE_WAKE(offset)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
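Note: after this change the HEADER/FOOTER macros alone bracket every access with the lock, the tracepoint, and the before/after unclaimed-register probes, so the per-generation bodies no longer repeat them. A rough hand expansion of gen6_read32 under the new macros (a sketch, not literal preprocessor output; GEN6_READ_HEADER is assumed to also derive offset from reg):

/* Sketch: what one generated reader looks like with the macros expanded. */
static u32 gen6_read32_expanded(struct drm_i915_private *dev_priv,
				i915_reg_t reg, bool trace)
{
	unsigned long irqflags;
	u32 val = 0;
	u32 offset = i915_mmio_reg_offset(reg);       /* from the header */

	assert_rpm_wakelock_held(dev_priv);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	unclaimed_reg_debug(dev_priv, reg, true, true);     /* before */

	if (NEEDS_FORCE_WAKE(offset))
		__force_wake_get(dev_priv, FORCEWAKE_RENDER);
	val = __raw_i915_read32(dev_priv, reg);

	unclaimed_reg_debug(dev_priv, reg, true, false);    /* after */
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
	return val;
}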

@ -746,14 +789,13 @@ chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
@ -767,7 +809,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}

@ -866,9 +907,11 @@ __gen2_write(64)
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_wakelock_held(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
unclaimed_reg_debug(dev_priv, reg, false, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
@ -894,13 +937,10 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t
if (NEEDS_FORCE_WAKE(offset)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}

@ -930,12 +970,9 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
__raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}

@ -989,7 +1026,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset) || \
is_gen9_shadowed(dev_priv, reg)) \
fw_engine = 0; \
@ -1004,8 +1040,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}

@ -1161,7 +1195,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
@ -1229,6 +1263,8 @@ void intel_uncore_init(struct drm_device *dev)
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);

dev_priv->uncore.unclaimed_mmio_check = 1;

switch (INTEL_INFO(dev)->gen) {
default:
case 9:
@ -1384,7 +1420,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
hs = &ctx->hang_stats;

if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
else
args->reset_count = 0;

@ -1586,13 +1622,26 @@ bool intel_has_gpu_reset(struct drm_device *dev)
return intel_get_gpu_reset(dev) != NULL;
}

void intel_uncore_check_errors(struct drm_device *dev)
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed register before interrupt\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
if (unlikely(i915.mmio_debug ||
dev_priv->uncore.unclaimed_mmio_check <= 0))
return false;

if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
i915.mmio_debug++;
dev_priv->uncore.unclaimed_mmio_check--;
return true;
}

return false;
}
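Note: the arming helper is designed to be polled from some periodic path; it fires at most unclaimed_mmio_check times, bumping i915.mmio_debug so the per-access probes above turn verbose exactly once. A sketch of a hypothetical caller (the function name is illustrative):

/* Sketch: periodic poll that arms one-shot verbose mmio reporting. */
static void periodic_mmio_health_check(struct drm_i915_private *dev_priv)
{
	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_DEBUG("one-shot unclaimed mmio reporting armed\n");
}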

@ -542,7 +542,6 @@ int init_display_kms(struct drm_device *dev, videomode_t *usermode)
struct drm_plane *plane;

int ret;
ENTER();

drm_for_each_plane(plane, dev)
{
@ -594,8 +593,6 @@ ENTER();
set_mode(dev, os_display->connector, os_display->crtc, usermode, false);
};

LEAVE();

return ret;
};

@ -20,7 +20,7 @@ static void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y)
cursor_state->crtc_x = x;
cursor_state->crtc_y = y;

intel_crtc_update_cursor(crtc, 1);
intel_crtc_update_cursor(crtc, cursor_state);
};

static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
@ -28,6 +28,8 @@ static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
struct drm_i915_private *dev_priv = os_display->ddev->dev_private;
struct drm_crtc *crtc = os_display->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *cursor_plane = crtc->cursor;
struct intel_plane_state *cursor_state = to_intel_plane_state(cursor_plane->state);

cursor_t *old;

@ -42,9 +44,12 @@ static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
else
intel_crtc->cursor_addr = (addr_t)cursor->cobj;

intel_crtc->base.cursor->state->crtc_w = 64;
intel_crtc->base.cursor->state->crtc_h = 64;
intel_crtc->base.cursor->state->rotation = 0;
cursor_state->visible = 1;

cursor_plane->state->crtc_w = 64;
cursor_plane->state->crtc_h = 64;
cursor_plane->state->rotation = 0;

mutex_unlock(&cursor_lock);

move_cursor_kms(cursor, crtc->cursor_x, crtc->cursor_y);

@ -16,7 +16,7 @@
#include "bitmap.h"
#include "i915_kos32.h"

#define DRV_NAME "i915 v4.5.7"
#define DRV_NAME "i915 v4.6.7"

#define I915_DEV_CLOSE 0
#define I915_DEV_INIT 1

@ -17,6 +17,9 @@ static LIST_HEAD(devices);

#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)

#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */

/*
* Translate the low bits of the PCI base
* to the resource type

@ -624,3 +624,53 @@ int fb_get_options(const char *name, char **option)

return retval;
}

void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot)
{
void *vaddr;
char *tmp;
int i;

vaddr = AllocKernelSpace(count << 12);
if(vaddr == NULL)
return NULL;

for(i = 0, tmp = vaddr; i < count; i++)
{
MapPage(tmp, page_to_phys(pages[i]), PG_SW);
tmp+= 4096;
};

return vaddr;
};
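Note: this KolibriOS vmap() builds a contiguous kernel virtual range over scattered physical pages with AllocKernelSpace()/MapPage(); the flags and prot arguments are accepted only for Linux API compatibility and ignored. A hedged usage sketch (zero_pages() is a hypothetical caller name):

/* Sketch: map n scattered pages, touch them, then release the range. */
static void zero_pages(struct page **pages, unsigned int n)
{
	void *va = vmap(pages, n, 0, PAGE_KERNEL); /* flags/prot unused here */

	if (va == NULL)
		return;

	memset(va, 0, n << 12);  /* n pages of 4096 bytes each */
	vunmap(va);
}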

void vunmap(const void *addr)
{
FreeKernelSpace((void*)addr);
}

void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}

void __iomem *ioremap_wc(resource_size_t offset, unsigned long size)
{
// return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_WRITEC|0x100);
return (void __iomem*) MapIoMem(offset, size, PG_SW|0x100);
}

void iounmap(volatile void __iomem *addr)
{
FreeKernelSpace((void*)addr);
}
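Note: ioremap_wc() currently falls back to a plain PG_SW mapping because the write-combining variant is commented out, so callers likely get an ordinary cacheable mapping rather than true WC. A hedged usage sketch of the map/unmap pair (the BAR address and size are illustrative):

/* Sketch: map a device register BAR uncached, then tear it down. */
static void mmio_map_example(void)
{
	void __iomem *regs = ioremap_nocache(0xE0000000, 0x100000);

	if (regs == NULL)
		return;

	/* ... access registers via readl()/writel() on regs ... */
	iounmap(regs);
}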

unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
{
// if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
// else
// memset(to, 0, n);
return n;
}