i915: 3.12-rc6

git-svn-id: svn://kolibrios.org@4104 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2013-10-26 13:34:57 +00:00
parent 4adaeb28fb
commit 7f2170cd0d
58 changed files with 15414 additions and 7057 deletions

View File

@ -0,0 +1,152 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <linux/export.h>
#include <drm/drmP.h>
extern int x86_clflush_size;
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
#if 0
static void
drm_clflush_page(struct page *page)
{
uint8_t *page_virtual;
unsigned int i;
const int size = boot_cpu_data.x86_clflush_size;
if (unlikely(page == NULL))
return;
page_virtual = kmap_atomic(page);
for (i = 0; i < PAGE_SIZE; i += size)
clflush(page_virtual + i);
kunmap_atomic(page_virtual);
}
static void drm_cache_flush_clflush(struct page *pages[],
unsigned long num_pages)
{
unsigned long i;
mb();
for (i = 0; i < num_pages; i++)
drm_clflush_page(*pages++);
mb();
}
static void
drm_clflush_ipi_handler(void *null)
{
wbinvd();
}
#endif
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
uint8_t *page_virtual;
unsigned int i, j;
page_virtual = AllocKernelSpace(4096);
if(page_virtual != NULL)
{
dma_addr_t *src, *dst;
u32 count;
for (i = 0; i < num_pages; i++)
{
mb();
// asm volatile("mfence");
MapPage(page_virtual,*pages++, 0x001);
for (j = 0; j < PAGE_SIZE; j += x86_clflush_size)
clflush(page_virtual + j);
mb();
}
FreeKernelSpace(page_virtual);
}
}
EXPORT_SYMBOL(drm_clflush_pages);
#if 0
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
struct sg_page_iter sg_iter;
mb();
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
mb();
return;
}
if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
void
drm_clflush_virt_range(char *addr, unsigned long length)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
char *end = addr + length;
mb();
for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
clflush(addr);
clflush(end - 1);
mb();
return;
}
if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
#endif
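
For orientation, a minimal sketch (not part of this commit) of the same cache-line-stride flush applied to one virtually contiguous range; it mirrors what drm_clflush_pages() above does for each page it maps, reusing the clflush() wrapper and x86_clflush_size declared at the top of this file.

static void clflush_virt_range_sketch(char *addr, unsigned long length)
{
	char *end = addr + length;

	mb();					/* order earlier writes before flushing */
	for (; addr < end; addr += x86_clflush_size)
		clflush(addr);			/* flush one cache line per iteration */
	clflush(end - 1);			/* cover a partially filled last line */
	mb();					/* ensure flushes complete before GPU access */
}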

View File

@ -121,13 +121,6 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
{
{ DRM_MODE_DITHERING_OFF, "Off" },
{ DRM_MODE_DITHERING_ON, "On" },
{ DRM_MODE_DITHERING_AUTO, "Automatic" },
};
/*
* Non-global properties, but "required" for certain connectors.
*/
@ -182,29 +175,29 @@ static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
struct drm_conn_prop_enum_list {
int type;
const char *name;
int count;
struct ida ida;
};
/*
* Connector and encoder types.
*/
static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
{ DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
{ DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
{ DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
{ DRM_MODE_CONNECTOR_VGA, "VGA" },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
{ DRM_MODE_CONNECTOR_Composite, "Composite" },
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
{ DRM_MODE_CONNECTOR_Component, "Component" },
{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
};
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@ -216,6 +209,22 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};
void drm_connector_ida_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
ida_init(&drm_connector_enum_list[i].ida);
}
void drm_connector_ida_destroy(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
ida_destroy(&drm_connector_enum_list[i].ida);
}
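
For reference, a minimal sketch of the ida lifecycle the connector code switches to above; the example_* names are illustrative, not from this commit.

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Allocate the smallest free ID >= 1; returns a negative errno on failure. */
int example_get_id(void)
{
	return ida_simple_get(&example_ida, 1, 0, GFP_KERNEL);
}

/* Release an ID so a later ida_simple_get() can hand it out again. */
void example_put_id(int id)
{
	ida_remove(&example_ida, id);
}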
const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
static char buf[32];
@ -673,20 +682,19 @@ void drm_mode_probed_add(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_probed_add);
/**
/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
* @mode: mode to remove
*
* Remove @mode from @connector's mode list, then free it.
*/
void drm_mode_remove(struct drm_connector *connector,
static void drm_mode_remove(struct drm_connector *connector,
struct drm_display_mode *mode)
{
list_del(&mode->head);
drm_mode_destroy(connector->dev, mode);
}
EXPORT_SYMBOL(drm_mode_remove);
/**
* drm_connector_init - Init a preallocated connector
@ -707,6 +715,8 @@ int drm_connector_init(struct drm_device *dev,
int connector_type)
{
int ret;
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
drm_modeset_lock_all(dev);
@ -719,7 +729,12 @@ int drm_connector_init(struct drm_device *dev,
connector->funcs = funcs;
connector->connector_type = connector_type;
connector->connector_type_id =
++drm_connector_enum_list[connector_type].count; /* TODO */
ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
if (connector->connector_type_id < 0) {
ret = connector->connector_type_id;
drm_mode_object_put(dev, &connector->base);
goto out;
}
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
@ -760,6 +775,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->modes, head)
drm_mode_remove(connector, mode);
ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
connector->connector_type_id);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
@ -777,6 +795,41 @@ void drm_connector_unplug_all(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_connector_unplug_all);
int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
const struct drm_bridge_funcs *funcs)
{
int ret;
drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
if (ret)
goto out;
bridge->dev = dev;
bridge->funcs = funcs;
list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
dev->mode_config.num_bridge++;
out:
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_bridge_init);
void drm_bridge_cleanup(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &bridge->base);
list_del(&bridge->head);
dev->mode_config.num_bridge--;
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_bridge_cleanup);
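
A hypothetical bridge driver would hook into this API as sketched below; the callback slots are the ones struct drm_bridge_funcs defines, and the helper changes later in this commit call them in the order disable, encoder off, post_disable on the way down, and pre_enable, encoder on, enable on the way up. All example_* names are illustrative.

static const struct drm_bridge_funcs example_bridge_funcs = {
	.mode_fixup   = example_bridge_mode_fixup,	/* validate/adjust the mode */
	.disable      = example_bridge_disable,		/* before the encoder is disabled */
	.post_disable = example_bridge_post_disable,	/* after the encoder is disabled */
	.mode_set     = example_bridge_mode_set,	/* after the encoder's mode_set */
	.pre_enable   = example_bridge_pre_enable,	/* before the encoder is enabled */
	.enable       = example_bridge_enable,		/* after the encoder is enabled */
};

/* Registration sketch: example_bridge is assumed to be allocated by the driver. */
ret = drm_bridge_init(dev, &example_bridge, &example_bridge_funcs);
if (ret)
	return ret;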
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
@ -1130,30 +1183,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
* drm_mode_create_dithering_property - create dithering property
* @dev: DRM device
*
* Called by a driver the first time it's needed, must be attached to desired
* connectors.
*/
int drm_mode_create_dithering_property(struct drm_device *dev)
{
struct drm_property *dithering_mode;
if (dev->mode_config.dithering_mode_property)
return 0;
dithering_mode =
drm_property_create_enum(dev, 0, "dithering",
drm_dithering_mode_enum_list,
ARRAY_SIZE(drm_dithering_mode_enum_list));
dev->mode_config.dithering_mode_property = dithering_mode;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_dithering_property);
/**
* drm_mode_create_dirty_property - create dirty property
* @dev: DRM device
@ -1186,6 +1215,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
total_objects += dev->mode_config.num_crtc;
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
total_objects += dev->mode_config.num_bridge;
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
@ -1194,6 +1224,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
group->num_bridges = 0;
return 0;
}
@ -1203,6 +1234,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_bridge *bridge;
int ret;
if ((ret = drm_mode_group_init(dev, group)))
@ -1219,6 +1251,11 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors++] = connector->base.id;
list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors + group->num_bridges++] =
bridge->base.id;
return 0;
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
@ -2604,10 +2641,22 @@ int drm_mode_getfb(struct drm_device *dev,
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle)
ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
else
if (fb->funcs->create_handle) {
if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
ret = fb->funcs->create_handle(fb, file_priv,
&r->handle);
} else {
/* GET_FB() is an unprivileged ioctl so we must not
* return a buffer-handle to non-master processes! For
* backwards-compatibility reasons, we cannot make
* GET_FB() privileged, so just return an invalid handle
* for non-masters. */
r->handle = 0;
ret = 0;
}
} else {
ret = -ENODEV;
}
drm_framebuffer_unreference(fb);
@ -3721,6 +3770,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.bridge_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);

View File

@ -257,10 +257,16 @@ drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder->bridge)
encoder->bridge->funcs->disable(encoder->bridge);
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
if (encoder->bridge)
encoder->bridge->funcs->post_disable(encoder->bridge);
}
/**
@ -424,6 +430,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
ret = encoder->bridge->funcs->mode_fixup(
encoder->bridge, mode, adjusted_mode);
if (!ret) {
DRM_DEBUG_KMS("Bridge fixup failed\n");
goto done;
}
}
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
@ -443,9 +459,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
if (encoder->bridge)
encoder->bridge->funcs->disable(encoder->bridge);
encoder_funcs = encoder->helper_private;
/* Disable the encoders as the first thing we do. */
encoder_funcs->prepare(encoder);
if (encoder->bridge)
encoder->bridge->funcs->post_disable(encoder->bridge);
}
drm_crtc_prepare_encoders(dev);
@ -469,6 +492,10 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
if (encoder->bridge && encoder->bridge->funcs->mode_set)
encoder->bridge->funcs->mode_set(encoder->bridge, mode,
adjusted_mode);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@ -479,9 +506,14 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
if (encoder->bridge)
encoder->bridge->funcs->pre_enable(encoder->bridge);
encoder_funcs = encoder->helper_private;
encoder_funcs->commit(encoder);
if (encoder->bridge)
encoder->bridge->funcs->enable(encoder->bridge);
}
/* Store real post-adjustment hardware mode. */
@ -830,6 +862,31 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
return dpms;
}
/* Helper which handles bridge ordering around encoder dpms */
static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_bridge *bridge = encoder->bridge;
struct drm_encoder_helper_funcs *encoder_funcs;
if (bridge) {
if (mode == DRM_MODE_DPMS_ON)
bridge->funcs->pre_enable(bridge);
else
bridge->funcs->disable(bridge);
}
encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
encoder_funcs->dpms(encoder, mode);
if (bridge) {
if (mode == DRM_MODE_DPMS_ON)
bridge->funcs->enable(bridge);
else
bridge->funcs->post_disable(bridge);
}
}
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
{
int dpms = DRM_MODE_DPMS_OFF;
@ -857,7 +914,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms;
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
return;
@ -865,6 +922,9 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
old_dpms = connector->dpms;
connector->dpms = mode;
if (encoder)
encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
@ -873,22 +933,14 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
if (encoder) {
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
}
if (encoder)
drm_helper_encoder_dpms(encoder, encoder_dpms);
}
/* from on to off, do encoder then crtc */
if (mode > old_dpms) {
if (encoder) {
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
}
if (encoder)
drm_helper_encoder_dpms(encoder, encoder_dpms);
if (crtc) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
@ -924,9 +976,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret;
int ret, encoder_dpms;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@ -946,10 +997,10 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
if(encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
encoder_dpms = drm_helper_choose_encoder_dpms(
encoder);
drm_helper_encoder_dpms(encoder, encoder_dpms);
}
crtc_funcs = crtc->helper_private;

View File

@ -0,0 +1,70 @@
/**
* \file drm_drv.c
* Generic driver template
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*
* To use this template, you must at least define the following (samples
* given for the MGA driver):
*
* \code
* #define DRIVER_AUTHOR "VA Linux Systems, Inc."
*
* #define DRIVER_NAME "mga"
* #define DRIVER_DESC "Matrox G200/G400"
* #define DRIVER_DATE "20001127"
*
* #define drm_x mga_##x
* \endcode
*/
/*
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_core_init(void)
{
int ret = -ENOMEM;
drm_global_init();
drm_connector_ida_init();
// idr_init(&drm_minors_idr);
return 0;
}

View File

@ -125,6 +125,9 @@ static struct edid_quirk {
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
};
/*
@ -931,6 +934,36 @@ static const struct drm_display_mode edid_cea_modes[] = {
.vrefresh = 100, },
};
/*
* HDMI 1.4 4k modes.
*/
static const struct drm_display_mode edid_4k_modes[] = {
/* 1 - 3840x2160@30Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4016, 4104, 4400, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, },
/* 2 - 3840x2160@25Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4896, 4984, 5280, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, },
/* 3 - 3840x2160@24Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, },
/* 4 - 4096x2160@24Hz (SMPTE) */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
4096, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, },
};
/*** DDC fetch and block validation ***/
static const u8 edid_header[] = {
@ -2287,7 +2320,6 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
return closure.modes;
}
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
@ -2298,10 +2330,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define EDID_CEA_YCRCB422 (1 << 4)
#define EDID_CEA_VCDB_QS (1 << 6)
/**
/*
* Search EDID for CEA extension block.
*/
u8 *drm_find_cea_extension(struct edid *edid)
static u8 *drm_find_cea_extension(struct edid *edid)
{
u8 *edid_ext = NULL;
int i;
@ -2322,7 +2354,6 @@ u8 *drm_find_cea_extension(struct edid *edid)
return edid_ext;
}
EXPORT_SYMBOL(drm_find_cea_extension);
/*
* Calculate the alternate clock for the CEA mode
@ -2380,6 +2411,54 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
}
EXPORT_SYMBOL(drm_match_cea_mode);
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
* specific block).
*
* It's almost like cea_mode_alternate_clock(); we just need to add an
* exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
* one.
*/
static unsigned int
hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
{
if (hdmi_mode->vdisplay == 2160 && hdmi_mode->hdisplay == 4096)
return hdmi_mode->clock;
return cea_mode_alternate_clock(hdmi_mode);
}
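
A worked example of the matching this enables: the 3840x2160@30 entry above has a 297000 kHz clock, and its NTSC-rate (29.97 Hz) variant runs at roughly 297000 × 1000/1001 ≈ 296703 kHz; drm_match_hdmi_mode() below compares KHZ2PICOS() of a candidate clock against both values, so either variant resolves to the same VIC.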
/*
* drm_match_hdmi_mode - look for an HDMI mode matching a given mode
* @to_match: display mode
*
* An HDMI mode is one defined in the HDMI vendor specific block.
*
* Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
*/
static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
{
u8 mode;
if (!to_match->clock)
return 0;
for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
unsigned int clock1, clock2;
/* Make sure to also match alternate clocks */
clock1 = hdmi_mode->clock;
clock2 = hdmi_mode_alternate_clock(hdmi_mode);
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks(to_match, hdmi_mode))
return mode + 1;
}
return 0;
}
static int
add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
{
@ -2397,18 +2476,26 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
* with the alternate clock for certain CEA modes.
*/
list_for_each_entry(mode, &connector->probed_modes, head) {
const struct drm_display_mode *cea_mode;
const struct drm_display_mode *cea_mode = NULL;
struct drm_display_mode *newmode;
u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
u8 mode_idx = drm_match_cea_mode(mode) - 1;
unsigned int clock1, clock2;
if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
cea_mode = &edid_cea_modes[mode_idx];
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
mode_idx = drm_match_hdmi_mode(mode) - 1;
if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
cea_mode = &edid_4k_modes[mode_idx];
clock2 = hdmi_mode_alternate_clock(cea_mode);
}
}
if (!cea_mode)
continue;
cea_mode = &edid_cea_modes[cea_mode_idx];
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
if (clock1 == clock2)
continue;
@ -2442,10 +2529,11 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
}
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
struct drm_device *dev = connector->dev;
u8 * mode, cea_mode;
const u8 *mode;
u8 cea_mode;
int modes = 0;
for (mode = db; mode < db + len; mode++) {
@ -2465,6 +2553,68 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
return modes;
}
/*
* do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
* @connector: connector corresponding to the HDMI sink
* @db: start of the CEA vendor specific block
* @len: length of the CEA block payload, i.e. one can access up to db[len]
*
* Parses the HDMI VSDB looking for modes to add to @connector.
*/
static int
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
struct drm_device *dev = connector->dev;
int modes = 0, offset = 0, i;
u8 vic_len;
if (len < 8)
goto out;
/* no HDMI_Video_Present */
if (!(db[8] & (1 << 5)))
goto out;
/* Latency_Fields_Present */
if (db[8] & (1 << 7))
offset += 2;
/* I_Latency_Fields_Present */
if (db[8] & (1 << 6))
offset += 2;
/* the declared length is not long enough for the first 2 bytes
* of the additional video format capabilities */
offset += 2;
if (len < (8 + offset))
goto out;
vic_len = db[8 + offset] >> 5;
for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
struct drm_display_mode *newmode;
u8 vic;
vic = db[9 + offset + i];
vic--; /* VICs start at 1 */
if (vic >= ARRAY_SIZE(edid_4k_modes)) {
DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
continue;
}
newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
if (!newmode)
continue;
drm_mode_probed_add(connector, newmode);
modes++;
}
out:
return modes;
}
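
For reference, an illustrative annotation (not part of the commit) of the VSDB bytes the parser above walks, using the same offsets as the code:

/*
 * db[0]          tag (0x3 = vendor specific) plus payload length
 * db[1..3]       IEEE OUI, 0x000C03 for HDMI, stored little-endian
 * db[8]          flags: bit 7 Latency_Fields_Present,
 *                bit 6 I_Latency_Fields_Present, bit 5 HDMI_Video_Present
 * ...            optional latency fields are skipped via 'offset'
 * db[8+offset]   top 3 bits hold HDMI_VIC_LEN (vic_len above)
 * db[9+offset..] list of HDMI VICs; VICs 1-4 map to edid_4k_modes[]
 */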
static int
cea_db_payload_len(const u8 *db)
{
@ -2496,14 +2646,30 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
return 0;
}
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
int hdmi_id;
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
if (cea_db_payload_len(db) < 5)
return false;
hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
return hdmi_id == HDMI_IEEE_OUI;
}
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
u8 * cea = drm_find_cea_extension(edid);
u8 * db, dbl;
const u8 *cea = drm_find_cea_extension(edid);
const u8 *db;
u8 dbl;
int modes = 0;
if (cea && cea_revision(cea) >= 3) {
@ -2518,6 +2684,8 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
if (cea_db_tag(db) == VIDEO_BLOCK)
modes += do_cea_modes (connector, db+1, dbl);
else if (cea_db_is_hdmi_vsdb(db))
modes += do_hdmi_vsdb_modes(connector, db, dbl);
}
}
@ -2570,21 +2738,6 @@ monitor_name(struct detailed_timing *t, void *data)
*(u8 **)data = t->data.other_data.data.str.str;
}
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
int hdmi_id;
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
if (cea_db_payload_len(db) < 5)
return false;
hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
return hdmi_id == HDMI_IDENTIFIER;
}
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
@ -2731,6 +2884,60 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
}
EXPORT_SYMBOL(drm_edid_to_sad);
/**
* drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
* @edid: EDID to parse
* @sadb: pointer to the speaker block
*
* Looks for the CEA EDID block and extracts the Speaker Allocation Data Block from it.
* Note: the returned pointer needs to be kfreed.
*
* Return number of found Speaker Allocation Blocks or negative number on error.
*/
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
{
int count = 0;
int i, start, end, dbl;
const u8 *cea;
cea = drm_find_cea_extension(edid);
if (!cea) {
DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
return -ENOENT;
}
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
return -ENOTSUPP;
}
if (cea_db_offsets(cea, &start, &end)) {
DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
return -EPROTO;
}
for_each_cea_db(cea, i, start, end) {
const u8 *db = &cea[i];
if (cea_db_tag(db) == SPEAKER_BLOCK) {
dbl = cea_db_payload_len(db);
/* Speaker Allocation Data Block */
if (dbl == 3) {
*sadb = kmalloc(dbl, GFP_KERNEL);
if (!*sadb)
return -ENOMEM;
memcpy(*sadb, &db[1], dbl);
count = dbl;
break;
}
}
}
return count;
}
EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
* @connector: connector associated with the HDMI/DP sink
@ -3102,9 +3309,10 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (err < 0)
return err;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
frame->video_code = drm_match_cea_mode(mode);
if (!frame->video_code)
return 0;
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
@ -3112,3 +3320,39 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
return 0;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
/**
* drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
* data from a DRM display mode
* @frame: HDMI vendor infoframe
* @mode: DRM display mode
*
* Note that there is a need to send HDMI vendor infoframes only when using a
* 4k or stereoscopic 3D mode. So when given any other mode as input, this
* function will return -EINVAL, an error that can be safely ignored.
*
* Returns 0 on success or a negative error code on failure.
*/
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode)
{
int err;
u8 vic;
if (!frame || !mode)
return -EINVAL;
vic = drm_match_hdmi_mode(mode);
if (!vic)
return -EINVAL;
err = hdmi_vendor_infoframe_init(frame);
if (err < 0)
return err;
frame->vic = vic;
return 0;
}
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
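
Typical caller-side use of the new helper, as a sketch; 'mode' is assumed to be a candidate display mode and the error handling is trimmed to the essentials.

struct hdmi_vendor_infoframe frame;
int err;

err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
if (err == -EINVAL)
	return 0;	/* not a 4k mode: no vendor infoframe required */
if (err < 0)
	return err;	/* genuine failure */
/* ... pack and upload the frame to the infoframe hardware buffer ... */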

View File

@ -114,6 +114,9 @@ static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_h
uint16_t *r_base, *g_base, *b_base;
int i;
if (helper->funcs->gamma_get == NULL)
return;
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
@ -337,6 +340,14 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
return 0;
}
/*
* The driver really shouldn't advertise pseudo/directcolor
* visuals if it can't deal with the palette.
*/
if (WARN_ON(!fb_helper->funcs->gamma_set ||
!fb_helper->funcs->gamma_get))
return -EINVAL;
pindex = regno;
if (fb->bits_per_pixel == 16) {
@ -380,12 +391,19 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, j, rc = 0;
int start;
drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
}
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
@ -408,10 +426,13 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
if (rc)
return rc;
goto out;
}
if (crtc_funcs->load_lut)
crtc_funcs->load_lut(crtc);
}
out:
drm_modeset_unlock_all(dev);
return rc;
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
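
The locking pattern this hunk adopts, shown in isolation as a sketch: take the modeset locks once on entry and funnel every exit through a single unlock, so an early error return can no longer leak the locks.

drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
	drm_modeset_unlock_all(dev);
	return -EBUSY;		/* fbdev no longer owns the display */
}
/* ... per-CRTC work uses 'goto out' instead of a bare return ... */
out:
	drm_modeset_unlock_all(dev);
	return rc;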

View File

@ -32,6 +32,7 @@
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
*
@ -78,7 +79,6 @@
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
#if 0
/**
* Initialize the GEM device fields
*/
@ -88,7 +88,7 @@ drm_gem_init(struct drm_device *dev)
{
struct drm_gem_mm *mm;
spin_lock_init(&dev->object_name_lock);
mutex_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@ -98,13 +98,8 @@ drm_gem_init(struct drm_device *dev)
}
dev->mm_private = mm;
if (drm_ht_create(&mm->offset_hash, 12)) {
kfree(mm);
return -ENOMEM;
}
drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
drm_vma_offset_manager_init(&mm->vma_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
return 0;
@ -115,12 +110,10 @@ drm_gem_destroy(struct drm_device *dev)
{
struct drm_gem_mm *mm = dev->mm_private;
drm_mm_takedown(&mm->offset_manager);
drm_ht_remove(&mm->offset_hash);
drm_vma_offset_manager_destroy(&mm->vma_manager);
kfree(mm);
dev->mm_private = NULL;
}
#endif
/**
* Initialize an already allocated GEM object of the specified size with
@ -129,16 +122,14 @@ drm_gem_destroy(struct drm_device *dev)
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
struct file *filp;
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(obj->filp))
return PTR_ERR(obj->filp);
filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(filp))
return PTR_ERR(filp);
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
obj->size = size;
drm_gem_private_object_init(dev, obj, size);
obj->filp = filp;
return 0;
}
@ -149,7 +140,7 @@ EXPORT_SYMBOL(drm_gem_object_init);
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
*/
int drm_gem_private_object_init(struct drm_device *dev,
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
@ -158,10 +149,9 @@ int drm_gem_private_object_init(struct drm_device *dev,
obj->filp = NULL;
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
obj->handle_count = 0;
obj->size = size;
return 0;
drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@ -194,6 +184,57 @@ free:
}
EXPORT_SYMBOL(drm_gem_object_alloc);
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
BUG();
}
/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
obj->name = 0;
/*
* The object name held a reference to this object, drop
* that now.
*
* This cannot be the last reference, since the handle holds one too.
*/
kref_put(&obj->refcount, drm_gem_object_ref_bug);
}
}
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
if (WARN_ON(obj->handle_count == 0))
return;
/*
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
mutex_lock(&obj->dev->object_name_lock);
if (--obj->handle_count == 0) {
drm_gem_object_handle_free(obj);
}
mutex_unlock(&obj->dev->object_name_lock);
drm_gem_object_unreference_unlocked(obj);
}
/**
* Removes the mapping from handle to filp for this object.
@ -248,13 +289,15 @@ EXPORT_SYMBOL(drm_gem_handle_delete);
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
struct drm_device *dev = obj->dev;
int ret;
WARN_ON(!mutex_is_locked(&dev->object_name_lock));
/*
* Get the user-visible handle using idr. Preload and perform
* allocation under our spinlock.
@ -263,14 +306,22 @@ drm_gem_handle_create(struct drm_file *file_priv,
spin_lock(&file_priv->table_lock);
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
drm_gem_object_reference(obj);
obj->handle_count++;
spin_unlock(&file_priv->table_lock);
idr_preload_end();
if (ret < 0)
mutex_unlock(&dev->object_name_lock);
if (ret < 0) {
drm_gem_object_handle_unreference_unlocked(obj);
return ret;
}
*handlep = ret;
drm_gem_object_handle_reference(obj);
// ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
// if (ret) {
// drm_gem_handle_delete(file_priv, *handlep);
// return ret;
// }
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
@ -282,6 +333,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
return 0;
}
/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
mutex_lock(&obj->dev->object_name_lock);
return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
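
Typical driver-side use of drm_gem_handle_create(), sketched with illustrative names: publish a handle for a freshly created object, then drop the local reference, since the handle now keeps the object alive.

struct drm_gem_object *obj;
u32 handle;
int ret;

obj = example_gem_alloc(dev, size);	/* illustrative allocator */
if (obj == NULL)
	return -ENOMEM;

ret = drm_gem_handle_create(file_priv, obj, &handle);
/* Drop the allocation reference; the handle holds its own reference now. */
drm_gem_object_unreference_unlocked(obj);
if (ret)
	return ret;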
@ -297,34 +363,30 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list = &obj->map_list;
drm_ht_remove_item(&mm->offset_hash, &list->hash);
drm_mm_put_block(list->file_offset_node);
kfree(list->map);
list->map = NULL;
drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
* drm_gem_create_mmap_offset - create a fake mmap offset for an object
* drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
* @obj: obj in question
* @size: the virtual size
*
* GEM memory mapping works by handing back to userspace a fake mmap offset
* it can use in a subsequent mmap(2) call. The DRM core code then looks
* up the object based on the offset and sets up the various memory mapping
* structures.
*
* This routine allocates and attaches a fake offset for @obj.
* This routine allocates and attaches a fake offset for @obj, in cases where
* the virtual size differs from the physical size (i.e. obj->size). Otherwise
* just use drm_gem_create_mmap_offset().
*/
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
struct drm_local_map *map;
int ret;
/* Set the object up for mmap'ing */
list = &obj->map_list;
@ -439,8 +501,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
spin_lock(&dev->object_name_lock);
/* prevent races with concurrent gem_close. */
if (obj->handle_count == 0) {
ret = -ENOENT;
goto err;
}
if (!obj->name) {
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
if (ret < 0)
@ -456,8 +524,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
ret = 0;
err:
spin_unlock(&dev->object_name_lock);
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
@ -483,15 +551,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);
spin_lock(&dev->object_name_lock);
mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj)
if (obj) {
drm_gem_object_reference(obj);
spin_unlock(&dev->object_name_lock);
if (!obj)
} else {
mutex_unlock(&dev->object_name_lock);
return -ENOENT;
}
ret = drm_gem_handle_create(file_priv, obj, &handle);
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
@ -525,6 +595,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_device *dev = obj->dev;
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
@ -575,40 +646,6 @@ drm_gem_object_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_free);
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
BUG();
}
/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
spin_lock(&dev->object_name_lock);
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
obj->name = 0;
spin_unlock(&dev->object_name_lock);
/*
* The object name held a reference to this object, drop
* that now.
*
* This cannot be the last reference, since the handle holds one too.
*/
kref_put(&obj->refcount, drm_gem_object_ref_bug);
} else
spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)

View File

@ -94,7 +94,6 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
return NULL;
}
static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
unsigned long key)
{

View File

@ -62,7 +62,7 @@
irqreturn_t device_irq_handler(struct drm_device *dev)
{
printf("video irq\n");
// printf("video irq\n");
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
@ -116,6 +116,7 @@ int drm_irq_install(struct drm_device *dev)
ret = dev->driver->irq_postinstall(dev);
if (ret < 0) {
dev->irq_enabled = 0;
DRM_ERROR("%s\n", __FUNCTION__);
}
@ -128,11 +129,6 @@ int drm_irq_install(struct drm_device *dev)
EXPORT_SYMBOL(drm_irq_install);
static inline u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
}
u64 div64_u64(u64 dividend, u64 divisor)
@ -260,7 +256,3 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
#endif
}
EXPORT_SYMBOL(drm_vblank_post_modeset);

View File

@ -49,58 +49,18 @@
#define MM_UNUSED_TARGET 4
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
struct drm_mm_node *child;
if (atomic)
child = kzalloc(sizeof(*child), GFP_ATOMIC);
else
child = kzalloc(sizeof(*child), GFP_KERNEL);
if (unlikely(child == NULL)) {
spin_lock(&mm->unused_lock);
if (list_empty(&mm->unused_nodes))
child = NULL;
else {
child =
list_entry(mm->unused_nodes.next,
struct drm_mm_node, node_list);
list_del(&child->node_list);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
}
return child;
}
/* drm_mm_pre_get() - pre allocate drm_mm_node structure
* drm_mm: memory manager struct we are pre-allocating for
*
* Returns 0 on success or -ENOMEM if allocation fails.
*/
int drm_mm_pre_get(struct drm_mm *mm)
{
struct drm_mm_node *node;
spin_lock(&mm->unused_lock);
while (mm->num_unused < MM_UNUSED_TARGET) {
spin_unlock(&mm->unused_lock);
node = kzalloc(sizeof(*node), GFP_KERNEL);
spin_lock(&mm->unused_lock);
if (unlikely(node == NULL)) {
int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
spin_unlock(&mm->unused_lock);
return ret;
}
++mm->num_unused;
list_add_tail(&node->node_list, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
enum drm_mm_search_flags flags);
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
@ -147,33 +107,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
}
}
struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
unsigned long start,
unsigned long size,
bool atomic)
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole, *node;
unsigned long end = start + size;
struct drm_mm_node *hole;
unsigned long end = node->start + node->size;
unsigned long hole_start;
unsigned long hole_end;
BUG_ON(node == NULL);
/* Find the relevant hole to add our node to */
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (hole_start > start || hole_end < end)
if (hole_start > node->start || hole_end < end)
continue;
node = drm_mm_kmalloc(mm, atomic);
if (unlikely(node == NULL))
return NULL;
node->start = start;
node->size = size;
node->mm = mm;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole->node_list);
if (start == hole_start) {
if (node->start == hole_start) {
hole->hole_follows = 0;
list_del_init(&hole->hole_stack);
}
@ -184,31 +138,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
node->hole_follows = 1;
}
return node;
return 0;
}
WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
return NULL;
WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
node->start, node->size);
return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_create_block);
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
int atomic)
{
struct drm_mm_node *node;
node = drm_mm_kmalloc(hole_node->mm, atomic);
if (unlikely(node == NULL))
return NULL;
drm_mm_insert_helper(hole_node, node, size, alignment, color);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
EXPORT_SYMBOL(drm_mm_reserve_node);
/**
* Search for free space and insert a preallocated memory node. Returns
@ -217,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color)
unsigned long color,
enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_generic(mm, size, alignment,
color, 0);
color, flags);
if (!hole_node)
return -ENOSPC;
@ -231,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
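
A sketch of the embedded-node API this file is converted to: the caller owns the drm_mm_node storage, so there is no allocation or freeing inside the allocator itself; DRM_MM_SEARCH_DEFAULT takes the first fitting hole, DRM_MM_SEARCH_BEST the smallest one.

struct drm_mm_node node;
int ret;

memset(&node, 0, sizeof(node));
ret = drm_mm_insert_node_generic(&mm, &node, size, alignment, 0,
				 DRM_MM_SEARCH_DEFAULT);
if (ret == -ENOSPC)
	return ret;	/* no hole large enough: evict or fail */

/* ... use node.start as the allocated offset ... */

drm_mm_remove_node(&node);	/* no kfree: the storage is caller-owned */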
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment)
{
return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
@ -290,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
}
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int atomic)
{
struct drm_mm_node *node;
node = drm_mm_kmalloc(hole_node->mm, atomic);
if (unlikely(node == NULL))
return NULL;
drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
start, end);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. This is for range
@ -318,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment, unsigned long color,
unsigned long start, unsigned long end)
unsigned long start, unsigned long end,
enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
start, end, 0);
start, end, flags);
if (!hole_node)
return -ENOSPC;
@ -335,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long start, unsigned long end)
{
return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
/**
* Remove a memory node from the allocator.
*/
@ -351,6 +254,9 @@ void drm_mm_remove_node(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
if (WARN_ON(!node->allocated))
return;
BUG_ON(node->scanned_block || node->scanned_prev_free
|| node->scanned_next_free);
@ -377,28 +283,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_remove_node);
/*
* Remove a memory node from the allocator and free the allocated struct
* drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
* drm_mm_get_block functions.
*/
void drm_mm_put_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
drm_mm_remove_node(node);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&node->node_list, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(node);
spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
@ -414,11 +298,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
return end >= start + size;
}
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
bool best_match)
enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@ -441,7 +325,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
if (!best_match)
if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
if (entry->size < best_size) {
@ -452,15 +336,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
bool best_match)
enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@ -488,7 +371,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
if (!best_match)
if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
if (entry->size < best_size) {
@ -499,7 +382,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
@ -634,8 +516,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
* corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
* immediately following drm_mm_search_free with best_match = 0 will then return
* the just freed block (because its at the top of the free_stack list).
* immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
* return the just freed block (because it's at the top of the free_stack list).
*
* Returns one if this block should be evicted, zero otherwise. Will always
* return zero when no hole has been found.
@ -672,10 +554,7 @@ EXPORT_SYMBOL(drm_mm_clean);
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->hole_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
@ -695,22 +574,8 @@ EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
struct drm_mm_node *entry, *next;
if (WARN(!list_empty(&mm->head_node.node_list),
"Memory manager not clean. Delaying takedown\n")) {
return;
}
spin_lock(&mm->unused_lock);
list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
list_del(&entry->node_list);
kfree(entry);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
BUG_ON(mm->num_unused != 0);
WARN(!list_empty(&mm->head_node.node_list),
"Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

View File

@ -593,27 +593,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_mode_set_name);
/**
* drm_mode_list_concat - move modes from one list to another
* @head: source list
* @new: dst list
*
* LOCKING:
* Caller must ensure both lists are locked.
*
* Move all the modes from @head to @new.
*/
void drm_mode_list_concat(struct list_head *head, struct list_head *new)
{
struct list_head *entry, *tmp;
list_for_each_safe(entry, tmp, head) {
list_move_tail(entry, new);
}
}
EXPORT_SYMBOL(drm_mode_list_concat);
/**
* drm_mode_width - get the width of a mode
* @mode: mode
@ -920,43 +899,6 @@ void drm_mode_validate_size(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_mode_validate_size);
/**
* drm_mode_validate_clocks - validate modes against clock limits
* @dev: DRM device
* @mode_list: list of modes to check
* @min: minimum clock rate array
* @max: maximum clock rate array
* @n_ranges: number of clock ranges (size of arrays)
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Some code may need to check a mode list against the clock limits of the
* device in question. This function walks the mode list, testing to make
* sure each mode falls within a given range (defined by @min and @max
* arrays) and sets @mode->status as needed.
*/
void drm_mode_validate_clocks(struct drm_device *dev,
struct list_head *mode_list,
int *min, int *max, int n_ranges)
{
struct drm_display_mode *mode;
int i;
list_for_each_entry(mode, mode_list, head) {
bool good = false;
for (i = 0; i < n_ranges; i++) {
if (mode->clock >= min[i] && mode->clock <= max[i]) {
good = true;
break;
}
}
if (!good)
mode->status = MODE_CLOCK_RANGE;
}
}
EXPORT_SYMBOL(drm_mode_validate_clocks);
/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device

View File

@ -84,6 +84,330 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);
#if 0
/**
* \brief Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
unsigned long addr;
size_t sz;
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
}
}
/**
* \brief Free a PCI consistent memory block
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
__drm_pci_free(dev, dmah);
kfree(dmah);
}
EXPORT_SYMBOL(drm_pci_free);
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
/* For historical reasons, drm_get_pci_domain() is busticated
* on most archs and has to remain so for userspace interface
* < 1.4, except on alpha which was right from the beginning
*/
if (dev->if_version < 0x10004)
return 0;
#endif /* __alpha__ */
return pci_domain_nr(dev->pdev->bus);
}
static int drm_pci_get_irq(struct drm_device *dev)
{
return dev->pdev->irq;
}
static const char *drm_pci_get_name(struct drm_device *dev)
{
struct pci_driver *pdriver = dev->driver->kdriver.pci;
return pdriver->name;
}
static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
struct pci_driver *pdriver = dev->driver->kdriver.pci;
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (len >= master->unique_len) {
DRM_ERROR("buffer overflow");
ret = -EINVAL;
goto err;
} else
master->unique_len = len;
dev->devname =
kmalloc(strlen(pdriver->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", pdriver->name,
master->unique);
return 0;
err:
return ret;
}
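
For concreteness, the unique string built above looks like this for a typical Intel IGP (illustrative device address):

/* master->unique = "pci:0000:00:02.0"  (domain 0, bus 0, slot 2, function 0) */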
static int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u)
{
int domain, bus, slot, func, ret;
const char *bus_name;
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
goto err;
}
if (copy_from_user(master->unique, u->unique, master->unique_len)) {
ret = -EFAULT;
goto err;
}
master->unique[master->unique_len] = '\0';
bus_name = dev->driver->bus->get_name(dev);
dev->devname = kmalloc(strlen(bus_name) +
strlen(master->unique) + 2, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", bus_name,
master->unique);
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
ret = -EINVAL;
goto err;
}
domain = bus >> 8;
bus &= 0xff;
if ((domain != drm_get_pci_domain(dev)) ||
(bus != dev->pdev->bus->number) ||
(slot != PCI_SLOT(dev->pdev->devfn)) ||
(func != PCI_FUNC(dev->pdev->devfn))) {
ret = -EINVAL;
goto err;
}
return 0;
err:
return ret;
}
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
return 0;
}
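/* Sketch of the busid encoding assumed above (illustrative helpers, not
 * part of this commit): the PCI domain lives in the upper bits of busnum
 * and the bus number in the low byte, so e.g. busnum == 0x0102 means
 * domain 1, bus 2.
 */
static inline int example_busnum_domain(int busnum)
{
	return busnum >> 8;
}

static inline int example_busnum_bus(int busnum)
{
	return busnum & 0xff;
}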
static int drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_has_AGP(dev)) {
if (drm_pci_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
&& (dev->agp == NULL)) {
DRM_ERROR("Cannot initialize the agpgart module.\n");
return -EINVAL;
}
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024);
}
}
return 0;
}
static void drm_pci_agp_destroy(struct drm_device *dev)
{
if (drm_core_has_AGP(dev) && dev->agp) {
arch_phys_wc_del(dev->agp->agp_mtrr);
drm_agp_clear(dev);
drm_agp_destroy(dev->agp);
dev->agp = NULL;
}
}
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
.agp_destroy = drm_pci_agp_destroy,
};
#endif
/**
* Register.
*
* \param pdev - PCI device structure
* \param ent entry from the PCI ID table with device type flags
* \return zero on success or a negative number on failure.
*
* Attempt to get inter-module "drm" information. If we are first,
* register the character device and inter-module information.
* Try to register; if registration fails, back out the previous work.
*/
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver)
{
static struct drm_device drm_dev;
static struct drm_file drm_file;
struct drm_device *dev;
struct drm_file *priv;
int ret;
dev = &drm_dev;
priv = &drm_file;
drm_file_handlers[0] = priv;
// ret = pci_enable_device(pdev);
// if (ret)
// goto err_g1;
pci_set_master(pdev);
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
DRM_DEBUG("\n");
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
// mutex_lock(&drm_global_mutex);
#if 0
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
pci_set_drvdata(pdev, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g2;
}
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
if (ret)
goto err_g21;
}
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
goto err_g3;
#endif
if (dev->driver->load) {
ret = dev->driver->load(dev, ent->driver_data);
if (ret)
goto err_g4;
}
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
goto err_g4;
}
// mutex_unlock(&drm_global_mutex);
return 0;
err_g4:
// drm_put_minor(&dev->primary);
err_g3:
// if (dev->render)
// drm_put_minor(&dev->render);
err_g21:
// if (drm_core_check_feature(dev, DRIVER_MODESET))
// drm_put_minor(&dev->control);
err_g2:
// pci_disable_device(pdev);
err_g1:
// kfree(dev);
// mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{


@ -0,0 +1,295 @@
/*
* Copyright (C) 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <drm/drmP.h>
#include <drm/drm_rect.h>
/**
* drm_rect_intersect - intersect two rectangles
* @r1: first rectangle
* @r2: second rectangle
*
* Calculate the intersection of rectangles @r1 and @r2.
* @r1 will be overwritten with the intersection.
*
* RETURNS:
* %true if rectangle @r1 is still visible after the operation,
* %false otherwise.
*/
bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
{
r1->x1 = max(r1->x1, r2->x1);
r1->y1 = max(r1->y1, r2->y1);
r1->x2 = min(r1->x2, r2->x2);
r1->y2 = min(r1->y2, r2->y2);
return drm_rect_visible(r1);
}
EXPORT_SYMBOL(drm_rect_intersect);
/**
* drm_rect_clip_scaled - perform a scaled clip operation
* @src: source window rectangle
* @dst: destination window rectangle
* @clip: clip rectangle
* @hscale: horizontal scaling factor
* @vscale: vertical scaling factor
*
* Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
* same amounts multiplied by @hscale and @vscale.
*
* RETURNS:
* %true if rectangle @dst is still visible after being clipped,
* %false otherwise
*/
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip,
int hscale, int vscale)
{
int diff;
diff = clip->x1 - dst->x1;
if (diff > 0) {
int64_t tmp = src->x1 + (int64_t) diff * hscale;
src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
diff = clip->y1 - dst->y1;
if (diff > 0) {
int64_t tmp = src->y1 + (int64_t) diff * vscale;
src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
diff = dst->x2 - clip->x2;
if (diff > 0) {
int64_t tmp = src->x2 - (int64_t) diff * hscale;
src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
diff = dst->y2 - clip->y2;
if (diff > 0) {
int64_t tmp = src->y2 - (int64_t) diff * vscale;
src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
return drm_rect_intersect(dst, clip);
}
EXPORT_SYMBOL(drm_rect_clip_scaled);
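/* Usage sketch (hypothetical plane code): @src is in 16.16 fixed point,
 * @dst and @clip are in integer pixels, and the scale factors come from
 * drm_rect_calc_hscale()/drm_rect_calc_vscale() declared in drm_rect.h.
 */
static bool example_clip_plane(struct drm_rect *src, struct drm_rect *dst,
			       const struct drm_rect *clip)
{
	int hscale = drm_rect_calc_hscale(src, dst, 1, INT_MAX);
	int vscale = drm_rect_calc_vscale(src, dst, 1, INT_MAX);

	if (hscale < 0 || vscale < 0)
		return false;	/* degenerate rectangle */

	return drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
}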
static int drm_calc_scale(int src, int dst)
{
int scale = 0;
if (src < 0 || dst < 0)
return -EINVAL;
if (dst == 0)
return 0;
scale = src / dst;
return scale;
}
/**
* drm_rect_calc_hscale - calculate the horizontal scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_hscale: minimum allowed horizontal scaling factor
* @max_hscale: maximum allowed horizontal scaling factor
*
* Calculate the horizontal scaling factor as
* (@src width) / (@dst width).
*
* RETURNS:
* The horizontal scaling factor, or a negative error code if out of limits.
*/
int drm_rect_calc_hscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_hscale, int max_hscale)
{
int src_w = drm_rect_width(src);
int dst_w = drm_rect_width(dst);
int hscale = drm_calc_scale(src_w, dst_w);
if (hscale < 0 || dst_w == 0)
return hscale;
if (hscale < min_hscale || hscale > max_hscale)
return -ERANGE;
return hscale;
}
EXPORT_SYMBOL(drm_rect_calc_hscale);
/**
* drm_rect_calc_vscale - calculate the vertical scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_vscale: minimum allowed vertical scaling factor
* @max_vscale: maximum allowed vertical scaling factor
*
* Calculate the vertical scaling factor as
* (@src height) / (@dst height).
*
* RETURNS:
* The vertical scaling factor, or a negative error code if out of limits.
*/
int drm_rect_calc_vscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_vscale, int max_vscale)
{
int src_h = drm_rect_height(src);
int dst_h = drm_rect_height(dst);
int vscale = drm_calc_scale(src_h, dst_h);
if (vscale < 0 || dst_h == 0)
return vscale;
if (vscale < min_vscale || vscale > max_vscale)
return -ERANGE;
return vscale;
}
EXPORT_SYMBOL(drm_rect_calc_vscale);
/**
* drm_rect_calc_hscale_relaxed - calculate the horizontal scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_hscale: minimum allowed horizontal scaling factor
* @max_hscale: maximum allowed horizontal scaling factor
*
* Calculate the horizontal scaling factor as
* (@src width) / (@dst width).
*
* If the calculated scaling factor is below @min_hscale,
* decrease the width of rectangle @dst to compensate.
*
* If the calculated scaling factor is above @max_hscale,
* decrease the width of rectangle @src to compensate.
*
* RETURNS:
* The horizontal scaling factor.
*/
int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_hscale, int max_hscale)
{
int src_w = drm_rect_width(src);
int dst_w = drm_rect_width(dst);
int hscale = drm_calc_scale(src_w, dst_w);
if (hscale < 0 || dst_w == 0)
return hscale;
if (hscale < min_hscale) {
int max_dst_w = src_w / min_hscale;
drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
return min_hscale;
}
if (hscale > max_hscale) {
int max_src_w = dst_w * max_hscale;
drm_rect_adjust_size(src, max_src_w - src_w, 0);
return max_hscale;
}
return hscale;
}
EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
/**
* drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_vscale: minimum allowed vertical scaling factor
* @max_vscale: maximum allowed vertical scaling factor
*
* Calculate the vertical scaling factor as
* (@src height) / (@dst height).
*
* If the calculated scaling factor is below @min_vscale,
* decrease the height of rectangle @dst to compensate.
*
* If the calculated scaling factor is above @max_vscale,
* decrease the height of rectangle @src to compensate.
*
* RETURNS:
* The vertical scaling factor.
*/
int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_vscale, int max_vscale)
{
int src_h = drm_rect_height(src);
int dst_h = drm_rect_height(dst);
int vscale = drm_calc_scale(src_h, dst_h);
if (vscale < 0 || dst_h == 0)
return vscale;
if (vscale < min_vscale) {
int max_dst_h = src_h / min_vscale;
drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
return min_vscale;
}
if (vscale > max_vscale) {
int max_src_h = dst_h * max_vscale;
drm_rect_adjust_size(src, 0, max_src_h - src_h);
return max_vscale;
}
return vscale;
}
EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
/**
* drm_rect_debug_print - print the rectangle information
* @r: rectangle to print
* @fixed_point: rectangle is in 16.16 fixed point format
*/
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
{
int w = drm_rect_width(r);
int h = drm_rect_height(r);
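	/* (x & 0xffff) * 15625 >> 10 equals x_frac * 1000000 / 65536: it
	 * converts the low 16 fractional bits into decimal millionths for
	 * the %06u fields below.
	 */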
if (fixed_point)
DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
w >> 16, ((w & 0xffff) * 15625) >> 10,
h >> 16, ((h & 0xffff) * 15625) >> 10,
r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
else
DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
}
EXPORT_SYMBOL(drm_rect_debug_print);


@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
struct va_format {
const char *fmt;
@ -44,12 +45,16 @@ struct va_format {
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
struct idr drm_minors_idr;
int drm_err(const char *func, const char *format, ...)
{
struct va_format vaf;
@ -86,6 +91,45 @@ void drm_ut_debug_printk(unsigned int request_level,
}
EXPORT_SYMBOL(drm_ut_debug_printk);
int drm_fill_in_dev(struct drm_device *dev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
int retcode;
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
// if (drm_ht_create(&dev->map_hash, 12)) {
// return -ENOMEM;
// }
dev->driver = driver;
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
if (retcode) {
DRM_ERROR("Cannot initialize graphics execution "
"manager (GEM)\n");
goto error_out_unreg;
}
}
return 0;
error_out_unreg:
// drm_lastclose(dev);
return retcode;
}
EXPORT_SYMBOL(drm_fill_in_dev);
/**
* Compute size order. Returns the exponent of the smallest power of two
* which is greater than or equal to the given number.


@ -0,0 +1,436 @@
/*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* Copyright (c) 2012 David Airlie <airlied@linux.ie>
* Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
//#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
/**
* DOC: vma offset manager
*
* The vma-manager is responsible for mapping arbitrary driver-dependent
* memory regions into the linear user address-space. It provides offsets to
* the caller, which can then be used on the address_space of the drm-device.
* It takes care not to overlap regions, to size them appropriately, and to
* not confuse mm-core with inconsistent fake vm_pgoff fields.
* Drivers shouldn't use this for object placement in VMEM. This manager should
* only be used to manage mappings into linear user-space VMs.
*
* We use drm_mm as backend to manage object allocations. But it is highly
* optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
* speed up offset lookups.
*
* You must not use multiple offset managers on a single address_space.
* Otherwise, mm-core will be unable to tear down memory mappings as the VM will
* no longer be linear. Please use VM_NONLINEAR in that case and implement your
* own offset managers.
*
* This offset manager works on page-based addresses. That is, every argument
* and return code (with the exception of drm_vma_node_offset_addr()) is given
* in number of pages, not number of bytes. That means, object sizes and offsets
* must always be page-aligned (as usual).
* If you want to get a valid byte-based user-space address for a given offset,
* please see drm_vma_node_offset_addr().
*
* Additionally to offset management, the vma offset manager also handles access
* management. For every open-file context that is allowed to access a given
* node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
* open-file with the offset of the node will fail with -EACCES. To revoke
* access again, use drm_vma_node_revoke(). However, the caller is responsible
* for destroying already existing mappings, if required.
*/
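/* A minimal sketch of the intended call sequence (hypothetical driver
 * code, error handling trimmed; the window placement is an arbitrary
 * example). All offsets and sizes are in pages.
 */
static int example_vma_setup(struct drm_vma_offset_manager *mgr,
			     struct drm_vma_offset_node *node,
			     struct file *filp, unsigned long obj_pages)
{
	int ret;

	/* manage a window of 0x100000 pages starting at page 0x10000 */
	drm_vma_offset_manager_init(mgr, 0x10000, 0x100000);

	ret = drm_vma_offset_add(mgr, node, obj_pages);
	if (ret)
		return ret;

	/* grant this open-file access; pair with drm_vma_node_revoke() */
	return drm_vma_node_allow(node, filp);
}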
/**
* drm_vma_offset_manager_init - Initialize new offset-manager
* @mgr: Manager object
* @page_offset: Offset of available memory area (page-based)
* @size: Size of available address space range (page-based)
*
* Initialize a new offset-manager. The offset and area size available for the
* manager are given as @page_offset and @size. Both are interpreted as
* page-numbers, not bytes.
*
* Adding/removing nodes from the manager is locked internally and protected
* against concurrent access. However, node allocation and destruction is left
* for the caller. While calling into the vma-manager, a given node must
* always be guaranteed to be referenced.
*/
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
unsigned long page_offset, unsigned long size)
{
rwlock_init(&mgr->vm_lock);
mgr->vm_addr_space_rb = RB_ROOT;
drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
/**
* drm_vma_offset_manager_destroy() - Destroy offset manager
* @mgr: Manager object
*
* Destroy an object manager which was previously created via
* drm_vma_offset_manager_init(). The caller must remove all allocated nodes
* before destroying the manager. Otherwise, drm_mm will refuse to free the
* requested resources.
*
* The manager must not be accessed after this function is called.
*/
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
/* take the lock to protect against buggy drivers */
write_lock(&mgr->vm_lock);
drm_mm_takedown(&mgr->vm_addr_space_mm);
write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
/**
* drm_vma_offset_lookup() - Find node in offset space
* @mgr: Manager object
* @start: Start address for object (page-based)
* @pages: Size of object (page-based)
*
* Find a node given a start address and object size. This returns the _best_
* match for the given node. That is, @start may point somewhere into a valid
* region and the given node will be returned, as long as the node spans the
* whole requested area (given the size in number of pages as @pages).
*
* RETURNS:
* Returns NULL if no suitable node can be found. Otherwise, the best match
* is returned. It's the caller's responsibility to make sure the node doesn't
* get destroyed before the caller can access it.
*/
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_vma_offset_node *node;
read_lock(&mgr->vm_lock);
node = drm_vma_offset_lookup_locked(mgr, start, pages);
read_unlock(&mgr->vm_lock);
return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);
/**
* drm_vma_offset_lookup_locked() - Find node in offset space
* @mgr: Manager object
* @start: Start address for object (page-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
* manually. See drm_vma_offset_lock_lookup() for an example.
*
* RETURNS:
* Returns NULL if no suitable node can be found. Otherwise, the best match
* is returned.
*/
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_vma_offset_node *node, *best;
struct rb_node *iter;
unsigned long offset;
iter = mgr->vm_addr_space_rb.rb_node;
best = NULL;
while (likely(iter)) {
node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
offset = node->vm_node.start;
if (start >= offset) {
iter = iter->rb_right;
best = node;
if (start == offset)
break;
} else {
iter = iter->rb_left;
}
}
/* verify that the node spans the requested area */
if (best) {
offset = best->vm_node.start + best->vm_node.size;
if (offset < start + pages)
best = NULL;
}
return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node)
{
struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
struct rb_node *parent = NULL;
struct drm_vma_offset_node *iter_node;
while (likely(*iter)) {
parent = *iter;
iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
if (node->vm_node.start < iter_node->vm_node.start)
iter = &(*iter)->rb_left;
else if (node->vm_node.start > iter_node->vm_node.start)
iter = &(*iter)->rb_right;
else
BUG();
}
rb_link_node(&node->vm_rb, parent, iter);
rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}
/**
* drm_vma_offset_add() - Add offset node to manager
* @mgr: Manager object
* @node: Node to be added
* @pages: Allocation size visible to user-space (in number of pages)
*
* Add a node to the offset-manager. If the node was already added, this does
* nothing and returns 0. @pages is the size of the object given in number of
* pages.
* After this call succeeds, you can access the offset of the node until it
* is removed again.
*
* If this call fails, it is safe to retry the operation or call
* drm_vma_offset_remove(), anyway. However, no cleanup is required in that
* case.
*
* @pages is not required to be the same size as the underlying memory object
* that you want to map. It only limits the size that user-space can map into
* their address space.
*
* RETURNS:
* 0 on success, negative error code on failure.
*/
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node, unsigned long pages)
{
int ret;
write_lock(&mgr->vm_lock);
if (drm_mm_node_allocated(&node->vm_node)) {
ret = 0;
goto out_unlock;
}
ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
pages, 0, DRM_MM_SEARCH_DEFAULT);
if (ret)
goto out_unlock;
_drm_vma_offset_add_rb(mgr, node);
out_unlock:
write_unlock(&mgr->vm_lock);
return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
/**
* drm_vma_offset_remove() - Remove offset node from manager
* @mgr: Manager object
* @node: Node to be removed
*
* Remove a node from the offset manager. If the node wasn't added before, this
* does nothing. After this call returns, the offset and size will be 0 until a
* new offset is allocated via drm_vma_offset_add() again. Helper functions like
* drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
* offset is allocated.
*/
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node)
{
write_lock(&mgr->vm_lock);
if (drm_mm_node_allocated(&node->vm_node)) {
rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
drm_mm_remove_node(&node->vm_node);
memset(&node->vm_node, 0, sizeof(node->vm_node));
}
write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
/**
* drm_vma_node_allow - Add open-file to list of allowed users
* @node: Node to modify
* @filp: Open file to add
*
* Add @filp to the list of allowed open-files for this node. If @filp is
* already on this list, the ref-count is incremented.
*
* The list of allowed-users is preserved across drm_vma_offset_add() and
* drm_vma_offset_remove() calls. You may even call it if the node is currently
* not added to any offset-manager.
*
* You must remove all open-files the same number of times as you added them
* before destroying the node. Otherwise, you will leak memory.
*
* This is locked against concurrent access internally.
*
* RETURNS:
* 0 on success, negative error code on internal failure (out-of-mem)
*/
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
struct rb_node **iter;
struct rb_node *parent = NULL;
struct drm_vma_offset_file *new, *entry;
int ret = 0;
/* Preallocate entry to avoid atomic allocations below. It is quite
* unlikely that an open-file is added twice to a single node so we
* don't optimize for this case. OOM is checked below only if the entry
* is actually used. */
new = kmalloc(sizeof(*entry), GFP_KERNEL);
write_lock(&node->vm_lock);
iter = &node->vm_files.rb_node;
while (likely(*iter)) {
parent = *iter;
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
if (filp == entry->vm_filp) {
entry->vm_count++;
goto unlock;
} else if (filp > entry->vm_filp) {
iter = &(*iter)->rb_right;
} else {
iter = &(*iter)->rb_left;
}
}
if (!new) {
ret = -ENOMEM;
goto unlock;
}
new->vm_filp = filp;
new->vm_count = 1;
rb_link_node(&new->vm_rb, parent, iter);
rb_insert_color(&new->vm_rb, &node->vm_files);
new = NULL;
unlock:
write_unlock(&node->vm_lock);
kfree(new);
return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);
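/* Sketch: every drm_vma_node_allow() must be balanced by one
 * drm_vma_node_revoke() before the node is destroyed, typically from the
 * driver's open/close file hooks (hook names here are illustrative).
 */
static int example_open_hook(struct drm_vma_offset_node *node,
			     struct file *filp)
{
	return drm_vma_node_allow(node, filp);
}

static void example_close_hook(struct drm_vma_offset_node *node,
			       struct file *filp)
{
	drm_vma_node_revoke(node, filp);
}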
/**
* drm_vma_node_revoke - Remove open-file from list of allowed users
* @node: Node to modify
* @filp: Open file to remove
*
* Decrement the ref-count of @filp in the list of allowed open-files on @node.
* If the ref-count drops to zero, remove @filp from the list. You must call
* this once for every drm_vma_node_allow() on @filp.
*
* This is locked against concurrent access internally.
*
* If @filp is not on the list, nothing is done.
*/
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
write_lock(&node->vm_lock);
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
if (filp == entry->vm_filp) {
if (!--entry->vm_count) {
rb_erase(&entry->vm_rb, &node->vm_files);
kfree(entry);
}
break;
} else if (filp > entry->vm_filp) {
iter = iter->rb_right;
} else {
iter = iter->rb_left;
}
}
write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);
/**
* drm_vma_node_is_allowed - Check whether an open-file is granted access
* @node: Node to check
* @filp: Open-file to check for
*
* Search the list in @node whether @filp is currently on the list of allowed
* open-files (see drm_vma_node_allow()).
*
* This is locked against concurrent access internally.
*
* RETURNS:
* true iff @filp is on the list
*/
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
struct file *filp)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
read_lock(&node->vm_lock);
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
if (filp == entry->vm_filp)
break;
else if (filp > entry->vm_filp)
iter = iter->rb_right;
else
iter = iter->rb_left;
}
read_unlock(&node->vm_lock);
return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);

drivers/video/drm/hdmi.c (new file, 436 lines)

@ -0,0 +1,436 @@
/*
* Copyright (C) 2012 Avionic Design GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/string.h>
static void hdmi_infoframe_checksum(void *buffer, size_t size)
{
u8 *ptr = buffer;
u8 csum = 0;
size_t i;
/* compute checksum */
for (i = 0; i < size; i++)
csum += ptr[i];
ptr[3] = 256 - csum;
}
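/* Sanity-check sketch (not part of this commit): after the checksum byte
 * is written, the first @size bytes of the packed frame sum to 0 modulo
 * 256, which is the property sinks verify per HDMI 1.4 section 5.3.5.
 */
static inline bool example_checksum_ok(const u8 *buf, size_t size)
{
	u8 sum = 0;
	size_t i;

	for (i = 0; i < size; i++)
		sum += buf[i];

	return sum == 0;
}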
/**
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
* @frame: HDMI AVI infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = HDMI_AVI_INFOFRAME_SIZE;
return 0;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_init);
/**
* hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
* @frame: HDMI AVI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size)
{
u8 *ptr = buffer;
size_t length;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
/* start infoframe payload */
ptr += HDMI_INFOFRAME_HEADER_SIZE;
ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
/*
* Data byte 1, bit 4 has to be set if we provide the active format
* aspect ratio
*/
if (frame->active_aspect & 0xf)
ptr[0] |= BIT(4);
/* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
if (frame->top_bar || frame->bottom_bar)
ptr[0] |= BIT(3);
if (frame->left_bar || frame->right_bar)
ptr[0] |= BIT(2);
ptr[1] = ((frame->colorimetry & 0x3) << 6) |
((frame->picture_aspect & 0x3) << 4) |
(frame->active_aspect & 0xf);
ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
((frame->quantization_range & 0x3) << 2) |
(frame->nups & 0x3);
if (frame->itc)
ptr[2] |= BIT(7);
ptr[3] = frame->video_code & 0x7f;
ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
((frame->content_type & 0x3) << 4) |
(frame->pixel_repeat & 0xf);
ptr[5] = frame->top_bar & 0xff;
ptr[6] = (frame->top_bar >> 8) & 0xff;
ptr[7] = frame->bottom_bar & 0xff;
ptr[8] = (frame->bottom_bar >> 8) & 0xff;
ptr[9] = frame->left_bar & 0xff;
ptr[10] = (frame->left_bar >> 8) & 0xff;
ptr[11] = frame->right_bar & 0xff;
ptr[12] = (frame->right_bar >> 8) & 0xff;
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
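/* Usage sketch (hypothetical encoder code): initialize the frame, fill in
 * the fields the sink needs, then pack into a caller-provided buffer
 * before writing it to hardware-specific registers.
 */
static ssize_t example_pack_avi(u8 *buf, size_t size)
{
	struct hdmi_avi_infoframe frame;

	hdmi_avi_infoframe_init(&frame);
	frame.video_code = 16;	/* CEA-861 VIC 16: 1920x1080p60 */
	frame.picture_aspect = HDMI_PICTURE_ASPECT_16_9;

	return hdmi_avi_infoframe_pack(&frame, buf, size);
}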
/**
* hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
* @frame: HDMI SPD infoframe
* @vendor: vendor string
* @product: product string
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_SPD;
frame->version = 1;
frame->length = HDMI_SPD_INFOFRAME_SIZE;
strncpy(frame->vendor, vendor, sizeof(frame->vendor));
strncpy(frame->product, product, sizeof(frame->product));
return 0;
}
EXPORT_SYMBOL(hdmi_spd_infoframe_init);
/**
* hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
* @frame: HDMI SPD infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size)
{
u8 *ptr = buffer;
size_t length;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
/* start infoframe payload */
ptr += HDMI_INFOFRAME_HEADER_SIZE;
memcpy(ptr, frame->vendor, sizeof(frame->vendor));
memcpy(ptr + 8, frame->product, sizeof(frame->product));
ptr[24] = frame->sdi;
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
/**
* hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
* @frame: HDMI audio infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
frame->version = 1;
frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
return 0;
}
EXPORT_SYMBOL(hdmi_audio_infoframe_init);
/**
* hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
* @frame: HDMI audio infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size)
{
unsigned char channels;
u8 *ptr = buffer;
size_t length;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
memset(buffer, 0, size);
if (frame->channels >= 2)
channels = frame->channels - 1;
else
channels = 0;
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
/* start infoframe payload */
ptr += HDMI_INFOFRAME_HEADER_SIZE;
ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
(frame->sample_size & 0x3);
ptr[2] = frame->coding_type_ext & 0x1f;
ptr[3] = frame->channel_allocation;
ptr[4] = (frame->level_shift_value & 0xf) << 3;
if (frame->downmix_inhibit)
ptr[4] |= BIT(7);
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
/**
* hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
* @frame: HDMI vendor infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
frame->version = 1;
frame->oui = HDMI_IEEE_OUI;
/*
* 0 is a valid value for s3d_struct, so we use a special "not set"
* value
*/
frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
return 0;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
/**
* hdmi_vendor_infoframe_pack() - write an HDMI vendor infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
/* empty info frame */
if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
/* only one of those can be supplied */
if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
/* for side by side (half) we also need to provide 3D_Ext_Data */
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
frame->length = 6;
else
frame->length = 5;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
/* HDMI OUI */
ptr[4] = 0x03;
ptr[5] = 0x0c;
ptr[6] = 0x00;
if (frame->vic) {
ptr[7] = 0x1 << 5; /* video format */
ptr[8] = frame->vic;
} else {
ptr[7] = 0x2 << 5; /* video format */
ptr[8] = (frame->s3d_struct & 0xf) << 4;
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
}
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
/*
* hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
*/
static ssize_t
hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
void *buffer, size_t size)
{
/* we only know about HDMI vendor infoframes */
if (frame->any.oui != HDMI_IEEE_OUI)
return -EINVAL;
return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
}
/**
* hdmi_infoframe_pack() - write an HDMI infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
{
ssize_t length;
switch (frame->any.type) {
case HDMI_INFOFRAME_TYPE_AVI:
length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_SPD:
length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
buffer, size);
break;
default:
WARN(1, "Bad infoframe type %d\n", frame->any.type);
length = -EINVAL;
}
return length;
}
EXPORT_SYMBOL(hdmi_infoframe_pack);


@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <syscall.h>
#include <linux/jiffies.h>
#include <errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@ -43,7 +44,7 @@
} while (0)
#else
#define bit_dbg(level, dev, format, args...) \
do { /* dbgprintf(format, ##args); */ } while (0)
do {} while (0)
#endif /* DEBUG */
/* ----- global variables --------------------------------------------- */


@ -32,6 +32,7 @@
#include <errno.h>
#include <linux/i2c.h>
#include <syscall.h>
#include <linux/jiffies.h>
@ -104,7 +105,7 @@ static const struct dev_pm_ops i2c_device_pm_ops = {
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
pm_generic_runtime_idle
NULL
)
};


@ -25,7 +25,7 @@
#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
int intel_gmch_probe(struct pci_dev *pdev,
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
int intel_agp_enabled;
@ -62,7 +62,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
bridge->capndx = cap_ptr;
if (intel_gmch_probe(pdev, bridge))
if (intel_gmch_probe(pdev, NULL, bridge))
{
// pci_set_drvdata(pdev, bridge);
// err = agp_add_bridge(bridge);


@ -363,6 +363,40 @@ static void intel_gtt_cleanup(void)
intel_gtt_teardown_scratch_page();
}
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
intel_iommu_gfx_mapped)
return 1;
#endif
return 0;
}
static bool intel_gtt_can_wc(void)
{
if (INTEL_GTT_GEN <= 2)
return false;
if (INTEL_GTT_GEN >= 6)
return false;
/* Reports of major corruption with ILK vt'd enabled */
if (needs_ilk_vtd_wa())
return false;
return true;
}
static int intel_gtt_init(void)
{
u32 gma_addr;


@ -0,0 +1,124 @@
CC = gcc.exe
FASM = e:/fasm/fasm.exe
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm \
-I./ -I./render
CFLAGS= -c -O2 $(INCLUDES) -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields
LIBPATH:= $(DDK_TOPDIR)/
LIBS:= -lddk -lcore -lgcc
LDFLAGS = -nostdlib -shared -s -Map i915.map --image-base 0\
--file-alignment 512 --section-alignment 4096
NAME:= i915
HFILES:= $(DRV_INCLUDES)/linux/types.h \
$(DRV_INCLUDES)/linux/list.h \
$(DRV_INCLUDES)/linux/pci.h \
$(DRV_INCLUDES)/drm/drm.h \
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
$(DRV_INCLUDES)/drm/drm_mode.h \
$(DRV_INCLUDES)/drm/drm_mm.h \
i915_drv.h \
bitmap.h
NAME_SRC= main.c \
pci.c \
dvo_ch7017.c \
dvo_ch7xxx.c \
dvo_ivch.c \
dvo_ns2501.c \
dvo_sil164.c \
dvo_tfp410.c \
i915_dma.c \
i915_drv.c \
i915_gem.c \
i915_gem_context.c \
i915_gem_gtt.c \
i915_gem_execbuffer.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_irq.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dvo.c \
intel_fb.c \
intel_hdmi.c \
intel_i2c.c \
intel_lvds.c \
intel_modes.c \
intel_opregion.c \
intel_panel.c \
intel_pm.c \
intel_ringbuffer.c \
intel_sdvo.c \
intel_sideband.c \
intel_sprite.c \
intel_uncore.c \
kms_display.c \
utils.c \
../hdmi.c \
Gtt/intel-agp.c \
Gtt/intel-gtt.c \
../drm_global.c \
../drm_drv.c \
../drm_vma_manager.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_rect.c \
$(DRM_TOPDIR)/drm_stub.c
SRC_DEP:=
NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
$(patsubst %.c, %.o, $(NAME_SRC))))
all: $(NAME).dll
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) i915.lds Makefile
ld -L$(LIBPATH) $(LDFLAGS) -T i915.lds -o $@ $(NAME_OBJS) $(LIBS)
%.o : %.c $(HFILES) Makefile
$(CC) $(CFLAGS) $(DEFINES) -o $@ $<
%.o : %.S $(HFILES) Makefile
as -o $@ $<
clean:
-rm -f ../*/*.o


@ -0,0 +1,123 @@
CC = gcc
FASM = e:/fasm/fasm.exe
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm \
-I./ -I./render
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -mno-stack-arg-probe
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto
CFLAGS = -c $(INCLUDES) $(DEFINES) $(CFLAGS_OPT)
LIBPATH:= $(DDK_TOPDIR)
LIBS:= libddk.a libcore.a libgcc.a
LDFLAGS = -e,_drvEntry,-nostdlib,-shared,-s,--image-base,0,--file-alignment,512,--section-alignment,4096
NAME:= i915
HFILES:= $(DRV_INCLUDES)/linux/types.h \
$(DRV_INCLUDES)/linux/list.h \
$(DRV_INCLUDES)/linux/pci.h \
$(DRV_INCLUDES)/drm/drm.h \
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
$(DRV_INCLUDES)/drm/drm_mode.h \
$(DRV_INCLUDES)/drm/drm_mm.h \
i915_drv.h \
bitmap.h
NAME_SRC= main.c \
pci.c \
dvo_ch7017.c \
dvo_ch7xxx.c \
dvo_ivch.c \
dvo_ns2501.c \
dvo_sil164.c \
dvo_tfp410.c \
i915_dma.c \
i915_drv.c \
i915_gem.c \
i915_gem_context.c \
i915_gem_gtt.c \
i915_gem_execbuffer.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_irq.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dvo.c \
intel_fb.c \
intel_hdmi.c \
intel_i2c.c \
intel_lvds.c \
intel_modes.c \
intel_opregion.c \
intel_panel.c \
intel_pm.c \
intel_ringbuffer.c \
intel_sdvo.c \
intel_sideband.c \
intel_sprite.c \
intel_uncore.c \
kms_display.c \
utils.c \
../hdmi.c \
Gtt/intel-agp.c \
Gtt/intel-gtt.c \
../drm_global.c \
../drm_drv.c \
../drm_vma_manager.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_rect.c \
$(DRM_TOPDIR)/drm_stub.c
SRC_DEP:=
NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
$(patsubst %.c, %.o, $(NAME_SRC))))
all: $(NAME).dll
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) i915.lds Makefile.lto
$(CC) $(CFLAGS_OPT) -fwhole-program -nostdlib -Wl,-L$(LIBPATH),$(LDFLAGS),-T,i915.lds -o $@ $(NAME_OBJS) $(LIBS)
%.o : %.c $(HFILES) Makefile.lto
$(CC) $(CFLAGS) $(DEFINES) -o $@ $<
%.o : %.S $(HFILES) Makefile.lto
as -o $@ $<
clean:
-rm -f ../*/*.o


@ -32,12 +32,14 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define CH7xxx_REG_DID 0x4b
#define CH7011_VID 0x83 /* 7010 as well */
#define CH7010B_VID 0x05
#define CH7009A_VID 0x84
#define CH7009B_VID 0x85
#define CH7301_VID 0x95
#define CH7xxx_VID 0x84
#define CH7xxx_DID 0x17
#define CH7010_DID 0x16
#define CH7xxx_NUM_REGS 0x4c
@ -87,11 +89,20 @@ static struct ch7xxx_id_struct {
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
{ CH7010B_VID, "CH7010B" },
{ CH7009A_VID, "CH7009A" },
{ CH7009B_VID, "CH7009B" },
{ CH7301_VID, "CH7301" },
};
static struct ch7xxx_did_struct {
uint8_t did;
char *name;
} ch7xxx_dids[] = {
{ CH7xxx_DID, "CH7XXX" },
{ CH7010_DID, "CH7010B" },
};
struct ch7xxx_priv {
bool quiet;
};
@ -108,6 +119,18 @@ static char *ch7xxx_get_id(uint8_t vid)
return NULL;
}
static char *ch7xxx_get_did(uint8_t did)
{
int i;
for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
if (ch7xxx_dids[i].did == did)
return ch7xxx_dids[i].name;
}
return NULL;
}
/** Reads an 8 bit register */
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
@ -179,7 +202,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
uint8_t vendor, device;
char *name;
char *name, *devid;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
if (ch7xxx == NULL)
@ -204,7 +227,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
goto out;
if (device != CH7xxx_DID) {
devid = ch7xxx_get_did(device);
if (!devid) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
@ -283,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
idf |= CH7xxx_IDF_HSP;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
idf |= CH7xxx_IDF_HSP;
idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
}


@ -45,7 +45,6 @@
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
#define BEGIN_LP_RING(n) \
@ -991,7 +990,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
case I915_PARAM_HAS_SECURE_BATCHES:
value = 1;
break;
case I915_PARAM_HAS_PINNED_BATCHES:
@ -1004,8 +1003,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
}
@ -1150,7 +1148,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
// intel_register_dsm_handler();
/* Initialise stolen first so that we may reserve preallocated
* objects for the BIOS to KMS transition.
@ -1177,10 +1175,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
if (INTEL_INFO(dev)->num_pipes == 0) {
dev_priv->mm.suspended = 0;
if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
}
ret = intel_fbdev_init(dev);
if (ret)
@ -1206,9 +1202,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
/* We're off and running w/KMS */
dev_priv->mm.suspended = 0;
return 0;
cleanup_gem:
@ -1217,13 +1210,13 @@ cleanup_gem:
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_irq:
// drm_irq_uninstall(dev);
// drm_irq_uninstall(dev);
cleanup_gem_stolen:
// i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
// vga_switcheroo_unregister_client(dev->pdev);
// vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
// vga_client_register(dev->pdev, NULL, NULL, NULL);
// vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
return ret;
}
@ -1235,31 +1228,19 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = dev_priv->info;
#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
#define DEV_INFO_SEP ,
#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
info->gen,
dev_priv->dev->pdev->device,
DEV_INFO_FLAGS);
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
}
/**
* intel_early_sanitize_regs - clean up BIOS state
* @dev: DRM device
*
* This function must be called before we do any I915_READ or I915_WRITE. Its
* purpose is to clean up any state left by the BIOS that may affect us when
* reading and/or writing registers.
*/
static void intel_early_sanitize_regs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_HASWELL(dev))
I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
/**
@ -1291,14 +1272,39 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->backlight.lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->pc8.lock);
dev_priv->pc8.requirements_met = false;
dev_priv->pc8.gpu_idle = false;
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
i915_dump_device_info(dev_priv);
/* Not all pre-production machines fall into this category, only the
* very first ones. Almost everything should work, except for maybe
* suspend/resume. And we don't implement workarounds that affect only
* pre-production machines. */
if (IS_HSW_EARLY_SDV(dev))
DRM_INFO("This is an early pre-production Haswell machine. "
"It may not be fully functional.\n");
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
}
mmio_bar = IS_GEN2(dev) ? 1 : 0;
mmio_bar = IS_GEN2(dev) ? 1 : 0;
/* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
* in the same BAR, so we want to restrict this ioremap from
@ -1312,13 +1318,23 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
mmio_size = 2*1024*1024;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_bridge;
}
}
intel_early_sanitize_regs(dev);
intel_uncore_early_sanitize(dev);
if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
* NB: We can't write IDICR yet because we do not have gt funcs
* set up */
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
ret = i915_gem_gtt_init(dev);
if (ret)
@ -1355,18 +1371,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* workqueue at any time. Use an ordered one.
*/
dev_priv->wq = alloc_ordered_workqueue("i915", 0);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
goto out_mtrrfree;
}
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
goto out_mtrrfree;
}
system_wq = dev_priv->wq;
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
intel_irq_init(dev);
intel_gt_init(dev);
intel_pm_init(dev);
intel_uncore_sanitize(dev);
intel_uncore_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@ -1389,25 +1407,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* stuck interrupts on some machines.
*/
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
dev_priv->num_plane = 1;
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
// ret = drm_vblank_init(dev, dev_priv->num_pipe);
// if (ret)
// goto out_gem_unload;
/* Start out suspended */
dev_priv->mm.suspended = 1;
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
@ -1420,6 +1423,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
main_device = dev;
return 0;
out_gem_unload:
@ -1433,17 +1438,11 @@ out_gem_unload:
// intel_teardown_mchbar(dev);
// destroy_workqueue(dev_priv->wq);
out_mtrrfree:
// if (dev_priv->mm.gtt_mtrr >= 0) {
// mtrr_del(dev_priv->mm.gtt_mtrr,
// dev_priv->mm.gtt_base_addr,
// aperture_size);
// dev_priv->mm.gtt_mtrr = -1;
// }
// io_mapping_free(dev_priv->mm.gtt_mapping);
// arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
// io_mapping_free(dev_priv->gtt.mappable);
// dev_priv->gtt.gtt_remove(dev);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
// dev_priv->gtt.gtt_remove(dev);
put_bridge:
// pci_dev_put(dev_priv->bridge_dev);
free_priv:
@ -1460,9 +1459,17 @@ int i915_driver_unload(struct drm_device *dev)
intel_gpu_ips_teardown();
if (HAS_POWER_WELL(dev)) {
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_set_power_well(dev, true);
i915_remove_power_well(dev);
}
i915_teardown_sysfs(dev);
if (dev_priv->mm.inactive_shrinker.shrink)
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
@ -1476,12 +1483,7 @@ int i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->gtt.mappable);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
dev_priv->gtt.mappable_base,
dev_priv->gtt.mappable_end);
dev_priv->mm.gtt_mtrr = -1;
}
arch_phys_wc_del(dev_priv->gtt.mtrr);
acpi_video_unregister();
@ -1494,10 +1496,10 @@ int i915_driver_unload(struct drm_device *dev)
* free the memory space allocated for the child device
* config parsed from VBT
*/
if (dev_priv->child_dev && dev_priv->child_dev_num) {
kfree(dev_priv->child_dev);
dev_priv->child_dev = NULL;
dev_priv->child_dev_num = 0;
if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
kfree(dev_priv->vbt.child_dev);
dev_priv->vbt.child_dev = NULL;
dev_priv->vbt.child_dev_num = 0;
}
vga_switcheroo_unregister_client(dev->pdev);
@ -1530,6 +1532,9 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
drm_mm_takedown(&dev_priv->gtt.base.mm);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
@ -1539,6 +1544,8 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
@ -1615,14 +1622,14 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@ -1635,35 +1642,35 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
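/* Note on the flag changes above: DRM_RENDER_ALLOW marks an ioctl as
* callable through a render node without DRM-master authentication, so
* the GEM, context and reg-read ioctls gain it while the legacy and
* modeset-only ioctls stay restricted. */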


@ -47,10 +47,6 @@
int init_display_kms(struct drm_device *dev);
struct drm_device *main_device;
struct drm_file *drm_file_handlers[256];
static int i915_modeset __read_mostly = 1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
@ -131,35 +127,47 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
unsigned int i915_preliminary_hw_support __read_mostly = true;
int i915_enable_psr __read_mostly = 0;
module_param_named(enable_psr, i915_enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support. (default: false)");
"Enable preliminary hardware support.");
int i915_disable_power_well __read_mostly = 0;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
"Disable the power well when possible (default: false)");
"Disable the power well when possible (default: true)");
int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
bool i915_fastboot __read_mostly = 0;
module_param_named(fastboot, i915_fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
"(default: false)");
int i915_enable_pc8 __read_mostly = 0;
module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
int i915_pc8_timeout __read_mostly = 5000;
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
bool i915_prefault_disable __read_mostly;
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
static struct drm_driver driver;
extern int intel_agp_enabled;
#define PCI_VENDOR_ID_INTEL 0x8086
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }
#define INTEL_QUANTA_VGA_DEVICE(info) { \
.class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = 0x16a, \
.subvendor = 0x152d, \
.subdevice = 0x8990, \
.driver_data = (unsigned long) info }
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
@ -270,6 +278,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
.has_fbc = 1,
};
static const struct intel_device_info intel_ivybridge_q_info = {
@ -298,121 +307,52 @@ static const struct intel_device_info intel_valleyview_d_info = {
static const struct intel_device_info intel_haswell_d_info = {
GEN7_FEATURES,
.is_haswell = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_vebox_ring = 1,
};
static const struct intel_device_info intel_haswell_m_info = {
GEN7_FEATURES,
.is_haswell = 1,
.is_mobile = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
.has_vebox_ring = 1,
};
/*
* Make sure the device matches here go from most specific to most
* general. For example, since the Quanta match is based on the subsystem
* and subvendor IDs, we need it to come before the more general IVB
* PCI ID matches, otherwise we'll use the wrong info struct above.
*/
#define INTEL_PCI_IDS \
INTEL_I915G_IDS(&intel_i915g_info), \
INTEL_I915GM_IDS(&intel_i915gm_info), \
INTEL_I945G_IDS(&intel_i945g_info), \
INTEL_I945GM_IDS(&intel_i945gm_info), \
INTEL_I965G_IDS(&intel_i965g_info), \
INTEL_G33_IDS(&intel_g33_info), \
INTEL_I965GM_IDS(&intel_i965gm_info), \
INTEL_GM45_IDS(&intel_gm45_info), \
INTEL_G45_IDS(&intel_g45_info), \
INTEL_PINEVIEW_IDS(&intel_pineview_info), \
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
INTEL_HSW_D_IDS(&intel_haswell_d_info), \
INTEL_HSW_M_IDS(&intel_haswell_m_info), \
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
INTEL_VLV_D_IDS(&intel_valleyview_d_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
INTEL_PCI_IDS,
{0, 0, 0}
};
@ -432,7 +372,6 @@ void intel_detect_pch(struct drm_device *dev)
*/
if (INTEL_INFO(dev)->num_pipes == 0) {
dev_priv->pch_type = PCH_NOP;
dev_priv->num_pch_pll = 0;
return;
}
@ -441,9 +380,15 @@ void intel_detect_pch(struct drm_device *dev)
* make graphics device passthrough work easy for VMM, that only
* need to expose ISA bridge to let driver know the real hardware
* underneath. This is a requirement from virtualization team.
*
* In some virtualized environments (e.g. XEN), there may be an
* irrelevant ISA bridge in the system. To work reliably, we should
* scan through all the ISA bridge devices and check for the first
* match, instead of only checking the first one.
*/
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
while (pch) {
struct pci_dev *curr = pch;
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
@ -451,36 +396,38 @@ void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
} else {
goto check_next;
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
break;
}
check_next:
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
// pci_dev_put(curr);
}
if (!pch)
DRM_DEBUG_KMS("No PCH found?\n");
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
@ -500,59 +447,449 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return 1;
}
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent);
int i915_init(void)
#if 0
static int i915_drm_freeze(struct drm_device *dev)
{
static pci_dev_t device;
const struct pci_device_id *ent;
int err;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
ent = find_pci_device(&device, pciidlist);
if( unlikely(ent == NULL) )
{
dbgprintf("device not found\n");
return -ENODEV;
};
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_SUSPENDED;
mutex_unlock(&dev_priv->modeset_restore_lock);
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
hsw_disable_package_c8(dev_priv);
intel_set_power_well(dev, true);
if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
}
drm_kms_helper_poll_disable(dev);
DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
device.pci_dev.device);
pci_save_state(dev->pdev);
if (intel_info->gen != 3) {
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int error;
} else if (init_agp() != 0) {
DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
return -ENODEV;
}
mutex_lock(&dev->struct_mutex);
error = i915_gem_idle(dev);
mutex_unlock(&dev->struct_mutex);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
return error;
}
err = drm_get_dev(&device.pci_dev, ent);
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
return err;
drm_irq_uninstall(dev);
dev_priv->enable_hotplug_processing = false;
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc);
intel_modeset_suspend_hw(dev);
}
i915_save_state(dev);
intel_opregion_fini(dev);
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
console_unlock();
return 0;
}
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
int error;
if (!dev || !dev->dev_private) {
DRM_ERROR("dev: %p\n", dev);
DRM_ERROR("DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
if (state.event == PM_EVENT_PRETHAW)
return 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
error = i915_drm_freeze(dev);
if (error)
return error;
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
return 0;
}
void intel_console_resume(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
console_resume_work);
struct drm_device *dev = dev_priv->dev;
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
}
static void intel_resume_hotplug(struct drm_device *dev)
{
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
static int __i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
i915_restore_state(dev);
intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
/* We need working interrupts for modeset enabling ... */
drm_irq_install(dev);
intel_modeset_init_hw(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
* bother with the tiny race here where we might lose hotplug
* notifications.
*/
intel_hpd_init(dev);
dev_priv->enable_hotplug_processing = true;
/* Config may have changed between suspend and resume */
intel_resume_hotplug(dev);
}
intel_opregion_init(dev);
/*
* The console lock can be pretty contended on resume due
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
if (console_trylock()) {
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
} else {
schedule_work(&dev_priv->console_resume_work);
}
/* Undo what we did at i915_drm_freeze so the refcount goes back to the
* expected level. */
hsw_enable_package_c8(dev_priv);
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
return error;
}
static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
intel_uncore_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
__i915_drm_thaw(dev);
return error;
}
int i915_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (pci_enable_device(dev->pdev))
return -EIO;
pci_set_master(dev->pdev);
intel_uncore_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need this since the BIOS might clear all our scratch PTEs.
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
!dev_priv->opregion.header) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
ret = __i915_drm_thaw(dev);
if (ret)
return ret;
drm_kms_helper_poll_enable(dev);
return 0;
}
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
* - re-init hardware status page
* - re-init ring buffer
* - re-init interrupt state
* - re-init display
*/
int i915_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool simulated;
int ret;
if (!i915_try_reset)
return 0;
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
simulated = dev_priv->gpu_error.stop_rings != 0;
if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
ret = -ENODEV;
} else {
ret = intel_gpu_reset(dev);
/* Also reset the gpu hangman. */
if (simulated) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
ret = 0;
}
} else
dev_priv->gpu_error.last_reset = get_seconds();
}
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
return ret;
}
/* Ok, now get things going again... */
/*
* Everything depends on having the GTT running, so we need to start
* there. Fortunately we don't need to do this unless we reset the
* chip at a PCI level.
*
* Next we need to restore the context, but we don't use those
* yet either...
*
* Ring buffer needs to be re-initialized in the KMS case, or if X
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
struct intel_ring_buffer *ring;
int i;
dev_priv->ums.mm_suspended = 0;
i915_gem_init_swizzling(dev);
for_each_ring(ring, dev_priv, i)
ring->init(ring);
i915_gem_context_init(dev);
if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret)
i915_gem_cleanup_aliasing_ppgtt(dev);
}
/*
* It would make sense to re-init all the other hw state, at
* least the rps/rc6/emon init done within modeset_init_hw. For
* some unknown reason, this blows up my ilk, so don't.
*/
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
return 0;
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This only
* confuses us, especially on systems where both functions have
* the same PCI-ID!
*/
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
* implementation for gen3 (and only gen3) that used legacy drm maps
* (gasp!) to share buffers between X and the client. Hence we need to
* keep around the fake agp stuff for gen3, even when kms is enabled. */
if (intel_info->gen != 3) {
driver.driver_features &=
~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
} else if (!intel_agp_enabled) {
DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
return -ENODEV;
}
return drm_get_pci_dev(pdev, ent, &driver);
}
static void
i915_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_put_dev(dev);
}
static int i915_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int error;
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
error = i915_drm_freeze(drm_dev);
if (error)
return error;
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return i915_resume(drm_dev);
}
static int i915_pm_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
return i915_drm_freeze(drm_dev);
}
static int i915_pm_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return i915_drm_thaw(drm_dev);
}
static int i915_pm_poweroff(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return i915_drm_freeze(drm_dev);
}
#endif
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_MODESET,
// .load = i915_driver_load,
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER,
.load = i915_driver_load,
// .unload = i915_driver_unload,
.open = i915_driver_open,
// .lastclose = i915_driver_lastclose,
@ -566,9 +903,12 @@ static struct drm_driver driver = {
// .device_is_agp = i915_driver_device_is_agp,
// .master_create = i915_master_create,
// .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
#endif
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
// .gem_vm_ops = &i915_gem_vm_ops,
// .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
// .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@ -589,85 +929,39 @@ static struct drm_driver driver = {
};
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
int i915_init(void)
{
static struct drm_device drm_dev;
static struct drm_file drm_file;
static pci_dev_t device;
const struct pci_device_id *ent;
int err;
struct drm_device *dev;
struct drm_file *priv;
ent = find_pci_device(&device, pciidlist);
if( unlikely(ent == NULL) )
{
dbgprintf("device not found\n");
return -ENODEV;
};
int ret;
drm_core_init();
dev = &drm_dev;
priv = &drm_file;
DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
device.pci_dev.device);
/*
if (intel_info->gen != 3) {
drm_file_handlers[0] = priv;
// ret = pci_enable_device(pdev);
// if (ret)
// goto err_g1;
pci_set_master(pdev);
// if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
// printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
// goto err_g2;
// }
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
spin_lock_init(&dev->count_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
idr_init(&priv->object_idr);
spin_lock_init(&priv->table_lock);
dev->driver = &driver;
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
goto err_g4;
} else if (init_agp() != 0) {
DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
return -ENODEV;
}
*/
err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
ret = i915_driver_load(dev, ent->driver_data );
if (ret)
goto err_g4;
ret = init_display_kms(dev);
if (ret)
goto err_g4;
return 0;
err_g4:
//err_g3:
// if (drm_core_check_feature(dev, DRIVER_MODESET))
// drm_put_minor(&dev->control);
//err_g2:
// pci_disable_device(pdev);
//err_g1:
return ret;
return err;
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
@ -777,16 +1071,16 @@ static bool IS_DISPLAYREG(u32 reg)
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
* chip from rc6 before touching it for real. MI_MODE is masked, hence
* harmless to write 0 into. */
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
I915_WRITE_NOTRACE(MI_MODE, 0);
}
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
if (IS_HASWELL(dev_priv->dev) &&
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
reg);
@ -797,57 +1091,10 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
if (IS_HASWELL(dev_priv->dev) &&
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed write to %x\n", reg);
I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
return val; \
}
__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
write##y(val, dev_priv->regs + reg); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
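/* The force-wake-aware register accessors deleted above are not lost:
* in 3.12 they live in intel_uncore.c, which is why the load path now
* calls intel_uncore_early_sanitize()/intel_uncore_init(). */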

[two file diffs suppressed because they are too large to display]

@ -113,7 +113,7 @@ static int get_context_size(struct drm_device *dev)
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev))
ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
@ -124,10 +124,10 @@ static int get_context_size(struct drm_device *dev)
return ret;
}
static void do_destroy(struct i915_hw_context *ctx)
void i915_gem_context_free(struct kref *ctx_ref)
{
if (ctx->file_priv)
idr_remove(&ctx->file_priv->context_idr, ctx->id);
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
@ -145,6 +145,7 @@ create_hw_context(struct drm_device *dev,
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
if (ctx->obj == NULL) {
kfree(ctx);
@ -154,8 +155,9 @@ create_hw_context(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
I915_CACHE_LLC_MLC);
if (ret)
I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret))
goto err_out;
}
@ -169,18 +171,18 @@ create_hw_context(struct drm_device *dev,
if (file_priv == NULL)
return ctx;
ctx->file_priv = file_priv;
ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
GFP_KERNEL);
if (ret < 0)
goto err_out;
ctx->file_priv = file_priv;
ctx->id = ret;
return ctx;
err_out:
do_destroy(ctx);
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
@ -212,13 +214,17 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret)
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
}
ret = do_switch(ctx);
if (ret)
if (ret) {
DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
goto err_unpin;
}
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
@ -226,7 +232,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
err_unpin:
i915_gem_object_unpin(ctx->obj);
err_destroy:
do_destroy(ctx);
i915_gem_context_unreference(ctx);
return ret;
}
@ -236,6 +242,7 @@ void i915_gem_context_init(struct drm_device *dev)
if (!HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
return;
}
@ -248,11 +255,13 @@ void i915_gem_context_init(struct drm_device *dev)
if (dev_priv->hw_context_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
return;
}
if (create_default_context(dev_priv)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
return;
}
@ -262,6 +271,7 @@ void i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
if (dev_priv->hw_contexts_disabled)
return;
@ -269,11 +279,16 @@ void i915_gem_context_fini(struct drm_device *dev)
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
// intel_gpu_reset(dev);
intel_gpu_reset(dev);
i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
i915_gem_object_unpin(dctx->obj);
do_destroy(dev_priv->ring[RCS].default_context);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
}
static int context_idr_cleanup(int id, void *p, void *data)
@ -282,7 +297,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
BUG_ON(id == DEFAULT_CONTEXT_ID);
do_destroy(ctx);
return 0;
}
@ -325,6 +340,7 @@ mi_set_context(struct intel_ring_buffer *ring,
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
if (IS_GEN7(ring->dev))
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
@ -332,7 +348,7 @@ mi_set_context(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, new_context->obj->gtt_offset |
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@ -353,16 +369,16 @@ mi_set_context(struct intel_ring_buffer *ring,
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = to->ring;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
if (from_obj == to->obj)
if (from == to)
return 0;
ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@ -382,7 +398,7 @@ static int do_switch(struct i915_hw_context *to)
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
else if (WARN_ON_ONCE(from == to)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
ret = mi_set_context(ring, to, hw_flags);
@ -397,9 +413,12 @@ static int do_switch(struct i915_hw_context *to)
* is a bit suboptimal because the retiring can occur simply after the
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring);
if (from != NULL) {
struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
i915_gem_object_move_to_active(from->obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@ -407,15 +426,26 @@ static int do_switch(struct i915_hw_context *to)
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
from_obj->dirty = 1;
BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj);
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
drm_gem_object_unreference(&from_obj->base);
ret = i915_add_request(ring, NULL);
if (ret) {
/* Too late, we've already scheduled a context switch.
* Try to undo the change so that the hw state is
* consistent with our tracking. In case of emergency,
* scream.
*/
WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
return ret;
}
i915_gem_object_unpin(from->obj);
i915_gem_context_unreference(from);
}
drm_gem_object_reference(&to->obj->base);
ring->last_context_obj = to->obj;
i915_gem_context_reference(to);
ring->last_context = to;
to->is_initialized = true;
return 0;
@ -444,6 +474,8 @@ int i915_switch_context(struct intel_ring_buffer *ring,
if (dev_priv->hw_contexts_disabled)
return 0;
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (ring != &dev_priv->ring[RCS])
return 0;
@ -513,7 +545,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
do_destroy(ctx);
mutex_unlock(&dev->struct_mutex);


@ -35,16 +35,12 @@
#define I915_EXEC_SECURE (1<<9)
#define I915_EXEC_IS_PINNED (1<<10)
#define I915_EXEC_VEBOX (4<<0)
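/* Local write barrier: SFENCE orders every earlier store before any
* later one, standing in for the kernel's wmb() in this port. */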
#define wmb() asm volatile ("sfence")
struct drm_i915_gem_object *get_fb_obj();
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
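/* n & (n - 1) clears the lowest set bit, so the result is zero
* exactly when at most one bit is set; the n != 0 test excludes 0. */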
return (n != 0 && ((n & (n - 1)) == 0));
}
static unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
@ -204,7 +200,8 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *reloc)
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
@ -218,7 +215,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -ENOENT;
target_i915_obj = to_intel_bo(target_obj);
target_offset = target_i915_obj->gtt_offset;
target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
@ -308,7 +305,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
reloc->offset += i915_gem_obj_ggtt_offset(obj);
reloc_page = (void*)MapIoMem(reloc->offset & PAGE_MASK, 4096, 3);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
@ -324,7 +321,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
struct eb_objects *eb,
struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
@ -347,7 +345,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
vm);
if (ret)
return ret;
@ -367,13 +366,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs)
struct drm_i915_gem_relocation_entry *relocs,
struct i915_address_space *vm)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
vm);
if (ret)
return ret;
}
@ -382,7 +383,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
i915_gem_execbuffer_relocate(struct eb_objects *eb)
i915_gem_execbuffer_relocate(struct eb_objects *eb,
struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@ -394,9 +396,9 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
* acquire the struct mutex again. Obviously this is bad and so
* lockdep complains vehemently.
*/
// pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
if (ret)
break;
}
@ -418,6 +420,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct i915_address_space *vm,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@ -426,20 +429,16 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
bool need_fence, need_mappable;
int ret;
// ENTER();
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
false);
if (ret)
{
FAIL();
return ret;
};
entry->flags |= __EXEC_OBJECT_HAS_PIN;
@ -447,10 +446,7 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj);
if (ret)
{
FAIL();
return ret;
};
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@ -467,8 +463,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
if (entry->offset != obj->gtt_offset) {
entry->offset = obj->gtt_offset;
if (entry->offset != i915_gem_obj_offset(obj, vm)) {
entry->offset = i915_gem_obj_offset(obj, vm);
*need_reloc = true;
}
@ -489,7 +485,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space)
if (!i915_gem_obj_bound_any(obj))
return;
entry = obj->exec_entry;
@ -506,6 +502,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
struct i915_address_space *vm,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@ -513,8 +510,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
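/* Pre-gen4 hardware needs a fence register for tiled GPU access,
* hence the extra mappable/fence bookkeeping below. */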
int retry;
// ENTER();
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
@ -562,31 +557,35 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
u32 obj_offset;
if (!obj->gtt_space)
if (!i915_gem_obj_bound(obj, vm))
continue;
obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
if ((entry->alignment &&
obj_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
else
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (obj->gtt_space)
if (i915_gem_obj_bound(obj, vm))
continue;
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
@ -596,10 +595,7 @@ err: /* Decrement pin count for bound objects */
i915_gem_execbuffer_unreserve_object(obj);
if (ret != -ENOSPC || retry++)
{
// LEAVE();
return ret;
};
// ret = i915_gem_evict_everything(ring->dev);
if (ret)
@ -613,7 +609,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec)
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
@ -697,14 +694,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
reloc + reloc_offset[offset],
vm);
if (ret)
goto err;
}
@ -727,6 +725,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
{
struct drm_i915_gem_object *obj;
uint32_t flush_domains = 0;
bool flush_chipset = false;
int ret;
list_for_each_entry(obj, objects, exec_list) {
@ -735,12 +734,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
flush_chipset |= i915_gem_clflush_object(obj, false);
flush_domains |= obj->base.write_domain;
}
if (flush_domains & I915_GEM_DOMAIN_CPU)
if (flush_chipset)
i915_gem_chipset_flush(ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
@ -803,6 +802,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@ -817,12 +817,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
/* FIXME: This lookup gets fixed later <-- danvet */
list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
intel_mark_fb_busy(obj);
intel_mark_fb_busy(obj, ring);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
@ -832,13 +834,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
struct intel_ring_buffer *ring,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
(void)i915_add_request(ring, file, NULL);
(void)__i915_add_request(ring, file, obj, NULL);
}
static int
@ -870,7 +873,8 @@ static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
@ -884,17 +888,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
bool need_relocs;
if (!i915_gem_check_execbuffer(args))
{
FAIL();
return -EINVAL;
}
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
{
FAIL();
return ret;
};
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
@ -911,21 +909,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
ring = &dev_priv->ring[VCS];
if (ctx_id != 0) {
if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
FAIL();
return -EPERM;
}
break;
case I915_EXEC_BLT:
ring = &dev_priv->ring[BCS];
if (ctx_id != 0) {
if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
}
break;
case I915_EXEC_VEBOX:
ring = &dev_priv->ring[VECS];
if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
}
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
@ -1003,7 +1009,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
if (dev_priv->mm.suspended) {
if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
@ -1028,17 +1034,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
ret = i915_gem_execbuffer_relocate(eb);
ret = i915_gem_execbuffer_relocate(eb, vm);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec);
eb, exec, vm);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@ -1089,7 +1095,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_start = i915_gem_obj_offset(batch_obj, vm) +
args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@ -1103,8 +1110,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
eb_destroy(eb);
@ -1112,7 +1119,7 @@ err:
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
kfree(cliprects);
return ret;
}
@ -1122,16 +1129,14 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
// ENTER();
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
FAIL();
return -EINVAL;
}
@ -1140,7 +1145,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
FAIL();
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
@ -1155,7 +1159,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
&dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,


@ -35,10 +35,12 @@
#include "i915_trace.h"
#include "intel_drv.h"
typedef uint32_t gen6_gtt_pte_t;
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
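A quick sanity check on these two constants: with 4 KiB pages and 4-byte gen6 PTEs, one page table holds 1024 entries, so 512 page-directory entries cover 512 * 1024 * 4 KiB = 2 GiB of PPGTT address space. A minimal stand-alone sketch of that arithmetic (the 4096-byte page size is an assumption here, not taken from the kernel headers):

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE  4096u   /* assumed x86 page size */
#define EXAMPLE_PD_ENTRIES 512u    /* GEN6_PPGTT_PD_ENTRIES */
#define EXAMPLE_PT_ENTRIES (EXAMPLE_PAGE_SIZE / sizeof(uint32_t))

int main(void)
{
    uint64_t total = (uint64_t)EXAMPLE_PD_ENTRIES *
                     EXAMPLE_PT_ENTRIES * EXAMPLE_PAGE_SIZE;

    assert(EXAMPLE_PT_ENTRIES == 1024); /* PTEs per page table */
    assert(total == 2ull << 30);        /* 2 GiB of PPGTT space */
    return 0;
}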
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
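The two ADDR_ENCODE macros above fold the high physical-address bits into those low PTE bits: shifting right by 28 moves address bits 39:32 down to bits 11:4, and the mask (0xff0 on gen6/7, 0x7f0 on Haswell) keeps only that window, while bits 31:12 of a page-aligned address stay where they are. A hedged stand-alone check (the sample address is invented; the real PTE write then truncates to 32 bits):

#include <assert.h>
#include <stdint.h>

/* local copy of GEN6_GTT_ADDR_ENCODE for illustration */
#define EXAMPLE_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

int main(void)
{
    uint64_t addr = 0x123456000ull; /* page-aligned, physical bit 32 set */
    uint64_t pte  = EXAMPLE_ADDR_ENCODE(addr);

    assert((pte & 0xff0) == 0x010); /* addr bits 39:32 (0x01) -> PTE bits 11:4 */
    assert((pte & 0xfffff000ull) == (addr & 0xfffff000ull)); /* 31:12 untouched */
    return 0;
}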
@ -48,50 +50,124 @@ typedef uint32_t gen6_gtt_pte_t;
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
dma_addr_t addr,
/* Cacheability Control is a 4-bit value. The low three bits are stored in
* bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
*/
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
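Worked through, HSW_CACHEABILITY_CONTROL(bits) puts the low three bits of the 4-bit field at PTE bits 3:1 and the top bit at PTE bit 11, so 0x2 encodes to 0x004 and 0xb to 0x806. A small check that duplicates the macro locally:

#include <assert.h>

#define EXAMPLE_HSW_CC(bits) ((((bits) & 0x7) << 1) | \
                              (((bits) & 0x8) << (11 - 3)))

int main(void)
{
    assert(EXAMPLE_HSW_CC(0x2) == 0x004); /* HSW_WB_LLC_AGE3 */
    assert(EXAMPLE_HSW_CC(0x3) == 0x006); /* HSW_WB_LLC_AGE0 */
    assert(EXAMPLE_HSW_CC(0xb) == 0x806); /* HSW_WB_ELLC_LLC_AGE0 */
    assert(EXAMPLE_HSW_CC(0x6) == 0x00c); /* HSW_WT_ELLC_LLC_AGE0 */
    return 0;
}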
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_L3_LLC:
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
pte |= GEN6_PTE_UNCACHED;
break;
default:
WARN_ON(1);
}
return pte;
}
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_LLC_MLC:
/* Haswell doesn't set L3 this way */
if (IS_HASWELL(dev))
pte |= GEN6_PTE_CACHE_LLC;
else
pte |= GEN6_PTE_CACHE_LLC_MLC;
case I915_CACHE_L3_LLC:
pte |= GEN7_PTE_CACHE_L3_LLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(dev))
pte |= HSW_PTE_UNCACHED;
else
pte |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
WARN_ON(1);
}
return pte;
}
static int gen6_ppgtt_enable(struct drm_device *dev)
#define BYT_PTE_WRITEABLE (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
pte |= BYT_PTE_WRITEABLE;
if (level != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
return pte;
}
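On Valleyview the PTE low bits are plain OR-ed flags, so a snooped (cacheable) page from byt_pte_encode() carries valid, writeable and snooped all at once. A tiny illustration with the flag values copied from the defines above:

#include <assert.h>

int main(void)
{
    unsigned valid    = 1 << 0; /* GEN6_PTE_VALID */
    unsigned writable = 1 << 1; /* BYT_PTE_WRITEABLE */
    unsigned snooped  = 1 << 2; /* BYT_PTE_SNOOPED_BY_CPU_CACHES */

    /* low bits of a byt_pte_encode() result for a cached page */
    assert((valid | writable | snooped) == 0x7);
    return 0;
}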
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
pte |= HSW_WB_LLC_AGE3;
return pte;
}
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_NONE:
break;
case I915_CACHE_WT:
pte |= HSW_WT_ELLC_LLC_AGE0;
break;
default:
pte |= HSW_WB_ELLC_LLC_AGE0;
break;
}
return pte;
}
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
gen6_gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
WARN_ON(ppgtt->pd_offset & 0x3f);
pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
@ -104,6 +180,19 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
}
static int gen6_ppgtt_enable(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
BUG_ON(ppgtt->pd_offset & 0x3f);
gen6_write_pdes(ppgtt);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
@ -152,18 +241,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
}
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
unsigned num_entries)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
scratch_pte = gen6_pte_encode(ppgtt->dev,
ppgtt->scratch_page_dma_addr,
I915_CACHE_LLC);
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
pt_vaddr = AllocKernelSpace(4096);
@ -188,11 +277,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
FreeKernelSpace(pt_vaddr);
}
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
@ -210,25 +301,28 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
dma_addr_t page_addr;
page_addr = sg_page_iter_dma_address(&sg_iter);
pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
cache_level);
pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
act_pt++;
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
act_pte = 0;
}
}
}
FreeKernelSpace(pt_vaddr);
}
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
int i;
drm_mm_takedown(&ppgtt->base.mm);
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(ppgtt->dev->pdev,
pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
@ -242,7 +336,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->dev;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned first_pd_entry_in_global_pt;
int i;
@ -253,11 +347,13 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* now. */
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
ppgtt->clear_range = gen6_ppgtt_clear_range;
ppgtt->insert_entries = gen6_ppgtt_insert_entries;
ppgtt->cleanup = gen6_ppgtt_cleanup;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
@ -271,19 +367,19 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
PCI_DMA_BIDIRECTIONAL);
ppgtt->pt_dma_addr[i] = pt_addr;
}
ppgtt->clear_range(ppgtt, 0,
ppgtt->base.clear_range(&ppgtt->base, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
@ -317,8 +413,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return -ENOMEM;
ppgtt->dev = dev;
ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
ppgtt->base.dev = dev;
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
@ -326,9 +421,12 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
BUG();
if (ret)
kfree(ppgtt);
else
kfree(ppgtt);
else {
dev_priv->mm.aliasing_ppgtt = ppgtt;
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
}
return ret;
}
@ -341,7 +439,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return;
ppgtt->cleanup(ppgtt);
ppgtt->base.cleanup(&ppgtt->base);
dev_priv->mm.aliasing_ppgtt = NULL;
}
@ -349,16 +447,16 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
ppgtt->insert_entries(ppgtt, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
ppgtt->clear_range(ppgtt,
obj->gtt_space->start >> PAGE_SHIFT,
ppgtt->base.clear_range(&ppgtt->base,
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
@ -406,11 +504,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
dev_priv->gtt.total / PAGE_SIZE);
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
i915_gem_clflush_object(obj, obj->pin_display);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
@ -436,12 +535,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_insert_entries(struct drm_device *dev,
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t __iomem *gtt_entries =
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
@ -450,7 +549,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
i++;
}
@ -461,8 +560,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1])
!= gen6_pte_encode(dev, addr, level));
WARN_ON(readl(&gtt_entries[i-1]) !=
vm->pte_encode(addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@ -472,11 +571,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
static void gen6_ggtt_clear_range(struct drm_device *dev,
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@ -486,17 +585,16 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
// "First entry = %d; Num entries = %d (max=%d)\n",
// first_entry, num_entries, max_entries))
if (num_entries > max_entries)
num_entries = max_entries;
scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
I915_CACHE_LLC);
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
static void i915_ggtt_insert_entries(struct drm_device *dev,
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level)
@ -508,7 +606,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev,
}
static void i915_ggtt_clear_range(struct drm_device *dev,
static void i915_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
@ -521,9 +619,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
entry,
cache_level);
obj->has_global_gtt_mapping = 1;
@ -533,9 +632,10 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
dev_priv->gtt.gtt_clear_range(obj->base.dev,
obj->gtt_space->start >> PAGE_SHIFT,
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
entry,
obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
@ -587,7 +687,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@ -595,37 +696,38 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
obj->gtt_offset, obj->base.size);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
int ret;
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
obj->gtt_offset,
obj->base.size,
false);
WARN_ON(i915_gem_obj_ggtt_bound(obj));
ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
list_add(&vma->vma_link, &obj->vma_list);
}
dev_priv->gtt.start = start;
dev_priv->gtt.total = end - start;
dev_priv->gtt.base.start = start;
dev_priv->gtt.base.total = end - start;
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
hole_start, hole_end) {
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
(hole_end-hole_start) / PAGE_SIZE);
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
}
/* And finally clear the reserved guard page */
dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
}
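The setup above deliberately reserves one guard page at the very top: the drm_mm range stops one page short of end, and the final PTE is cleared back to scratch so GPU prefetch past the last object cannot wander outside the aperture. A stand-alone sketch of the arithmetic (the 256 MiB GGTT size is an assumed example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t page  = 4096;
    const uint64_t start = 0;
    const uint64_t end   = 256ull << 20; /* assumed GGTT end */

    uint64_t managed   = end - start - page; /* length given to drm_mm_init() */
    uint64_t guard_pte = end / page - 1;     /* index cleared to scratch */

    assert(managed == (256ull << 20) - 4096);
    assert(guard_pte == 65535);
    return 0;
}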
static bool
@ -648,7 +750,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
gtt_size = dev_priv->gtt.total;
gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
#if 0
@ -658,22 +760,22 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 7) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size);
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size-LFB_SIZE);
ret = i915_gem_init_aliasing_ppgtt(dev);
if (!ret)
return;
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
drm_mm_takedown(&dev_priv->mm.gtt_space);
gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
drm_mm_takedown(&dev_priv->gtt.base.mm);
gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
#endif
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size);
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size-LFB_SIZE);
}
static int setup_scratch_page(struct drm_device *dev)
@ -685,7 +787,7 @@ static int setup_scratch_page(struct drm_device *dev)
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
get_page(page);
set_pages_uc(page, 1);
#ifdef CONFIG_INTEL_IOMMU
@ -696,8 +798,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
dev_priv->gtt.scratch_page = page;
dev_priv->gtt.scratch_page_dma = dma_addr;
dev_priv->gtt.base.scratch.page = page;
dev_priv->gtt.base.scratch.addr = dma_addr;
return 0;
}
@ -705,11 +807,13 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
set_pages_wb(dev_priv->gtt.scratch_page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
struct page *page = dev_priv->gtt.base.scratch.page;
set_pages_wb(page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(dev_priv->gtt.scratch_page);
__free_page(dev_priv->gtt.scratch_page);
put_page(page);
__free_page(page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@ -755,7 +859,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
/* For Modern GENs the PTEs and register space are split in the BAR */
@ -772,17 +876,18 @@ static int gen6_gmch_probe(struct drm_device *dev,
if (ret)
DRM_ERROR("Scratch setup failed\n");
dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
return ret;
}
static void gen6_gmch_remove(struct drm_device *dev)
static void gen6_gmch_remove(struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = dev->dev_private;
iounmap(dev_priv->gtt.gsm);
teardown_scratch_page(dev_priv->dev);
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
static int i915_gmch_probe(struct drm_device *dev,
@ -803,13 +908,13 @@ static int i915_gmch_probe(struct drm_device *dev,
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
return 0;
}
static void i915_gmch_remove(struct drm_device *dev)
static void i915_gmch_remove(struct i915_address_space *vm)
{
// intel_gmch_remove();
}
@ -821,27 +926,35 @@ int i915_gem_gtt_init(struct drm_device *dev)
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
dev_priv->gtt.gtt_probe = i915_gmch_probe;
dev_priv->gtt.gtt_remove = i915_gmch_remove;
gtt->gtt_probe = i915_gmch_probe;
gtt->base.cleanup = i915_gmch_remove;
} else {
dev_priv->gtt.gtt_probe = gen6_gmch_probe;
dev_priv->gtt.gtt_remove = gen6_gmch_remove;
gtt->gtt_probe = gen6_gmch_probe;
gtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
gtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
gtt->base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
gtt->base.pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
gtt->base.pte_encode = ivb_pte_encode;
else
gtt->base.pte_encode = snb_pte_encode;
}
ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
&dev_priv->gtt.stolen_size,
&gtt->mappable_base,
&gtt->mappable_end);
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
&gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;
gtt->base.dev = dev;
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n",
dev_priv->gtt.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
dev_priv->gtt.mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
dev_priv->gtt.stolen_size >> 20);
gtt->base.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
return 0;
}


@ -45,46 +45,48 @@
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
struct resource *r;
u32 base;
/* On the machines I have tested the Graphics Base of Stolen Memory
* is unreliable, so on those compute the base by subtracting the
* stolen memory from the Top of Low Usable DRAM which is where the
* BIOS places the graphics stolen memory.
/* Almost universally we can find the Graphics Base of Stolen Memory
* at offset 0x5c in the igfx configuration space. On a few (desktop)
* machines this is also mirrored in the bridge device at different
* locations, or in the MCHBAR. On gen2, the layout is again slightly
* different with the Graphics Segment immediately following Top of
* Memory (or Top of Usable DRAM). Note it appears that TOUD is only
* reported by 865g, so we just use the top of memory as determined
* by the e820 probe.
*
* On gen2, the layout is slightly different with the Graphics Segment
* immediately following Top of Memory (or Top of Usable DRAM). Note
* it appears that TOUD is only reported by 865g, so we just use the
* top of memory as determined by the e820 probe.
*
* XXX gen2 requires an unavailable symbol and 945gm fails with
* its value of TOLUD.
* XXX However gen2 requires an unavailable symbol.
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 6) {
/* Read Base Data of Stolen Memory Register (BDSM) directly.
* Note that there is also an MCHBAR mirror at 0x1080c0 or
* we could use device 2:0x5c instead.
*/
pci_read_config_dword(pdev, 0xB0, &base);
base &= ~4095; /* lower bits used for locking register */
} else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
if (INTEL_INFO(dev)->gen >= 3) {
/* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(pdev, 0xA4, &base);
pci_read_config_dword(dev->pdev, 0x5c, &base);
base &= ~((1<<20) - 1);
} else { /* GEN2 */
#if 0
} else if (IS_GEN3(dev)) {
u8 val;
/* Stolen is immediately below Top of Low Usable DRAM */
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
base -= dev_priv->mm.gtt->stolen_size;
} else {
/* Stolen is immediately above Top of Memory */
base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
}
if (base == 0)
return 0;
#if 0
/* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)dev_priv->gtt.stolen_size);
base = 0;
}
#endif
return base;
}
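With this rework, on gen3+ the stolen base comes straight out of the igfx PCI config space: the dword at offset 0x5c holds the Graphics Base of Stolen Memory in its upper bits, and masking off the low 20 bits yields the 1 MiB-aligned physical base. A hedged illustration of just the masking (the raw register value is invented):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t raw  = 0x7fa00009;              /* pretend dword read at 0x5c */
    uint32_t base = raw & ~((1u << 20) - 1); /* drop the low 20 bits */

    assert(base == 0x7fa00000);              /* 1 MiB-aligned stolen base */
    return 0;
}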
@ -92,32 +94,37 @@ static int i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
int ret;
compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
if (!compressed_fb)
goto err_llb;
/* Try to over-allocate to reduce reallocations and fragmentation */
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
size <<= 1, 4096, 0);
if (!compressed_fb)
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
size >>= 1, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret)
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
size >>= 1, 4096,
DRM_MM_SEARCH_DEFAULT);
if (ret)
goto err_llb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
compressed_llb = drm_mm_get_block(compressed_llb,
4096, 4096);
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;
dev_priv->compressed_llb = compressed_llb;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
4096, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret)
goto err_fb;
dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + compressed_fb->start);
@ -125,8 +132,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
dev_priv->mm.stolen_base + compressed_llb->start);
}
dev_priv->compressed_fb = compressed_fb;
dev_priv->cfb_size = size;
dev_priv->fbc.compressed_fb = compressed_fb;
dev_priv->fbc.size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@ -134,8 +141,11 @@ static int i915_setup_compression(struct drm_device *dev, int size)
return 0;
err_fb:
drm_mm_put_block(compressed_fb);
err:
kfree(compressed_llb);
drm_mm_remove_node(compressed_fb);
err_llb:
kfree(compressed_fb);
// pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@ -143,10 +153,10 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->mm.stolen_base == 0)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
if (size < dev_priv->cfb_size)
if (size < dev_priv->fbc.size)
return 0;
/* Release any current block */
@ -159,22 +169,29 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->cfb_size == 0)
if (dev_priv->fbc.size == 0)
return;
if (dev_priv->compressed_fb)
drm_mm_put_block(dev_priv->compressed_fb);
if (dev_priv->fbc.compressed_fb) {
drm_mm_remove_node(dev_priv->fbc.compressed_fb);
kfree(dev_priv->fbc.compressed_fb);
}
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
}
dev_priv->cfb_size = 0;
dev_priv->fbc.size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
i915_gem_stolen_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
}
@ -182,6 +199,10 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
if (dev_priv->gtt.stolen_size == 0)
return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
@ -190,8 +211,15 @@ int i915_gem_init_stolen(struct drm_device *dev)
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
if (IS_VALLEYVIEW(dev))
bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
return 0;
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
bios_reserved);
return 0;
}
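So on VLV/BYT the stolen allocator is handed deliberately less than the full region, keeping the BIOS-reserved top megabyte off limits. A quick stand-alone check (the 32 MiB stolen size is an assumed example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t stolen_size   = 32ull << 20; /* assumed total stolen memory */
    uint64_t bios_reserved = 1ull << 20;  /* top 1M on VLV/BYT */

    /* range handed to drm_mm_init() for the stolen allocator */
    assert(stolen_size - bios_reserved == 31ull << 20);
    return 0;
}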
@ -259,9 +287,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj == NULL)
return NULL;
if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
goto cleanup;
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev,
@ -270,12 +296,11 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
goto cleanup;
obj->has_dma_mapping = true;
obj->pages_pin_count = 1;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->cache_level = I915_CACHE_NONE;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
return obj;
@ -290,25 +315,32 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
if (dev_priv->mm.stolen_base == 0)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
if (size == 0)
return NULL;
stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (stolen)
stolen = drm_mm_get_block(stolen, size, 4096);
if (stolen == NULL)
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return NULL;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
4096, DRM_MM_SEARCH_DEFAULT);
if (ret) {
kfree(stolen);
return NULL;
}
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj)
return obj;
drm_mm_put_block(stolen);
drm_mm_remove_node(stolen);
kfree(stolen);
return NULL;
}
@ -319,10 +351,13 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
int ret;
if (dev_priv->mm.stolen_base == 0)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
@ -330,58 +365,79 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
/* KISS and expect everything to be page-aligned */
BUG_ON(stolen_offset & 4095);
BUG_ON(gtt_offset & 4095);
BUG_ON(size & 4095);
if (WARN_ON(size == 0))
return NULL;
stolen = drm_mm_create_block(&dev_priv->mm.stolen,
stolen_offset, size,
false);
if (stolen == NULL) {
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return NULL;
stolen->start = stolen_offset;
stolen->size = size;
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
kfree(stolen);
return NULL;
}
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
drm_mm_put_block(stolen);
drm_mm_remove_node(stolen);
kfree(stolen);
return NULL;
}
/* Some objects just need physical mem from stolen space */
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
vma = i915_gem_vma_create(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
}
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur
* later.
*/
if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
gtt_offset, size,
false);
if (obj->gtt_space == NULL) {
vma->node.start = gtt_offset;
vma->node.size = size;
if (drm_mm_initialized(&ggtt->mm)) {
ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
drm_gem_object_unreference(&obj->base);
return NULL;
goto err_vma;
}
}
} else
obj->gtt_space = I915_GTT_RESERVED;
obj->gtt_offset = gtt_offset;
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
return obj;
err_vma:
i915_gem_vma_destroy(vma);
err_out:
drm_mm_remove_node(stolen);
kfree(stolen);
drm_gem_object_unreference(&obj->base);
return NULL;
}
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
drm_mm_put_block(obj->stolen);
drm_mm_remove_node(obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
}


@ -287,18 +287,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
if (obj->gtt_offset & ~I915_FENCE_START_MASK)
if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
if (obj->gtt_offset & ~I830_FENCE_START_MASK)
if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
}
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
if (obj->gtt_space->size != size)
if (i915_gem_obj_ggtt_size(obj) != size)
return false;
if (obj->gtt_offset & (size - 1))
if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
return false;
return true;
@ -378,18 +378,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
obj->map_and_fenceable =
obj->gtt_space == NULL ||
(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
!i915_gem_obj_ggtt_bound(obj) ||
(i915_gem_obj_ggtt_offset(obj) +
obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
u32 unfenced_align =
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
ret = i915_gem_object_ggtt_unbind(obj);
}
if (ret == 0) {

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -24,6 +24,8 @@
#define trace_i915_ring_wait_begin(a)
#define trace_i915_gem_object_pwrite(a, b, c)
#define trace_i915_gem_request_add(a, b)
#define trace_i915_gem_ring_dispatch(a, b, c);
#define trace_i915_gem_ring_dispatch(a, b, c)
#define trace_i915_vma_bind(a, b)
#define trace_i915_vma_unbind(a)
#endif


@ -211,7 +211,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_options)
return;
dev_priv->lvds_dither = lvds_options->pixel_dither;
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
@ -225,7 +225,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data_ptrs)
return;
dev_priv->lvds_vbt = 1;
dev_priv->vbt.lvds_vbt = 1;
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
@ -237,7 +237,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
@ -273,9 +273,9 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
dev_priv->bios_lvds_val);
dev_priv->vbt.bios_lvds_val);
}
}
}
@ -315,7 +315,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
@ -344,20 +344,20 @@ parse_general_features(struct drm_i915_private *dev_priv,
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
dev_priv->int_tv_support = general->int_tv_support;
dev_priv->int_crt_support = general->int_crt_support;
dev_priv->lvds_use_ssc = general->enable_ssc;
dev_priv->lvds_ssc_freq =
dev_priv->vbt.int_tv_support = general->int_tv_support;
dev_priv->vbt.int_crt_support = general->int_crt_support;
dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
dev_priv->vbt.lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode;
dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->int_tv_support,
dev_priv->int_crt_support,
dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq,
dev_priv->display_clock_mode,
dev_priv->fdi_rx_polarity_inverted);
dev_priv->vbt.int_tv_support,
dev_priv->vbt.int_crt_support,
dev_priv->vbt.lvds_use_ssc,
dev_priv->vbt.lvds_ssc_freq,
dev_priv->vbt.display_clock_mode,
dev_priv->vbt.fdi_rx_polarity_inverted);
}
}
@ -374,7 +374,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_port_valid(bus_pin))
dev_priv->crt_ddc_pin = bus_pin;
dev_priv->vbt.crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
@ -485,7 +485,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (SUPPORTS_EDP(dev) &&
driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
dev_priv->edp.support = 1;
dev_priv->vbt.edp_support = 1;
if (driver->dual_frequency)
dev_priv->render_reclock_avail = true;
@ -500,20 +500,20 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp = find_section(bdb, BDB_EDP);
if (!edp) {
if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
dev_priv->edp.bpp = 18;
dev_priv->vbt.edp_bpp = 18;
break;
case EDP_24BPP:
dev_priv->edp.bpp = 24;
dev_priv->vbt.edp_bpp = 24;
break;
case EDP_30BPP:
dev_priv->edp.bpp = 30;
dev_priv->vbt.edp_bpp = 30;
break;
}
@ -521,48 +521,48 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->link_params[panel_type];
dev_priv->edp.pps = *edp_pps;
dev_priv->vbt.edp_pps = *edp_pps;
dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
DP_LINK_BW_1_62;
switch (edp_link_params->lanes) {
case 0:
dev_priv->edp.lanes = 1;
dev_priv->vbt.edp_lanes = 1;
break;
case 1:
dev_priv->edp.lanes = 2;
dev_priv->vbt.edp_lanes = 2;
break;
case 3:
default:
dev_priv->edp.lanes = 4;
dev_priv->vbt.edp_lanes = 4;
break;
}
switch (edp_link_params->preemphasis) {
case 0:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
break;
case 1:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
break;
case 2:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
break;
case 3:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
break;
}
switch (edp_link_params->vswing) {
case 0:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
break;
case 1:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
break;
case 2:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
break;
case 3:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
break;
}
}
@ -610,13 +610,13 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
if (!dev_priv->child_dev) {
dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
if (!dev_priv->vbt.child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
}
dev_priv->child_dev_num = count;
dev_priv->vbt.child_dev_num = count;
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
@ -624,7 +624,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
/* skip the device block if device type is invalid */
continue;
}
child_dev_ptr = dev_priv->child_dev + count;
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
memcpy((void *)child_dev_ptr, (void *)p_child,
sizeof(*p_child));
@ -637,23 +637,23 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
/* LFP panel data */
dev_priv->lvds_dither = 1;
dev_priv->lvds_vbt = 0;
dev_priv->vbt.lvds_dither = 1;
dev_priv->vbt.lvds_vbt = 0;
/* SDVO panel data */
dev_priv->sdvo_lvds_vbt_mode = NULL;
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
/* general features */
dev_priv->int_tv_support = 1;
dev_priv->int_crt_support = 1;
dev_priv->vbt.int_tv_support = 1;
dev_priv->vbt.int_crt_support = 1;
/* Default to using SSC */
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
dev_priv->vbt.lvds_use_ssc = 1;
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
}


@ -51,17 +51,16 @@ struct intel_crt {
u32 adpa_reg;
};
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_crt, base);
}
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_crt, base);
}
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
{
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
@ -83,6 +82,28 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
return true;
}
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
tmp = I915_READ(crt->adpa_reg);
if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
}
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@ -126,7 +147,7 @@ static void intel_enable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, crt->connector->base.dpms);
}
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_crt_dpms(struct drm_connector *connector, int mode)
{
struct drm_device *dev = connector->dev;
@ -157,6 +178,8 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
else
encoder->connectors_active = true;
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode < old_dpms) {
/* From off to on, enable the pipe first. */
intel_crtc_update_dpms(crtc);
@ -206,20 +229,21 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev))
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;
return true;
}
static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_crt_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crt *crt =
intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->base.dev;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
if (HAS_PCH_SPLIT(dev))
@ -236,14 +260,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
I915_WRITE(BCLRPAT(crtc->pipe), 0);
I915_WRITE(crt->adpa_reg, adpa);
}
@ -430,7 +454,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
edid = intel_crt_get_edid(connector, i2c);
if (edid) {
@ -584,6 +608,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, drm_get_connector_name(connector),
force);
if (I915_HAS_HOTPLUG(dev)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
@ -636,7 +664,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
int ret;
struct i2c_adapter *i2c;
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
@ -659,7 +687,7 @@ static void intel_crt_reset(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
if (HAS_PCH_SPLIT(dev)) {
if (INTEL_INFO(dev)->gen >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
@ -678,10 +706,6 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_set = intel_crt_mode_set,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
.dpms = intel_crt_dpms,
@ -749,8 +773,10 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = ADPA;
crt->base.compute_config = intel_crt_compute_config;
crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
crt->base.get_config = intel_crt_get_config;
if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev))
@ -759,7 +785,6 @@ void intel_crt_init(struct drm_device *dev)
crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -26,6 +26,7 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
#include <linux/hdmi.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_crtc.h>
@ -64,28 +65,10 @@
ret__; \
})
#define wait_for_atomic_us(COND, US) ({ \
unsigned long timeout__ = GetTimerTicks() + usecs_to_jiffies(US); \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(GetTimerTicks(), timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
} \
cpu_relax(); \
} \
ret__; \
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
#define MSLEEP(x) do { \
if (in_dbg_master()) \
mdelay(x); \
else \
msleep(x); \
} while(0)
#define wait_for_atomic_us(COND, US) _wait_for((COND), \
DIV_ROUND_UP((US), 1000), 0)
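The rewritten wait_for_atomic_us() simply rounds its microsecond budget up to whole milliseconds and reuses _wait_for(), so a 500 us wait now polls for up to 1 ms. A stand-alone check of the rounding, with DIV_ROUND_UP redefined locally the way the kernel defines it:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    assert(DIV_ROUND_UP(500,  1000) == 1); /* 500 us -> 1 ms budget */
    assert(DIV_ROUND_UP(1000, 1000) == 1); /* exact multiples unchanged */
    assert(DIV_ROUND_UP(1001, 1000) == 2); /* otherwise always rounds up */
    return 0;
}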
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
@ -143,7 +126,6 @@ struct intel_encoder {
struct intel_crtc *new_crtc;
int type;
bool needs_tv_clock;
/*
* Intel hw has only one MUX where encoders could be clone, hence a
* simple flag is enough to compute the possible_clones mask.
@ -163,6 +145,12 @@ struct intel_encoder {
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
/* Reconstructs the equivalent mode flags for the current hardware
* state. This must be called _after_ display->get_pipe_config has
* pre-filled the pipe config. Note that intel_encoder->base.crtc must
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_config *pipe_config);
int crtc_mask;
enum hpd_pin hpd_pin;
};
@ -200,13 +188,32 @@ struct intel_connector {
u8 polled;
};
typedef struct dpll {
/* given values */
int n;
int m1, m2;
int p1, p2;
/* derived values */
int dot;
int vco;
int m;
int p;
} intel_clock_t;
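The derived fields follow from the given divider values. On i9xx-class hardware the relations are roughly m = 5*(m1 + 2) + (m2 + 2), p = p1*p2, vco = refclk*m/(n + 2) and dot = vco/p; Pineview and newer platforms use different formulas, so treat this stand-alone sketch (struct mirrored locally, sample dividers invented) as illustrative only:

#include <stdio.h>

struct example_dpll {            /* mirrors intel_clock_t above */
    int n, m1, m2, p1, p2;       /* given values */
    int dot, vco, m, p;          /* derived values */
};

/* assumed i9xx-style derivation; not valid for pnv/vlv */
static void example_i9xx_clock(int refclk, struct example_dpll *clock)
{
    clock->m   = 5 * (clock->m1 + 2) + (clock->m2 + 2);
    clock->p   = clock->p1 * clock->p2;
    clock->vco = refclk * clock->m / (clock->n + 2);
    clock->dot = clock->vco / clock->p;
}

int main(void)
{
    struct example_dpll c = { .n = 1, .m1 = 10, .m2 = 10, .p1 = 2, .p2 = 5 };

    example_i9xx_clock(96000, &c); /* refclk in kHz */
    printf("m=%d p=%d vco=%d kHz dot=%d kHz\n", c.m, c.p, c.vco, c.dot);
    return 0;
}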
struct intel_crtc_config {
/**
* quirks - bitfield with hw state readout quirks
*
* For various reasons the hw state readout code might not be able to
* completely faithfully read out the current state. These cases are
* tracked with quirk flags so that fastboot and state checker can act
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
struct drm_display_mode requested_mode;
struct drm_display_mode adjusted_mode;
/* This flag must be set by the encoder's compute_config callback if it
* changes the crtc timings in the mode to prevent the crtc fixup from
* overwriting them. Currently only lvds needs that. */
bool timings_set;
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
@ -224,29 +231,68 @@ struct intel_crtc_config {
/* DP has a bunch of special cases unfortunately, so mark the pipe
* accordingly. */
bool has_dp_encoder;
/*
* Enable dithering, used when the selected pipe bpp doesn't match the
* plane bpp.
*/
bool dither;
/* Controls for the clock computation, to override various stages. */
bool clock_set;
/* SDVO TV has a bunch of special cases. To make multifunction encoders
* work correctly, we need to track this at runtime. */
bool sdvo_tv_clock;
/*
* crtc bandwidth limit, don't increase pipe bpp or clock if not really
* required. This is set in the 2nd loop of calling encoder's
* ->compute_config if the first pick doesn't work out.
*/
bool bw_constrained;
/* Settings for the intel dpll used on pretty much everything but
* haswell. */
struct dpll {
unsigned n;
unsigned m1, m2;
unsigned p1, p2;
} dpll;
struct dpll dpll;
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
int pipe_bpp;
struct intel_link_m_n dp_m_n;
/**
* This is currently used by DP and HDMI encoders since those can have a
* target pixel clock != the port link clock (which is currently stored
* in adjusted_mode->clock).
/*
* Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode.
*/
int pixel_target_clock;
int port_clock;
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
u32 pgm_ratios;
u32 lvds_border_bits;
} gmch_pfit;
/* Panel fitter placement and size for Ironlake+ */
struct {
u32 pos;
u32 size;
bool enabled;
} pch_pfit;
/* FDI configuration, only valid if has_pch_encoder is set. */
int fdi_lanes;
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
};
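
/*
 * Illustrative sketch, not part of this commit: roughly how a state checker
 * is expected to honour PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS from the struct
 * above. The helper name is hypothetical; only the quirk flag is real.
 */
static inline bool
sketch_sync_flags_comparable(const struct intel_crtc_config *config)
{
	/* If readout couldn't faithfully recover the sync flags, the state
	 * checker should skip comparing adjusted_mode.flags. */
	return (config->quirks & PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS) == 0;
}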
struct intel_crtc {
@ -265,7 +311,6 @@ struct intel_crtc {
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
int fdi_lanes;
atomic_t unpin_work_count;
@ -282,12 +327,21 @@ struct intel_crtc {
struct intel_crtc_config config;
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
uint32_t ddi_pll_sel;
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
};
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
};
struct intel_plane {
@ -302,14 +356,24 @@ struct intel_plane {
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
* as the other pieces of the struct may not reflect the values we want
* for the watermark calculations. Currently only Haswell uses this.
*/
struct intel_plane_wm_parameters wm;
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
@ -341,66 +405,6 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define DIP_HEADER_SIZE 5
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
#define DIP_LEN_SPD 25
#define DIP_SPD_UNKNOWN 0
#define DIP_SPD_DSTB 0x1
#define DIP_SPD_DVDP 0x2
#define DIP_SPD_DVHS 0x3
#define DIP_SPD_HDDVR 0x4
#define DIP_SPD_DVC 0x5
#define DIP_SPD_DSC 0x6
#define DIP_SPD_VCD 0x7
#define DIP_SPD_GAME 0x8
#define DIP_SPD_PC 0x9
#define DIP_SPD_BD 0xa
#define DIP_SPD_SCD 0xb
struct dip_infoframe {
uint8_t type; /* HB0 */
uint8_t ver; /* HB1 */
uint8_t len; /* HB2 - body len, not including checksum */
uint8_t ecc; /* Header ECC */
uint8_t checksum; /* PB0 */
union {
struct {
/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
uint8_t Y_A_B_S;
/* PB2 - C 7:6, M 5:4, R 3:0 */
uint8_t C_M_R;
/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
} __attribute__ ((packed)) avi;
struct {
uint8_t vn[8];
uint8_t pd[16];
uint8_t sdi;
} __attribute__ ((packed)) spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
struct intel_hdmi {
u32 hdmi_reg;
int ddc_bus;
@ -411,7 +415,8 @@ struct intel_hdmi {
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
@ -431,10 +436,10 @@ struct intel_dp {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@ -443,17 +448,31 @@ struct intel_dp {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
bool psr_setup_done;
struct intel_connector *attached_connector;
};
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 port_reversal;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
static inline int
vlv_dport_to_channel(struct intel_digital_port *dport)
{
switch (dport->port) {
case PORT_B:
return 0;
case PORT_C:
return 1;
default:
BUG();
}
}
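
/*
 * Usage sketch (illustrative only): the returned channel index selects the
 * DPIO lane group for the port, as in the VLV hooks later in this patch:
 *
 *	int port = vlv_dport_to_channel(dport);  // PORT_B -> 0, PORT_C -> 1
 *	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
 */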
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@ -481,13 +500,6 @@ struct intel_unpin_work {
bool enable_stall_check;
};
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int interval;
};
int intel_pch_rawclk(struct drm_device *dev);
int intel_connector_update_modes(struct drm_connector *connector,
@ -497,6 +509,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int hdmi_reg, enum port port);
@ -505,19 +518,19 @@ extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
extern void intel_mark_idle(struct drm_device *dev);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
@ -535,7 +548,6 @@ extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
@ -545,14 +557,16 @@ extern int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
extern void intel_panel_fini(struct intel_panel *panel);
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
extern void intel_panel_set_backlight(struct drm_device *dev,
u32 level, u32 max);
extern int intel_panel_setup_backlight(struct drm_connector *connector);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
@ -568,19 +582,15 @@ struct intel_set_config {
bool mode_changed;
};
extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
extern void intel_connector_dpms(struct drm_connector *, int mode);
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
extern void intel_modeset_check_state(struct drm_device *dev);
extern void intel_plane_restore(struct drm_plane *plane);
extern void intel_plane_disable(struct drm_plane *plane);
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@ -588,19 +598,17 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
return to_intel_connector(connector)->encoder;
}
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
container_of(encoder, struct intel_digital_port, base.base);
return &intel_dig_port->dp;
}
static inline struct intel_digital_port *
enc_to_dig_port(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_digital_port, base.base);
}
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
}
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
@ -630,6 +638,7 @@ intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
@ -642,12 +651,10 @@ extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@ -658,6 +665,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
@ -677,30 +685,41 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
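
/*
 * Usage sketch (illustrative): each assert wrapper reads back the relevant
 * enable bit and complains if it doesn't match the expected state, e.g.
 *
 *	assert_pll_disabled(dev_priv, pipe);
 *
 * as done before touching the LVDS port bits in intel_pre_enable_lvds().
 */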
extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_suspend_hw(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size);
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
extern void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
@ -712,25 +731,27 @@ extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
/* Power-related functions, located in intel_pm.c */
extern void intel_init_pm(struct drm_device *dev);
/* FBC */
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
extern bool intel_using_power_well(struct drm_device *dev);
/* Power well */
extern int i915_init_power_well(struct drm_device *dev);
extern void i915_remove_power_well(struct drm_device *dev);
extern bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
@ -742,7 +763,7 @@ extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
@ -751,5 +772,31 @@ intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
extern void intel_display_handle_reset(struct drm_device *dev);
extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable);
extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
extern void intel_edp_psr_update(struct drm_device *dev);
extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down);
extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
uint32_t mask);
extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
uint32_t mask);
extern void hsw_enable_pc8_work(struct work_struct *__work);
extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
#endif /* __INTEL_DRV_H__ */

@ -53,6 +53,13 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.slave_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.slave_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
@ -93,15 +100,14 @@ struct intel_dvo {
bool panel_wants_dither;
};
static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_dvo, base.base);
return container_of(encoder, struct intel_dvo, base);
}
static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_dvo, base);
return enc_to_dvo(intel_attached_encoder(connector));
}
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
@ -116,7 +122,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@ -129,10 +135,30 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
return true;
}
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (tmp & DVO_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
}
static void intel_disable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@ -144,7 +170,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
static void intel_enable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@ -153,6 +179,7 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
@ -174,6 +201,8 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
return;
}
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) {
intel_dvo->base.connectors_active = true;
@ -211,11 +240,11 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static bool intel_dvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
/* If we have timings from the BIOS for the panel, put them into
 * the adjusted mode. The CRTC will be set up for this mode,
@ -234,26 +263,28 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
C(vtotal);
C(clock);
#undef C
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
if (intel_dvo->dev.dev_ops->mode_fixup)
return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
&pipe_config->requested_mode,
adjusted_mode);
return true;
}
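
/*
 * For reference (per the surrounding source, not shown in this hunk), the
 * C(x) helper used above is a shorthand macro of the form
 *
 *	#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
 *
 * so e.g. C(vtotal) copies the BIOS-provided panel timing field into the
 * adjusted mode the CRTC will actually be programmed with.
 */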
static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_dvo_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
int pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
int dpll_reg = DPLL(pipe);
switch (dvo_reg) {
case DVOA:
@ -268,7 +299,9 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&crtc->config.requested_mode,
adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
@ -284,8 +317,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@ -305,6 +336,8 @@ static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
@ -342,11 +375,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.mode_fixup = intel_dvo_mode_fixup,
.mode_set = intel_dvo_mode_set,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
@ -362,7 +390,7 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
if (intel_dvo->dev.dev_ops->destroy)
intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
@ -440,6 +468,9 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo;
intel_encoder->get_hw_state = intel_dvo_get_hw_state;
intel_encoder->get_config = intel_dvo_get_config;
intel_encoder->compute_config = intel_dvo_compute_config;
intel_encoder->mode_set = intel_dvo_mode_set;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
/* Now, try to find a controller */
@ -506,9 +537,6 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_encoder_helper_add(&intel_encoder->base,
&intel_dvo_helper_funcs);
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able

@ -94,8 +94,9 @@ static struct fb_ops intelfb_ops = {
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
struct drm_device *dev = ifbdev->helper.dev;
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
@ -148,8 +149,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
lfb_vm_node.start = 0;
lfb_vm_node.mm = NULL;
obj->gtt_space = &lfb_vm_node;
obj->gtt_offset = 0;
obj->pin_count = 2;
obj->cache_level = I915_CACHE_NONE;
obj->base.write_domain = 0;
@ -164,7 +163,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
info->par = ifbdev;
info->par = helper;
ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
@ -189,22 +188,21 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base = (void*) 0xFE000000;
info->screen_size = size;
// memset(info->screen_base, 0, size);
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
obj->gtt_offset, obj);
i915_gem_obj_ggtt_offset(obj), obj);
mutex_unlock(&dev->struct_mutex);
@ -232,7 +230,7 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@ -257,7 +255,7 @@ int intel_fbdev_init(struct drm_device *dev)
void intel_fbdev_initial_config(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
/* Due to peculiar init order wrt hpd handling this is separate. */
drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
@ -265,6 +263,6 @@ void intel_fbdev_initial_config(struct drm_device *dev)
void intel_fb_output_poll_changed(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}

@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@ -66,89 +67,83 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
{
uint8_t *data = (uint8_t *)frame;
uint8_t sum = 0;
unsigned i;
frame->checksum = 0;
frame->ecc = 0;
for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
sum += data[i];
frame->checksum = 0x100 - sum;
}
static u32 g4x_infoframe_index(struct dip_infoframe *frame)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_SELECT_AVI;
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_SELECT_SPD;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_SELECT_VENDOR;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VENDOR;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD_HSW;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VS_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
enum transcoder cpu_transcoder)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
case HDMI_INFOFRAME_TYPE_VENDOR:
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static void g4x_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i;
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(frame);
val &= ~g4x_infoframe_enable(type);
I915_WRITE(VIDEO_DIP_CTL, val);
@ -162,7 +157,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@ -171,22 +166,22 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(frame);
val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@ -200,7 +195,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@ -209,25 +204,25 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
if (frame->type != DIP_TYPE_AVI)
val &= ~g4x_infoframe_enable(frame);
if (type != HDMI_INFOFRAME_TYPE_AVI)
val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@ -241,7 +236,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@ -250,22 +245,22 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(frame);
val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@ -279,7 +274,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@ -288,21 +283,24 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
unsigned int i, len = DIP_HEADER_SIZE + frame->len;
u32 data_reg;
int i;
u32 val = I915_READ(ctl_reg);
data_reg = hsw_infoframe_data_reg(type,
intel_crtc->config.cpu_transcoder);
if (data_reg == 0)
return;
val &= ~hsw_infoframe_enable(frame);
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
mmiowb();
@ -315,18 +313,48 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(data_reg + i, 0);
mmiowb();
val |= hsw_infoframe_enable(frame);
val |= hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
POSTING_READ(ctl_reg);
}
static void intel_set_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
* at 0). It's also a byte used by DisplayPort so the same DIP registers can be
* used for both technologies.
*
* DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
* DW1: DB3 | DB2 | DB1 | DB0
* DW2: DB7 | DB6 | DB5 | DB4
* DW3: ...
*
* (HB is Header Byte, DB is Data Byte)
*
 * The hdmi pack() functions don't know about that hardware-specific hole so we
* trick them by giving an offset into the buffer and moving back the header
* bytes by one.
*/
static void intel_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
uint8_t buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
intel_dip_infoframe_csum(frame);
intel_hdmi->write_infoframe(encoder, frame);
/* see comment above for the reason for this offset */
len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
if (len < 0)
return;
/* Insert the 'hole' (see big comment above) at position 3 */
buffer[0] = buffer[1];
buffer[1] = buffer[2];
buffer[2] = buffer[3];
buffer[3] = 0;
len++;
intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
}
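
/*
 * Worked example of the shuffle above for a hypothetical AVI frame
 * (HB0..HB2 header bytes, PB0 checksum, PB1.. payload):
 *
 *	packed at buffer + 1:    [ --, HB0, HB1, HB2, PB0, PB1, ... ]
 *	after moving the header: [ HB0, HB1, HB2, 0,  PB0, PB1, ... ]
 *	                                          ^ ECC/reserved hole, byte 3
 *
 * which matches the DW0/DW1 register layout in the big comment above.
 */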
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
@ -334,40 +362,57 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
union hdmi_infoframe frame;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
}
if (intel_hdmi->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_FULL;
}
avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
intel_set_infoframe(encoder, &avi_if);
intel_write_infoframe(encoder, &frame);
}
static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
union hdmi_infoframe frame;
int ret;
memset(&spd_if, 0, sizeof(spd_if));
spd_if.type = DIP_TYPE_SPD;
spd_if.ver = DIP_VERSION_SPD;
spd_if.len = DIP_LEN_SPD;
strcpy(spd_if.body.spd.vn, "Intel");
strcpy(spd_if.body.spd.pd, "Integrated gfx");
spd_if.body.spd.sdi = DIP_SPD_PC;
ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
if (ret < 0) {
DRM_ERROR("couldn't fill SPD infoframe\n");
return;
}
intel_set_infoframe(encoder, &spd_if);
frame.spd.sdi = HDMI_SPD_SDI_PC;
intel_write_infoframe(encoder, &frame);
}
static void
intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
adjusted_mode);
if (ret < 0)
return;
intel_write_infoframe(encoder, &frame);
}
static void g4x_set_infoframes(struct drm_encoder *encoder,
@ -432,6 +477,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void ibx_set_infoframes(struct drm_encoder *encoder,
@ -493,6 +539,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
@ -528,6 +575,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
@ -562,6 +610,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
@ -589,27 +638,27 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_hdmi_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 hdmi_val;
hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
if (!HAS_PCH_SPLIT(dev))
hdmi_val |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
if (intel_crtc->config.pipe_bpp > 24)
if (crtc->config.pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
@ -620,21 +669,21 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (intel_hdmi->has_audio) {
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
hdmi_val |= SDVO_AUDIO_ENABLE;
hdmi_val |= HDMI_MODE_SELECT_HDMI;
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
POSTING_READ(intel_hdmi->hdmi_reg);
intel_hdmi->set_infoframes(encoder, adjusted_mode);
intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@ -658,6 +707,28 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
return true;
}
static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp, flags = 0;
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
}
static void intel_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@ -699,6 +770,10 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
}
}
static void vlv_enable_hdmi(struct intel_encoder *encoder)
{
}
static void intel_disable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@ -755,10 +830,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}
}
static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
if (IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev))
return 300000;
else
return 225000;
}
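
/*
 * Worked example (illustrative numbers): a 1920x1080@60 mode has a dotclock
 * of 148500 kHz. Driving it at 12bpc needs 1.5x the port clock, i.e.
 * 148500 * 3 / 2 = 222750 kHz: over the 165000 kHz G4X limit, but still
 * within the 225000/300000 kHz limits returned above.
 */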
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->clock > 165000)
if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
@ -775,6 +862,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int desired_bpp;
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
@ -794,14 +884,29 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
* outputs.
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
pipe_config->pipe_bpp = 12*3;
if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
&& HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
/* Need to adjust the port link by 1.5x for 12bpc. */
pipe_config->port_clock = clock_12bpc;
} else {
DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
pipe_config->pipe_bpp = 8*3;
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
}
if (!pipe_config->bw_constrained) {
DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
pipe_config->pipe_bpp = desired_bpp;
}
if (adjusted_mode->clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
return true;
@ -819,6 +924,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
@ -955,6 +1063,105 @@ done:
return 0;
}
static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
if (!IS_VALLEYVIEW(dev))
return;
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
/* HDMI 1.0V-2dB */
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
0x2b245f5f);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
0x5578b83a);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
0x0c782040);
vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
0x2b247878);
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
0x00002000);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
/* Program lane clock */
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
intel_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, port);
}
static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int port = vlv_dport_to_channel(dport);
if (!IS_VALLEYVIEW(dev))
return;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
0x00002000);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int port = vlv_dport_to_channel(dport);
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
@ -962,10 +1169,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_set = intel_hdmi_mode_set,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
@ -1070,7 +1273,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
@ -1084,16 +1286,23 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
}
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_encoder->compute_config = intel_hdmi_compute_config;
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->mode_set = intel_hdmi_mode_set;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = intel_hdmi_post_disable;
} else {
intel_encoder->enable = intel_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

@ -398,6 +398,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
int i, reg_offset;
int ret = 0;
intel_aux_display_runtime_get(dev_priv);
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
@ -497,6 +498,7 @@ timeout:
out:
mutex_unlock(&dev_priv->gmbus_mutex);
intel_aux_display_runtime_put(dev_priv);
return ret;
}

@ -49,8 +49,6 @@ struct intel_lvds_connector {
struct intel_lvds_encoder {
struct intel_encoder base;
u32 pfit_control;
u32 pfit_pgm_ratios;
bool is_dual_link;
u32 reg;
@ -88,21 +86,61 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
return true;
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp, flags = 0;
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
tmp = I915_READ(lvds_reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
flags |= DRM_MODE_FLAG_PHSYNC;
if (tmp & LVDS_VSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NVSYNC;
else
flags |= DRM_MODE_FLAG_PVSYNC;
pipe_config->adjusted_mode.flags |= flags;
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
tmp = I915_READ(PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *fixed_mode =
lvds_encoder->attached_connector->base.panel.fixed_mode;
int pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode =
&crtc->config.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
if (HAS_PCH_SPLIT(dev)) {
assert_fdi_rx_pll_disabled(dev_priv, pipe);
assert_shared_dpll_disabled(dev_priv,
intel_crtc_to_shared_dpll(crtc));
} else {
assert_pll_disabled(dev_priv, pipe);
}
temp = I915_READ(lvds_encoder->reg);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
@ -118,7 +156,8 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
}
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
temp &= ~LVDS_BORDER_ENABLE;
temp |= crtc->config.gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@ -136,43 +175,22 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg. */
if (INTEL_INFO(dev)->gen == 4) {
if (dev_priv->lvds_dither)
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
if (crtc->config.dither && crtc->config.pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
}
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(lvds_encoder->reg, temp);
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
return;
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
* adjusted whilst the pipe is disabled, according to
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
enc->pfit_control,
enc->pfit_pgm_ratios);
I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, enc->pfit_control);
}
/**
* Sets the power state for the panel.
*/
@@ -241,62 +259,6 @@ static int intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static void
-centre_horizontally(struct drm_display_mode *mode,
-int width)
-{
-u32 border, sync_pos, blank_width, sync_width;
-/* keep the hsync and hblank widths constant */
-sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
-blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
-sync_pos = (blank_width - sync_width + 1) / 2;
-border = (mode->hdisplay - width + 1) / 2;
-border += border & 1; /* make the border even */
-mode->crtc_hdisplay = width;
-mode->crtc_hblank_start = width + border;
-mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
-mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
-mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
-}
-static void
-centre_vertically(struct drm_display_mode *mode,
-int height)
-{
-u32 border, sync_pos, blank_width, sync_width;
-/* keep the vsync and vblank widths constant */
-sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
-blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
-sync_pos = (blank_width - sync_width + 1) / 2;
-border = (mode->vdisplay - height + 1) / 2;
-mode->crtc_vdisplay = height;
-mode->crtc_vblank_start = height + border;
-mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
-mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
-mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
-}
-static inline u32 panel_fitter_scaling(u32 source, u32 target)
-{
-/*
- * Floating point operation is not supported. So the FACTOR
- * is defined, which can avoid the floating point computation
- * when calculating the panel ratio.
- */
-#define ACCURACY 12
-#define FACTOR (1 << ACCURACY)
-u32 ratio = source * FACTOR / target;
-return (FACTOR * ratio + FACTOR/2) / FACTOR;
-}
static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_config *pipe_config)
{
@@ -307,11 +269,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
-struct drm_display_mode *mode = &pipe_config->requested_mode;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
-u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
unsigned int lvds_bpp;
-int pipe;
/* Should never happen!! */
if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@@ -319,20 +278,18 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return false;
}
-if (intel_encoder_check_is_cloned(&lvds_encoder->base))
-return false;
if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
LVDS_A3_POWER_UP)
lvds_bpp = 8*3;
else
lvds_bpp = 6*3;
-if (lvds_bpp != pipe_config->pipe_bpp) {
+if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
pipe_config->pipe_bpp, lvds_bpp);
pipe_config->pipe_bpp = lvds_bpp;
}
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -345,139 +302,14 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
if (HAS_PCH_SPLIT(dev)) {
pipe_config->has_pch_encoder = true;
-intel_pch_panel_fitting(dev,
-intel_connector->panel.fitting_mode,
-mode, adjusted_mode);
-return true;
+intel_pch_panel_fitting(intel_crtc, pipe_config,
+intel_connector->panel.fitting_mode);
+} else {
+intel_gmch_panel_fitting(intel_crtc, pipe_config,
+intel_connector->panel.fitting_mode);
+}
-/* Native modes don't need fitting */
-if (adjusted_mode->hdisplay == mode->hdisplay &&
-adjusted_mode->vdisplay == mode->vdisplay)
-goto out;
-/* 965+ wants fuzzy fitting */
-if (INTEL_INFO(dev)->gen >= 4)
-pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
-PFIT_FILTER_FUZZY);
-/*
- * Enable automatic panel scaling for non-native modes so that they fill
- * the screen. Should be enabled before the pipe is enabled, according
- * to register description and PRM.
- * Change the value here to see the borders for debugging
- */
-for_each_pipe(pipe)
-I915_WRITE(BCLRPAT(pipe), 0);
-drm_mode_set_crtcinfo(adjusted_mode, 0);
-pipe_config->timings_set = true;
-switch (intel_connector->panel.fitting_mode) {
-case DRM_MODE_SCALE_CENTER:
-/*
- * For centered modes, we have to calculate border widths &
- * heights and modify the values programmed into the CRTC.
- */
-centre_horizontally(adjusted_mode, mode->hdisplay);
-centre_vertically(adjusted_mode, mode->vdisplay);
-border = LVDS_BORDER_ENABLE;
-break;
-case DRM_MODE_SCALE_ASPECT:
-/* Scale but preserve the aspect ratio */
-if (INTEL_INFO(dev)->gen >= 4) {
-u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
-/* 965+ is easy, it does everything in hw */
-if (scaled_width > scaled_height)
-pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
-else if (scaled_width < scaled_height)
-pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
-else if (adjusted_mode->hdisplay != mode->hdisplay)
-pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
-} else {
-u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
-/*
- * For earlier chips we have to calculate the scaling
- * ratio by hand and program it into the
- * PFIT_PGM_RATIO register
- */
-if (scaled_width > scaled_height) { /* pillar */
-centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
-border = LVDS_BORDER_ENABLE;
-if (mode->vdisplay != adjusted_mode->vdisplay) {
-u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
-pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-bits << PFIT_VERT_SCALE_SHIFT);
-pfit_control |= (PFIT_ENABLE |
-VERT_INTERP_BILINEAR |
-HORIZ_INTERP_BILINEAR);
-}
-} else if (scaled_width < scaled_height) { /* letter */
-centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
-border = LVDS_BORDER_ENABLE;
-if (mode->hdisplay != adjusted_mode->hdisplay) {
-u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
-pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-bits << PFIT_VERT_SCALE_SHIFT);
-pfit_control |= (PFIT_ENABLE |
-VERT_INTERP_BILINEAR |
-HORIZ_INTERP_BILINEAR);
-}
-} else
-/* Aspects match, Let hw scale both directions */
-pfit_control |= (PFIT_ENABLE |
-VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-VERT_INTERP_BILINEAR |
-HORIZ_INTERP_BILINEAR);
-}
-break;
-case DRM_MODE_SCALE_FULLSCREEN:
-/*
- * Full scaling, even if it changes the aspect ratio.
- * Fortunately this is all done for us in hw.
- */
-if (mode->vdisplay != adjusted_mode->vdisplay ||
-mode->hdisplay != adjusted_mode->hdisplay) {
-pfit_control |= PFIT_ENABLE;
-if (INTEL_INFO(dev)->gen >= 4)
-pfit_control |= PFIT_SCALING_AUTO;
-else
-pfit_control |= (VERT_AUTO_SCALE |
-VERT_INTERP_BILINEAR |
-HORIZ_AUTO_SCALE |
-HORIZ_INTERP_BILINEAR);
-}
-break;
-default:
-break;
-}
-out:
-/* If not enabling scaling, be consistent and always use 0. */
-if ((pfit_control & PFIT_ENABLE) == 0) {
-pfit_control = 0;
-pfit_pgm_ratios = 0;
-}
-/* Make sure pre-965 set dither correctly */
-if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
-pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-if (pfit_control != lvds_encoder->pfit_control ||
-pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
-lvds_encoder->pfit_control = pfit_control;
-lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
-}
-dev_priv->lvds_border_bits = border;
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
@@ -487,14 +319,12 @@ out:
return true;
}
-static void intel_lvds_mode_set(struct drm_encoder *encoder,
-struct drm_display_mode *mode,
-struct drm_display_mode *adjusted_mode)
+static void intel_lvds_mode_set(struct intel_encoder *encoder)
{
/*
-* The LVDS pin pair will already have been turned on in the
-* intel_crtc_mode_set since it has a large impact on the DPLL
-* settings.
+* We don't do anything here, the LVDS port is fully set up in the pre
+* enable hook - the ordering constraints for enabling the lvds port vs.
+* enabling the display pll are too strict.
*/
}
@@ -511,6 +341,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
status = intel_panel_detect(dev);
if (status != connector_status_unknown)
return status;
@@ -672,10 +505,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
return 0;
}
-static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
-.mode_set = intel_lvds_mode_set,
-};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
@@ -869,6 +698,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Intel D510MO",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Intel D525MW",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
},
},
{ } /* terminating entry */
};
@@ -937,11 +782,11 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
-if (!dev_priv->child_dev_num)
+if (!dev_priv->vbt.child_dev_num)
return true;
-for (i = 0; i < dev_priv->child_dev_num; i++) {
-struct child_device_config *child = dev_priv->child_dev + i;
+for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+struct child_device_config *child = dev_priv->vbt.child_dev + i;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -979,6 +824,19 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
return 1;
}
static const struct dmi_system_id intel_dual_link_lvds[] = {
{
.callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro (Core i5/i7 Series)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
},
},
{ } /* terminating entry */
};
bool intel_is_dual_link_lvds(struct drm_device *dev)
{
struct intel_encoder *encoder;
@@ -1016,7 +874,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
*/
val = I915_READ(lvds_encoder->reg);
if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
-val = dev_priv->bios_lvds_val;
+val = dev_priv->vbt.bios_lvds_val;
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
@@ -1043,7 +901,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-bool intel_lvds_init(struct drm_device *dev)
+void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder;
@@ -1061,7 +919,7 @@ bool intel_lvds_init(struct drm_device *dev)
u8 pin;
if (!intel_lvds_supported(dev))
-return false;
+return;
/* Skip init on machines we know falsely report LVDS */
// if (dmi_check_system(intel_no_lvds))
@@ -1070,34 +928,30 @@ bool intel_lvds_init(struct drm_device *dev)
pin = GMBUS_PORT_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
-return false;
+return;
}
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
-return false;
-if (dev_priv->edp.support) {
+return;
+if (dev_priv->vbt.edp_support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
-return false;
+return;
}
}
lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
-return false;
+return;
lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
if (!lvds_connector) {
kfree(lvds_encoder);
-return false;
+return;
}
lvds_encoder->attached_connector = lvds_connector;
-if (!HAS_PCH_SPLIT(dev)) {
-lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
-}
intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
intel_connector = &lvds_connector->base;
@@ -1110,10 +964,11 @@ bool intel_lvds_init(struct drm_device *dev)
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
-intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
+intel_encoder->mode_set = intel_lvds_mode_set;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
@@ -1127,7 +982,6 @@ bool intel_lvds_init(struct drm_device *dev)
else
intel_encoder->crtc_mask = (1 << 1);
-drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
@@ -1199,11 +1053,11 @@ bool intel_lvds_init(struct drm_device *dev)
}
/* Failed to get EDID, what about VBT? */
-if (dev_priv->lfp_lvds_vbt_mode) {
+if (dev_priv->vbt.lfp_lvds_vbt_mode) {
DRM_DEBUG_KMS("using mode from VBT: ");
-drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
-fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
@@ -1264,7 +1118,7 @@ out:
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
-return true;
+return;
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@@ -1274,5 +1128,5 @@ failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_mode_destroy(dev, fixed_mode);
kfree(lvds_encoder);
kfree(lvds_connector);
-return false;
+return;
}


@@ -112,6 +112,10 @@ struct opregion_asle {
u8 rsvd[102];
} __attribute__((packed));
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
#define ASLE_ARDY_NOT_READY (0 << 0)
/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM (1 << 0)
#define ASLE_SET_BACKLIGHT (1 << 1)
@@ -125,6 +129,12 @@ struct opregion_asle {
#define ASLE_PFIT_FAILED (1<<14)
#define ASLE_PWM_FREQ_FAILED (1<<16)
/* Technology enabled indicator */
#define ASLE_TCHE_ALS_EN (1 << 0)
#define ASLE_TCHE_BLC_EN (1 << 1)
#define ASLE_TCHE_PFIT_EN (1 << 2)
#define ASLE_TCHE_PFMB_EN (1 << 3)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
#define ASLE_BCLP_MSK (~(1<<31))
@@ -197,6 +207,8 @@ int intel_opregion_setup(struct drm_device *dev)
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
}
return 0;


@@ -36,32 +36,26 @@
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
{
-adjusted_mode->hdisplay = fixed_mode->hdisplay;
-adjusted_mode->hsync_start = fixed_mode->hsync_start;
-adjusted_mode->hsync_end = fixed_mode->hsync_end;
-adjusted_mode->htotal = fixed_mode->htotal;
+drm_mode_copy(adjusted_mode, fixed_mode);
-adjusted_mode->vdisplay = fixed_mode->vdisplay;
-adjusted_mode->vsync_start = fixed_mode->vsync_start;
-adjusted_mode->vsync_end = fixed_mode->vsync_end;
-adjusted_mode->vtotal = fixed_mode->vtotal;
-adjusted_mode->clock = fixed_mode->clock;
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
/* adjusted_mode has been preset to be the panel's fixed mode */
void
-intel_pch_panel_fitting(struct drm_device *dev,
-int fitting_mode,
-const struct drm_display_mode *mode,
-struct drm_display_mode *adjusted_mode)
+intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
+struct intel_crtc_config *pipe_config,
+int fitting_mode)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_display_mode *mode, *adjusted_mode;
int x, y, width, height;
+mode = &pipe_config->requested_mode;
+adjusted_mode = &pipe_config->adjusted_mode;
x = y = width = height = 0;
/* Native modes don't need fitting */
@@ -104,17 +98,210 @@ intel_pch_panel_fitting(struct drm_device *dev,
}
break;
-default:
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->hdisplay;
height = adjusted_mode->vdisplay;
break;
+default:
+WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+return;
}
done:
-dev_priv->pch_pf_pos = (x << 16) | y;
-dev_priv->pch_pf_size = (width << 16) | height;
+pipe_config->pch_pfit.pos = (x << 16) | y;
+pipe_config->pch_pfit.size = (width << 16) | height;
+pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
}
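A minimal userspace sketch of the pos/size packing above (values and the helper name are illustrative, not from the driver): each register word carries the horizontal component in its high 16 bits.

#include <assert.h>
#include <stdint.h>

/* Pack two 16-bit fields the way pch_pfit.pos/size are built. */
static uint32_t pack_pair(uint32_t hi, uint32_t lo)
{
    assert(hi <= 0xffff && lo <= 0xffff);
    return (hi << 16) | lo;
}

int main(void)
{
    uint32_t pos  = pack_pair(0, 0);        /* x = 0, y = 0 */
    uint32_t size = pack_pair(1920, 1080);  /* width, height */

    assert((size >> 16) == 1920);    /* width back out of the high half */
    assert((size & 0xffff) == 1080); /* height out of the low half */
    /* pch_pfit.enabled follows from size != 0, as in the code above */
    return pos == 0 ? 0 : 1;
}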
static void
centre_horizontally(struct drm_display_mode *mode,
int width)
{
u32 border, sync_pos, blank_width, sync_width;
/* keep the hsync and hblank widths constant */
sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
border = (mode->hdisplay - width + 1) / 2;
border += border & 1; /* make the border even */
mode->crtc_hdisplay = width;
mode->crtc_hblank_start = width + border;
mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
}
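Worked numbers for the centring arithmetic, as a standalone sketch (the panel timings are illustrative; a 1680-wide mode centred in a 1920-wide panel timing):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* panel's fixed crtc_* timings before centring */
    uint32_t hdisplay = 1920, hsync_start = 2008, hsync_end = 2052;
    uint32_t hblank_start = 1920, hblank_end = 2200;
    uint32_t width = 1680;                       /* mode being centred */

    uint32_t sync_width  = hsync_end - hsync_start;          /* 44  */
    uint32_t blank_width = hblank_end - hblank_start;        /* 280 */
    uint32_t sync_pos = (blank_width - sync_width + 1) / 2;  /* 118 */
    uint32_t border = (hdisplay - width + 1) / 2;            /* 120 */
    border += border & 1;                 /* keep the border even */

    /* hblank keeps its width; it just starts after the border */
    assert(width + border == 1800);
    assert(width + border + blank_width == 2080);
    assert(sync_pos == 118);
    printf("border=%u blank=%u..%u\n", border, width + border,
           width + border + blank_width);
    return 0;
}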
static void
centre_vertically(struct drm_display_mode *mode,
int height)
{
u32 border, sync_pos, blank_width, sync_width;
/* keep the vsync and vblank widths constant */
sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
border = (mode->vdisplay - height + 1) / 2;
mode->crtc_vdisplay = height;
mode->crtc_vblank_start = height + border;
mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
}
static inline u32 panel_fitter_scaling(u32 source, u32 target)
{
/*
* Floating-point operations are not supported, so FACTOR is
* defined to avoid floating-point computation when calculating
* the panel ratio.
*/
#define ACCURACY 12
#define FACTOR (1 << ACCURACY)
u32 ratio = source * FACTOR / target;
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
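The fixed-point arithmetic above on concrete numbers (a minimal sketch; a 768-line source scaled to a 1024-line panel, with ACCURACY/FACTOR as defined in the function):

#include <assert.h>
#include <stdint.h>

#define ACCURACY 12
#define FACTOR (1 << ACCURACY)

int main(void)
{
    uint32_t source = 768, target = 1024;
    uint32_t ratio = source * FACTOR / target; /* 0.75 in 12-bit fixed point */
    uint32_t bits = (FACTOR * ratio + FACTOR / 2) / FACTOR;

    assert(ratio == 3072);  /* 0.75 * 4096 */
    assert(bits == ratio);  /* the final rounding divide is a no-op once
                             * ratio is already an integer */
    return 0;
}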
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *mode, *adjusted_mode;
mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
centre_horizontally(adjusted_mode, mode->hdisplay);
centre_vertically(adjusted_mode, mode->vdisplay);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_INFO(dev)->gen >= 4) {
u32 scaled_width = adjusted_mode->hdisplay *
mode->vdisplay;
u32 scaled_height = mode->hdisplay *
adjusted_mode->vdisplay;
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
pfit_control |= PFIT_ENABLE |
PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
else if (adjusted_mode->hdisplay != mode->hdisplay)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
} else {
u32 scaled_width = adjusted_mode->hdisplay *
mode->vdisplay;
u32 scaled_height = mode->hdisplay *
adjusted_mode->vdisplay;
/*
* For earlier chips we have to calculate the scaling
* ratio by hand and program it into the
* PFIT_PGM_RATIO register
*/
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode,
scaled_height /
mode->vdisplay);
border = LVDS_BORDER_ENABLE;
if (mode->vdisplay != adjusted_mode->vdisplay) {
u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
scaled_width /
mode->hdisplay);
border = LVDS_BORDER_ENABLE;
if (mode->hdisplay != adjusted_mode->hdisplay) {
u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else {
/* Aspects match, let hw scale both directions */
pfit_control |= (PFIT_ENABLE |
VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
}
break;
case DRM_MODE_SCALE_FULLSCREEN:
/*
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
if (mode->vdisplay != adjusted_mode->vdisplay ||
mode->hdisplay != adjusted_mode->hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_AUTO_SCALE |
HORIZ_INTERP_BILINEAR);
}
break;
default:
WARN(1, "bad panel fit mode: %d\n", fitting_mode);
return;
}
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
}
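The pillarbox/letterbox decision above compares aspect ratios by cross-multiplication rather than division; a small sketch with illustrative numbers (a 4:3 mode on a 16:10 panel):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* panel (adjusted): 1920x1200, requested mode: 1024x768 */
    uint32_t adj_h = 1920, adj_v = 1200, mode_h = 1024, mode_v = 768;

    /* adj_h/adj_v > mode_h/mode_v  <=>  adj_h*mode_v > mode_h*adj_v */
    uint32_t scaled_width  = adj_h * mode_v;   /* 1474560 */
    uint32_t scaled_height = mode_h * adj_v;   /* 1228800 */

    assert(scaled_width > scaled_height); /* panel is wider: pillarbox */
    return 0;
}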
static int is_backlight_combination_mode(struct drm_device *dev)
@@ -130,11 +317,16 @@ static int is_backlight_combination_mode(struct drm_device *dev)
return 0;
}
/* XXX: query mode clock or hardware clock and program max PWM appropriately
* when it's 0.
*/
static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
// WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
/* Restore the CTL value if it lost, e.g. GPU reset */
if (HAS_PCH_SPLIT(dev_priv->dev)) {
@@ -164,7 +356,7 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
return val;
}
-static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 intel_panel_get_max_backlight(struct drm_device *dev)
{
u32 max;
@@ -182,23 +374,8 @@ static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
max *= 0xff;
}
-return max;
-}
-u32 intel_panel_get_max_backlight(struct drm_device *dev)
-{
-u32 max;
-max = _intel_panel_get_max_backlight(dev);
-if (max == 0) {
-/* XXX add code here to query mode clock or hardware clock
- * and program max PWM appropriately.
- */
-pr_warn_once("fixme: max PWM is zero\n");
-return 1;
-}
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
return max;
}
@@ -217,8 +394,11 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
return val;
if (i915_panel_invert_brightness > 0 ||
-dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
-return intel_panel_get_max_backlight(dev) - val;
+dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+u32 max = intel_panel_get_max_backlight(dev);
+if (max)
+return max - val;
+}
return val;
}
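A small standalone sketch of the guarded inversion above (the quirk flag and PWM maximum are stand-ins): the request is mirrored around the maximum, and a maximum of 0 no longer underflows.

#include <assert.h>
#include <stdint.h>

static uint32_t compute_brightness(uint32_t val, uint32_t max, int invert)
{
    if (invert && max)
        return max - val;  /* mirror around the PWM maximum */
    return val;            /* max == 0: leave the value untouched */
}

int main(void)
{
    assert(compute_brightness(100, 255, 1) == 155);
    assert(compute_brightness(100, 0, 1) == 100);   /* guarded case */
    assert(compute_brightness(100, 255, 0) == 100);
    return 0;
}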
@@ -227,6 +407,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
unsigned long flags;
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
if (HAS_PCH_SPLIT(dev)) {
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
@@ -244,6 +427,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
}
val = intel_panel_compute_brightness(dev, val);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
@@ -270,6 +456,10 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
u32 max = intel_panel_get_max_backlight(dev);
u8 lbpc;
/* we're screwed, but keep behaviour backwards compatible */
if (!max)
max = 1;
lbpc = level * 0xfe / max + 1;
level /= lbpc;
pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
@@ -282,7 +472,8 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
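The combination-mode split with concrete numbers (a sketch; the values are illustrative): the requested level is factored into a legacy LBPC byte and a remaining PWM duty so their product approximates the request.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t max = 255, level = 128;

    uint8_t lbpc = level * 0xfe / max + 1; /* 128 * 254 / 255 + 1 = 128 */
    level /= lbpc;                         /* PWM duty left over: 1 */

    assert(lbpc == 128 && level == 1);
    assert((uint32_t)lbpc * level <= 128); /* never exceeds the request */
    return 0;
}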
-void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+/* set backlight brightness to level in range [0..max] */
+void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -297,6 +488,9 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
void intel_panel_disable_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
dev_priv->backlight.enabled = false;
intel_panel_actually_set_backlight(dev, 0);
@@ -314,12 +508,19 @@ void intel_panel_disable_backlight(struct drm_device *dev)
I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
}
}
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
unsigned long flags;
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
if (dev_priv->backlight.level == 0) {
dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
@@ -347,14 +548,18 @@ void intel_panel_enable_backlight(struct drm_device *dev,
else
tmp &= ~BLM_PIPE_SELECT;
-tmp |= BLM_PIPE(pipe);
+if (cpu_transcoder == TRANSCODER_EDP)
+tmp |= BLM_TRANSCODER_EDP;
+else
+tmp |= BLM_PIPE(cpu_transcoder);
tmp &= ~BLM_PWM_ENABLE;
I915_WRITE(reg, tmp);
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
-if (HAS_PCH_SPLIT(dev)) {
+if (HAS_PCH_SPLIT(dev) &&
+!(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
@@ -369,6 +574,8 @@ set_level:
*/
dev_priv->backlight.enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
static void intel_panel_init_backlight(struct drm_device *dev)
@@ -405,7 +612,8 @@ intel_panel_detect(struct drm_device *dev)
static int intel_panel_update_status(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
-intel_panel_set_backlight(dev, bd->props.brightness);
+intel_panel_set_backlight(dev, bd->props.brightness,
+bd->props.max_brightness);
return 0;
}
@@ -425,6 +633,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct backlight_properties props;
unsigned long flags;
intel_panel_init_backlight(dev);
@@ -434,7 +643,11 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.brightness = dev_priv->backlight.level;
-props.max_brightness = _intel_panel_get_max_backlight(dev);
+spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+props.max_brightness = intel_panel_get_max_backlight(dev);
+spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
if (props.max_brightness == 0) {
DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
return -ENODEV;

File diff suppressed because it is too large

@@ -33,16 +33,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-struct drm_i915_gem_object *obj;
-volatile u32 *cpu_page;
-u32 gtt_offset;
-};
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
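The same head/tail arithmetic in isolation, as a sketch (HEAD_ADDR masking omitted; the free-space constant is a stand-in for I915_RING_FREE_SPACE): a negative result means tail is ahead of head and wraps by the ring size.

#include <assert.h>
#include <stdint.h>

#define RING_FREE_SPACE 64

static int ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
    int space = (int)head - (int)(tail + RING_FREE_SPACE);
    if (space < 0)
        space += size;  /* wrapped: free region spans the ring end */
    return space;
}

int main(void)
{
    assert(ring_space(4096, 0, 32768) == 4096 - 64);
    assert(ring_space(0, 4096, 32768) == 32768 - 4096 - 64);
    return 0;
}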
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
-struct pipe_control *pc = ring->private;
-u32 scratch_addr = pc->gtt_offset + 128;
+u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
@@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
-struct pipe_control *pc = ring->private;
-u32 scratch_addr = pc->gtt_offset + 128;
+u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -280,13 +268,33 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
return 0;
}
static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
{
int ret;
if (!ring->fbc_dirty)
return 0;
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_NOOP);
/* WaFbcNukeOn3DBlt:ivb/hsw */
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, value);
intel_ring_advance(ring);
ring->fbc_dirty = false;
return 0;
}
static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
-struct pipe_control *pc = ring->private;
-u32 scratch_addr = pc->gtt_offset + 128;
+u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/*
@@ -336,6 +344,9 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
if (flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
return 0;
}
@@ -355,6 +366,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
return I915_READ(acthd_reg);
}
static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
if (INTEL_INFO(ring->dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
static int init_ring_common(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
@@ -366,6 +388,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_get(dev_priv);
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
else
ring_setup_phys_status_page(ring);
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
@@ -400,14 +427,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
-I915_WRITE_START(ring, obj->gtt_offset);
+I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-I915_READ_START(ring) == obj->gtt_offset &&
+I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
@@ -425,6 +452,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
ring->space = ring_space(ring);
ring->last_retired_head = -1;
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
out:
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_put(dev_priv);
@@ -435,83 +464,57 @@ out:
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
-struct pipe_control *pc;
-struct drm_i915_gem_object *obj;
int ret;
-if (ring->private)
+if (ring->scratch.obj)
return 0;
-pc = kmalloc(sizeof(*pc), GFP_KERNEL);
-if (!pc)
-return -ENOMEM;
-obj = i915_gem_alloc_object(ring->dev, 4096);
-if (obj == NULL) {
+ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+if (ring->scratch.obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
goto err;
}
-i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
-ret = i915_gem_object_pin(obj, 4096, true, false);
+ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
if (ret)
goto err_unref;
-pc->gtt_offset = obj->gtt_offset;
-pc->cpu_page = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096, PG_SW);
-if (pc->cpu_page == NULL)
+ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+ring->scratch.cpu_page = (void*)MapIoMem((addr_t)sg_page(ring->scratch.obj->pages->sgl),4096, PG_SW);
+if (ring->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-ring->name, pc->gtt_offset);
-pc->obj = obj;
-ring->private = pc;
+ring->name, ring->scratch.gtt_offset);
return 0;
err_unpin:
-i915_gem_object_unpin(obj);
+i915_gem_object_unpin(ring->scratch.obj);
err_unref:
-drm_gem_object_unreference(&obj->base);
+drm_gem_object_unreference(&ring->scratch.obj->base);
err:
-kfree(pc);
return ret;
}
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
-struct pipe_control *pc = ring->private;
-struct drm_i915_gem_object *obj;
-if (!ring->private)
-return;
-obj = pc->obj;
-// kunmap(obj->pages[0]);
-i915_gem_object_unpin(obj);
-drm_gem_object_unreference(&obj->base);
-kfree(pc);
-ring->private = NULL;
-}
static int init_render_ring(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
-ENTER();
if (INTEL_INFO(dev)->gen > 3)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -553,9 +556,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (HAS_L3_GPU_CACHE(dev))
-I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
-LEAVE();
+I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
return ret;
}
@@ -564,19 +565,32 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
-if (!ring->private)
+if (ring->scratch.obj == NULL)
return;
-cleanup_pipe_control(ring);
+if (INTEL_INFO(dev)->gen >= 5) {
+// kunmap(sg_page(ring->scratch.obj->pages->sgl));
+i915_gem_object_unpin(ring->scratch.obj);
+}
+drm_gem_object_unreference(&ring->scratch.obj->base);
+ring->scratch.obj = NULL;
}
static void
update_mboxes(struct intel_ring_buffer *ring,
u32 mmio_offset)
{
/* NB: In order to be able to do semaphore MBOX updates for varying number
* of rings, it's easiest if we round up each individual update to a
* multiple of 2 (since ring updates must always be a multiple of 2)
* even though the actual update only requires 3 dwords.
*/
#define MBOX_UPDATE_DWORDS 4
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_NOOP);
}
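The dword budget this padding buys, spelled out for the four-ring case (a sketch; the numbers follow from the definitions above):

#include <assert.h>

#define I915_NUM_RINGS 4
#define MBOX_UPDATE_DWORDS 4 /* LRI + register + seqno + NOOP */

int main(void)
{
    /* one padded mailbox write per other ring, plus 4 for the request */
    int dwords = (I915_NUM_RINGS - 1) * MBOX_UPDATE_DWORDS + 4;
    assert(dwords == 16 && dwords % 2 == 0); /* even, as rings require */
    return 0;
}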
/**
@@ -591,19 +605,24 @@ update_mboxes(struct intel_ring_buffer *ring,
static int
gen6_add_request(struct intel_ring_buffer *ring)
{
-u32 mbox1_reg;
-u32 mbox2_reg;
-int ret;
+struct drm_device *dev = ring->dev;
+struct drm_i915_private *dev_priv = dev->dev_private;
+struct intel_ring_buffer *useless;
+int i, ret;
-ret = intel_ring_begin(ring, 10);
+ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
+MBOX_UPDATE_DWORDS) +
+4);
if (ret)
return ret;
+#undef MBOX_UPDATE_DWORDS
-mbox1_reg = ring->signal_mbox[0];
-mbox2_reg = ring->signal_mbox[1];
+for_each_ring(useless, dev_priv, i) {
+u32 mbox_reg = ring->signal_mbox[i];
+if (mbox_reg != GEN6_NOSYNC)
+update_mboxes(ring, mbox_reg);
+}
-update_mboxes(ring, mbox1_reg);
-update_mboxes(ring, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
@@ -681,8 +700,7 @@ do { \
static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
-struct pipe_control *pc = ring->private;
-u32 scratch_addr = pc->gtt_offset + 128;
+u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -700,7 +718,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -719,7 +737,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
-intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@@ -753,15 +771,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
-struct pipe_control *pc = ring->private;
-return pc->cpu_page[0];
+return ring->scratch.cpu_page[0];
}
static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
-struct pipe_control *pc = ring->private;
-pc->cpu_page[0] = seqno;
+ring->scratch.cpu_page[0] = seqno;
}
static bool
@@ -775,11 +791,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
-if (ring->irq_refcount++ == 0) {
-dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-POSTING_READ(GTIMR);
-}
+if (ring->irq_refcount++ == 0)
+ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
@@ -793,11 +806,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
-if (--ring->irq_refcount == 0) {
-dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-POSTING_READ(GTIMR);
-}
+if (--ring->irq_refcount == 0)
+ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -881,8 +891,6 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 mmio = 0;
-ENTER();
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
@@ -897,6 +905,9 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
case VECS:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
@@ -906,8 +917,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
-LEAVE();
/* Flush the TLB for this page */
if (INTEL_INFO(dev)->gen >= 6) {
u32 reg = RING_INSTPM(ring->mmio_base);
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
ring->name);
}
}
static int
@@ -963,13 +984,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
-GEN6_RENDER_L3_PARITY_ERROR));
+I915_WRITE_IMR(ring,
+~(ring->irq_enable_mask |
+GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-POSTING_READ(GTIMR);
+ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -986,18 +1006,55 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+I915_WRITE_IMR(ring,
+~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
else
I915_WRITE_IMR(ring, ~0);
-dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-POSTING_READ(GTIMR);
+ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
gen6_gt_force_wake_put(dev_priv);
}
static bool
hsw_vebox_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
if (!dev->irq_enabled)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
hsw_vebox_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
if (!dev->irq_enabled)
return;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 length,
@@ -1039,8 +1096,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
} else {
-struct drm_i915_gem_object *obj = ring->private;
-u32 cs_offset = obj->gtt_offset;
+u32 cs_offset = ring->scratch.gtt_offset;
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1120,12 +1176,12 @@ static int init_status_page(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-ret = i915_gem_object_pin(obj, 4096, true, false);
+ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
-ring->status_page.gfx_addr = obj->gtt_offset;
+ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW);
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
@@ -1134,7 +1190,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
-intel_ring_setup_status_page(ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
@@ -1148,10 +1203,9 @@ err:
return ret;
}
-static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
-u32 addr;
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
@@ -1160,11 +1214,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring)
return -ENOMEM;
}
-addr = dev_priv->status_page_dmah->busaddr;
-if (INTEL_INFO(ring->dev)->gen >= 4)
-addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
-I915_WRITE(HWS_PGA, addr);
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
@@ -1192,7 +1241,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
return ret;
} else {
BUG_ON(ring->id != RCS);
-ret = init_phys_hws_pga(ring);
+ret = init_phys_status_page(ring);
if (ret)
return ret;
}
@@ -1210,7 +1259,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
-ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
@@ -1219,7 +1268,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
-ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1417,7 +1466,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
/* We need to add any requests required to flush the objects and ring */
if (ring->outstanding_lazy_request) {
-ret = i915_add_request(ring, NULL, NULL);
+ret = i915_add_request(ring, NULL);
if (ret)
return ret;
}
@@ -1491,9 +1540,12 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
if (INTEL_INFO(ring->dev)->gen >= 6) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
if (HAS_VEBOX(ring->dev))
I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
ring->set_seqno(ring, seqno);
ring->hangcheck.seqno = seqno;
}
void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1540,7 +1592,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
-static int gen6_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
@@ -1612,9 +1664,10 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
/* Blitter support (SandyBridge+) */
-static int blt_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
uint32_t cmd;
int ret;
@@ -1637,6 +1690,10 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
if (IS_GEN7(dev) && flush)
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
return 0;
}
@@ -1656,15 +1713,18 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
-ring->irq_enable_mask = GT_USER_INTERRUPT;
+ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->sync_to = gen6_ring_sync;
-ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
-ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
-ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
-ring->signal_mbox[0] = GEN6_VRSYNC;
-ring->signal_mbox[1] = GEN6_BRSYNC;
+ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
+ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
+ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ring->signal_mbox[RCS] = GEN6_NOSYNC;
+ring->signal_mbox[VCS] = GEN6_VRSYNC;
+ring->signal_mbox[BCS] = GEN6_BRSYNC;
+ring->signal_mbox[VECS] = GEN6_VERSYNC;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
@@ -1672,7 +1732,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->set_seqno = pc_render_set_seqno;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
-ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
} else {
ring->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
@@ -1715,14 +1776,15 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return -ENOMEM;
}
-ret = i915_gem_object_pin(obj, 0, true, false);
+ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");
return ret;
}
-ring->private = obj;
+ring->scratch.obj = obj;
+ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
return intel_init_ring_buffer(dev, ring);
@@ -1789,7 +1851,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
}
if (!I915_NEED_GFX_HWS(dev)) {
-ret = init_phys_hws_pga(ring);
+ret = init_phys_status_page(ring);
if (ret)
return ret;
}
@@ -1812,20 +1874,23 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev))
ring->write_tail = gen6_bsd_ring_write_tail;
-ring->flush = gen6_ring_flush;
+ring->flush = gen6_bsd_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
-ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
ring->sync_to = gen6_ring_sync;
-ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
-ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
-ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
-ring->signal_mbox[0] = GEN6_RVSYNC;
-ring->signal_mbox[1] = GEN6_BVSYNC;
+ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
+ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
+ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ring->signal_mbox[RCS] = GEN6_RVSYNC;
+ring->signal_mbox[VCS] = GEN6_NOSYNC;
+ring->signal_mbox[BCS] = GEN6_BVSYNC;
+ring->signal_mbox[VECS] = GEN6_VEVSYNC;
} else {
ring->mmio_base = BSD_RING_BASE;
ring->flush = bsd_ring_flush;
@@ -1833,7 +1898,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
-ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
} else {
@@ -1858,20 +1923,55 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->mmio_base = BLT_RING_BASE;
ring->write_tail = ring_write_tail;
-ring->flush = blt_ring_flush;
+ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
-ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
ring->sync_to = gen6_ring_sync;
-ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
-ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
-ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
-ring->signal_mbox[0] = GEN6_RBSYNC;
-ring->signal_mbox[1] = GEN6_VBSYNC;
+ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
+ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
+ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ring->signal_mbox[RCS] = GEN6_RBSYNC;
+ring->signal_mbox[VCS] = GEN6_VBSYNC;
+ring->signal_mbox[BCS] = GEN6_NOSYNC;
+ring->signal_mbox[VECS] = GEN6_VEBSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
ring->name = "video enhancement ring";
ring->id = VECS;
ring->mmio_base = VEBOX_RING_BASE;
ring->write_tail = ring_write_tail;
ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
ring->signal_mbox[RCS] = GEN6_RVESYNC;
ring->signal_mbox[VCS] = GEN6_VVESYNC;
ring->signal_mbox[BCS] = GEN6_BVESYNC;
ring->signal_mbox[VECS] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);


@@ -33,9 +33,20 @@ struct intel_hw_status_page {
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
enum intel_ring_hangcheck_action {
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
HANGCHECK_KICK,
HANGCHECK_HUNG,
};
struct intel_ring_hangcheck {
bool deadlock;
u32 seqno;
u32 acthd;
int score;
enum intel_ring_hangcheck_action action;
};
struct intel_ring_buffer {
const char *name;
@@ -43,8 +54,9 @@ struct intel_ring_buffer {
RCS = 0x0,
VCS,
BCS,
VECS,
} id;
-#define I915_NUM_RINGS 3
+#define I915_NUM_RINGS 4
u32 mmio_base;
void __iomem *virtual_start;
struct drm_device *dev;
@@ -67,7 +79,7 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
-u32 irq_refcount; /* protected by dev_priv->irq_lock */
+unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
@@ -102,8 +114,11 @@ struct intel_ring_buffer {
struct intel_ring_buffer *to,
u32 seqno);
-u32 semaphore_register[3]; /*our mbox written by others */
-u32 signal_mbox[2]; /* mboxes this ring signals to */
+/* our mbox written by others */
+u32 semaphore_register[I915_NUM_RINGS];
+/* mboxes this ring signals to */
+u32 signal_mbox[I915_NUM_RINGS];
/**
* List of objects currently involved in rendering from the
* ringbuffer.
@@ -127,6 +142,7 @@ struct intel_ring_buffer {
*/
u32 outstanding_lazy_request;
bool gpu_caches_dirty;
bool fbc_dirty;
wait_queue_head_t irq_queue;
@@ -135,9 +151,15 @@
*/
bool itlb_before_ctx_switch;
struct i915_hw_context *default_context;
struct drm_i915_gem_object *last_context_obj;
struct i915_hw_context *last_context;
void *private;
struct intel_ring_hangcheck hangcheck;
struct {
struct drm_i915_gem_object *obj;
u32 gtt_offset;
volatile u32 *cpu_page;
} scratch;
};
static inline bool
@@ -224,6 +246,7 @@ int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);


@@ -80,7 +80,7 @@ struct intel_sdvo {
/*
* Capabilities of the SDVO device returned by
-* i830_sdvo_get_capabilities()
+* intel_sdvo_get_capabilities()
*/
struct intel_sdvo_caps caps;
@@ -202,15 +202,14 @@ struct intel_sdvo_connector {
u32 cur_dot_crawl, max_dot_crawl;
};
-static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
{
-return container_of(encoder, struct intel_sdvo, base.base);
+return container_of(encoder, struct intel_sdvo, base);
}
static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
-return container_of(intel_attached_encoder(connector),
-struct intel_sdvo, base);
+return to_sdvo(intel_attached_encoder(connector));
}
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -539,7 +538,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
&status))
goto log_fail;
-while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+while ((status == SDVO_CMD_STATUS_PENDING ||
+status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
if (retry < 10)
msleep(15);
else
@@ -712,6 +712,13 @@ static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}
static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}
static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
@ -726,6 +733,13 @@ static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
return intel_sdvo_get_timing(intel_sdvo,
SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
}
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
uint16_t clock,
@ -774,6 +788,8 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
memset(dtd, 0, sizeof(*dtd));
width = mode->hdisplay;
height = mode->vdisplay;
@ -816,44 +832,51 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
dtd->part2.reserved = 0;
}
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
const struct intel_sdvo_dtd *dtd)
{
mode->hdisplay = dtd->part1.h_active;
mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
mode->htotal = mode->hdisplay + dtd->part1.h_blank;
mode->htotal += (dtd->part1.h_high & 0xf) << 8;
struct drm_display_mode mode = {};
mode->vdisplay = dtd->part1.v_active;
mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
mode->vsync_start = mode->vdisplay;
mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
mode->vsync_end = mode->vsync_start +
mode.hdisplay = dtd->part1.h_active;
mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
mode.htotal = mode.hdisplay + dtd->part1.h_blank;
mode.htotal += (dtd->part1.h_high & 0xf) << 8;
mode.vdisplay = dtd->part1.v_active;
mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
mode.vsync_start = mode.vdisplay;
mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
mode.vsync_end = mode.vsync_start +
(dtd->part2.v_sync_off_width & 0xf);
mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
mode->clock = dtd->part1.clock * 10;
mode.clock = dtd->part1.clock * 10;
mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
mode.flags |= DRM_MODE_FLAG_INTERLACE;
if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
mode.flags |= DRM_MODE_FLAG_PHSYNC;
else
mode.flags |= DRM_MODE_FLAG_NHSYNC;
if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
mode.flags |= DRM_MODE_FLAG_PVSYNC;
else
mode.flags |= DRM_MODE_FLAG_NVSYNC;
drm_mode_set_crtcinfo(&mode, 0);
drm_mode_copy(pmode, &mode);
}
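/* Worked example of the bitfield merge above (values chosen for
 * illustration): a 1280-wide mode stores 1280 & 0xff == 0x00 in
 * part1.h_active and 1280 >> 8 == 0x5 in the high nibble of
 * part1.h_high, so hdisplay = 0x00 | (0x5 << 8) = 1280. */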
static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
@ -950,30 +973,32 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
union hdmi_infoframe frame;
int ret;
ssize_t len;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return false;
}
if (intel_sdvo->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_FULL;
}
avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like
* we must not send the ecc field, either. */
memcpy(sdvo_data, &avi_if, 3);
sdvo_data[3] = avi_if.checksum;
memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
if (len < 0)
return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
SDVO_HBUF_TX_VSYNC,
@ -1041,10 +1066,36 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
return true;
}
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
{
unsigned dotclock = pipe_config->adjusted_mode.clock;
struct dpll *clock = &pipe_config->dpll;
/* SDVO TV has fixed PLL values that depend on its clock range;
this mirrors the VBIOS setting. */
if (dotclock >= 100000 && dotclock < 140500) {
clock->p1 = 2;
clock->p2 = 10;
clock->n = 3;
clock->m1 = 16;
clock->m2 = 8;
} else if (dotclock >= 140500 && dotclock <= 200000) {
clock->p1 = 1;
clock->p2 = 10;
clock->n = 6;
clock->m1 = 12;
clock->m2 = 8;
} else {
WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
}
pipe_config->clock_set = true;
}
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
@ -1066,6 +1117,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
mode,
adjusted_mode);
pipe_config->sdvo_tv_clock = true;
} else if (intel_sdvo->is_lvds) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo->sdvo_lvds_fixed_mode))
@ -1097,6 +1149,10 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
if (intel_sdvo->color_range)
pipe_config->limited_color_range = true;
/* Clock computation needs to happen after pixel multiplier. */
if (intel_sdvo->is_tv)
i9xx_adjust_sdvo_tv_clock(pipe_config);
return true;
}
@ -1104,12 +1160,11 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = intel_encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
&crtc->config.adjusted_mode;
struct drm_display_mode *mode = &crtc->config.requested_mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd, output_dtd;
@ -1166,14 +1221,17 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
input_dtd.part1.clock /= crtc->config.pixel_multiplier;
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
switch (intel_crtc->config.pixel_multiplier) {
switch (crtc->config.pixel_multiplier) {
default:
WARN(1, "unknown pixel mutlipler specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@ -1204,9 +1262,9 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
}
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@ -1216,7 +1274,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (intel_crtc->config.pixel_multiplier - 1)
sdvox |= (crtc->config.pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
@ -1231,7 +1289,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(&connector->base);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
u16 active_outputs;
u16 active_outputs = 0;
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
@ -1246,8 +1304,8 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
u16 active_outputs;
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
u32 tmp;
tmp = I915_READ(intel_sdvo->sdvo_reg);
@ -1264,10 +1322,77 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
return true;
}
static void intel_sdvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
u32 flags = 0, sdvox;
u8 val;
bool ret;
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
/* Some sdvo encoders are not spec compliant and don't
* implement the mandatory get_timings function. */
DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
} else {
if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
}
pipe_config->adjusted_mode.flags |= flags;
/*
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
* the sdvo port register, on all other platforms it is part of the dpll
* state. Since the general pipe state readout happens before the
* encoder->get_config hook, we already have a valid pixel multiplier on
* all other platforms.
*/
if (IS_I915G(dev) || IS_I915GM(dev)) {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
&val, 1)) {
switch (val) {
case SDVO_CLOCK_RATE_MULT_1X:
encoder_pixel_multiplier = 1;
break;
case SDVO_CLOCK_RATE_MULT_2X:
encoder_pixel_multiplier = 2;
break;
case SDVO_CLOCK_RATE_MULT_4X:
encoder_pixel_multiplier = 4;
break;
}
}
WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
}
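/* Worked example of the cross check above (illustrative): with a 2x
 * clock multiplier the port register field stores 1, the "+ 1" in the
 * readout recovers pipe_config->pixel_multiplier == 2, and a compliant
 * encoder reports SDVO_CLOCK_RATE_MULT_2X, so the WARN stays silent. */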
static void intel_disable_sdvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u32 temp;
intel_sdvo_set_active_outputs(intel_sdvo, 0);
@ -1309,7 +1434,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
bool input1, input2;
@ -1344,6 +1469,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
{
struct drm_crtc *crtc;
@ -1365,6 +1491,8 @@ static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
return;
}
/* We set active outputs manually below in case pipe dpms doesn't change
* due to cloning. */
if (mode != DRM_MODE_DPMS_ON) {
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
@ -1467,7 +1595,7 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
&intel_sdvo->hotplug_active, 2);
@ -1495,7 +1623,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
return drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
dev_priv->crt_ddc_pin));
dev_priv->vbt.crt_ddc_pin));
}
static enum drm_connector_status
@ -1580,6 +1708,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
&response, 2))
@ -1625,12 +1756,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected) {
intel_sdvo->is_tv = false;
intel_sdvo->is_lvds = false;
intel_sdvo->base.needs_tv_clock = false;
if (response & SDVO_TV_MASK) {
if (response & SDVO_TV_MASK)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
}
if (response & SDVO_LVDS_MASK)
intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
}
@ -1771,22 +1899,13 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct drm_display_mode *newmode;
/*
* Attempt to get the mode list from DDC.
* Assume that the preferred modes are
* arranged in priority order.
*/
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
* SDVO->LVDS transcoders can't cope with the EDID mode. Since
* drm_mode_probed_add adds the mode at the head of the list we add it
* last.
* SDVO->LVDS transcoders can't cope with the EDID mode.
*/
if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
newmode = drm_mode_duplicate(connector->dev,
dev_priv->sdvo_lvds_vbt_mode);
dev_priv->vbt.sdvo_lvds_vbt_mode);
if (newmode != NULL) {
/* Guarantee the mode is preferred */
newmode->type = (DRM_MODE_TYPE_PREFERRED |
@ -1795,6 +1914,13 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
}
}
/*
* Attempt to get the mode list from DDC.
* Assume that the preferred modes are
* arranged in priority order.
*/
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
@ -2076,7 +2202,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
@ -2329,7 +2455,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo_connector->output_flag = type;
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
@ -2417,7 +2542,6 @@ static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
{
intel_sdvo->is_tv = false;
intel_sdvo->base.needs_tv_clock = false;
intel_sdvo->is_lvds = false;
/* Per the SDVO spec, an XXX1 output function may not exist unless the
device also has the corresponding XXX0 function. */
@ -2751,7 +2875,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
u32 hotplug_mask;
int i;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
@ -2780,23 +2903,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
}
}
hotplug_mask = 0;
if (IS_G4X(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
} else if (IS_GEN4(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
} else {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
}
intel_encoder->compute_config = intel_sdvo_compute_config;
intel_encoder->disable = intel_disable_sdvo;
intel_encoder->mode_set = intel_sdvo_mode_set;
intel_encoder->enable = intel_enable_sdvo;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
intel_encoder->get_config = intel_sdvo_get_config;
/* In the default case, SDVO LVDS is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))

View File

@ -0,0 +1,177 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "i915_drv.h"
#include "intel_drv.h"
/* IOSF sideband */
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
u32 port, u32 opcode, u32 addr, u32 *val)
{
u32 cmd, be = 0xf, bar = 0;
bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
opcode == DPIO_OPCODE_REG_READ);
cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
(bar << IOSF_BAR_SHIFT);
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
is_read ? "read" : "write");
return -EAGAIN;
}
I915_WRITE(VLV_IOSF_ADDR, addr);
if (!is_read)
I915_WRITE(VLV_IOSF_DATA, *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
is_read ? "read" : "write");
return -ETIMEDOUT;
}
if (is_read)
*val = I915_READ(VLV_IOSF_DATA);
I915_WRITE(VLV_IOSF_DATA, 0);
return 0;
}
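/* Summary of the doorbell handshake above: wait for IOSF_SB_BUSY to
 * clear, latch the address (and the data, for writes), ring
 * VLV_IOSF_DOORBELL_REQ with the composed command word, wait for busy to
 * clear again, then fetch VLV_IOSF_DATA on reads. The punit/NC/DPIO
 * helpers below are thin wrappers that only pick devfn, port and opcode. */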
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
PUNIT_OPCODE_REG_READ, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
}
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
PUNIT_OPCODE_REG_WRITE, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
}
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
PUNIT_OPCODE_REG_READ, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
}
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
DPIO_OPCODE_REG_READ, reg, &val);
return val;
}
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
DPIO_OPCODE_REG_WRITE, reg, &val);
}
/* SBI access */
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return 0;
}
I915_WRITE(SBI_ADDR, (reg << 16));
if (destination == SBI_ICLK)
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
else
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
return 0;
}
return I915_READ(SBI_DATA);
}
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
u32 tmp;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return;
}
I915_WRITE(SBI_ADDR, (reg << 16));
I915_WRITE(SBI_DATA, value);
if (destination == SBI_ICLK)
tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
else
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
return;
}
}

View File

@ -32,12 +32,14 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@ -107,14 +109,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
sprctl |= SP_ENABLE;
intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@ -132,13 +135,13 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
static void
vlv_disable_plane(struct drm_plane *dplane)
vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -151,6 +154,8 @@ vlv_disable_plane(struct drm_plane *dplane)
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
}
static int
@ -205,7 +210,8 @@ vlv_get_colorkey(struct drm_plane *dplane,
}
static void
ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@ -254,21 +260,25 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
/* must disable */
if (IS_HASWELL(dev))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;
if (IS_HASWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
@ -307,7 +317,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
I915_MODIFY_DISPBASE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
/* potentially re-enable LP watermarks */
@ -316,7 +327,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
}
static void
ivb_disable_plane(struct drm_plane *plane)
ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -334,6 +345,8 @@ ivb_disable_plane(struct drm_plane *plane)
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
intel_update_watermarks(dev);
@ -394,7 +407,8 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
}
static void
ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@ -446,14 +460,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
dvsscale = 0;
if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
@ -475,12 +490,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
I915_MODIFY_DISPBASE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
static void
ilk_disable_plane(struct drm_plane *plane)
ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -493,6 +509,8 @@ ilk_disable_plane(struct drm_plane *plane)
/* Flush double buffered register updates */
I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
}
static void
@ -583,6 +601,20 @@ ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
key->flags = I915_SET_COLORKEY_NONE;
}
static bool
format_is_yuv(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YVYU:
return true;
default:
return false;
}
}
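/* Background for the helper above (not from the original diff): these
 * packed 4:2:2 formats store two horizontal pixels per 32-bit macropixel
 * (e.g. Y0 U0 Y1 V0 for YUYV), which is why the update path below forces
 * src_x and src_w to even values. */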
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@ -600,9 +632,29 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
bool disable_primary = false;
bool visible;
int hscale, vscale;
int max_scale, min_scale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
struct drm_rect src = {
/* sample coordinates in 16.16 fixed point */
.x1 = src_x,
.x2 = src_x + src_w,
.y1 = src_y,
.y2 = src_y + src_h,
};
struct drm_rect dst = {
/* integer pixels */
.x1 = crtc_x,
.x2 = crtc_x + crtc_w,
.y1 = crtc_y,
.y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
.x2 = crtc->mode.hdisplay,
.y2 = crtc->mode.vdisplay,
};
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
@ -618,19 +670,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_plane->src_w = src_w;
intel_plane->src_h = src_h;
src_w = src_w >> 16;
src_h = src_h >> 16;
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
return -EINVAL;
if (crtc_x >= primary_w || crtc_y >= primary_h)
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
DRM_DEBUG_KMS("Pipe disabled\n");
return -EINVAL;
}
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe)
if (intel_plane->pipe != intel_crtc->pipe) {
DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
return -EINVAL;
}
/* FIXME check all gen limits */
if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > 16384) {
DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
return -EINVAL;
}
/* Sprite planes can be linear or x-tiled surfaces */
switch (obj->tiling_mode) {
@ -638,55 +694,123 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
case I915_TILING_X:
break;
default:
DRM_DEBUG_KMS("Unsupported tiling mode\n");
return -EINVAL;
}
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
* The caller must handle that by adjusting source offset and size.
* FIXME the following code does a bunch of fuzzy adjustments to the
* coordinates and sizes. We probably need some way to decide whether
* more strict checking should be done instead.
*/
if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
crtc_w += crtc_x;
crtc_x = 0;
}
if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
goto out;
if ((crtc_x + crtc_w) > primary_w)
crtc_w = primary_w - crtc_x;
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
crtc_h += crtc_y;
crtc_y = 0;
}
if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
goto out;
if (crtc_y + crtc_h > primary_h)
crtc_h = primary_h - crtc_y;
hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
BUG_ON(hscale < 0);
if (!crtc_w || !crtc_h) /* Again, nothing to display */
goto out;
vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
BUG_ON(vscale < 0);
visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
crtc_x = dst.x1;
crtc_y = dst.y1;
crtc_w = drm_rect_width(&dst);
crtc_h = drm_rect_height(&dst);
if (visible) {
/* check again in case clipping clamped the results */
hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
if (hscale < 0) {
DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
drm_rect_debug_print(&src, true);
drm_rect_debug_print(&dst, false);
return hscale;
}
vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
if (vscale < 0) {
DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
drm_rect_debug_print(&src, true);
drm_rect_debug_print(&dst, false);
return vscale;
}
/* Make the source viewport size an exact multiple of the scaling factors. */
drm_rect_adjust_size(&src,
drm_rect_width(&dst) * hscale - drm_rect_width(&src),
drm_rect_height(&dst) * vscale - drm_rect_height(&src));
/* sanity check to make sure the src viewport wasn't enlarged */
WARN_ON(src.x1 < (int) src_x ||
src.y1 < (int) src_y ||
src.x2 > (int) (src_x + src_w) ||
src.y2 > (int) (src_y + src_h));
/*
* We may not have a scaler, eg. HSW does not have it any more
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
return -EINVAL;
src_x = src.x1 >> 16;
src_w = drm_rect_width(&src) >> 16;
src_y = src.y1 >> 16;
src_h = drm_rect_height(&src) >> 16;
if (format_is_yuv(fb->pixel_format)) {
src_x &= ~1;
src_w &= ~1;
/*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
* Must keep src and dst the
* same if we can't scale.
*/
if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
if (!intel_plane->can_scale)
crtc_w &= ~1;
if (crtc_w == 0)
visible = false;
}
}
/* Check size restrictions when scaling */
if (visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
WARN_ON(!intel_plane->can_scale);
/* FIXME interlacing min height is 6 */
if (crtc_w < 3 || crtc_h < 3)
visible = false;
if (src_w < 3 || src_h < 3)
visible = false;
width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
if (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
}
}
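/* Worked example of the width check above (values assumed): a 2048-wide
 * YUYV source (pixel_size == 2) starting at src_x == 8 gives
 * width_bytes = ((8 * 2) & 63) + 2048 * 2 = 4112 > 4096, so the update
 * is rejected even though src_w itself sits at the 2048 limit. */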
dst.x1 = crtc_x;
dst.x2 = crtc_x + crtc_w;
dst.y1 = crtc_y;
dst.y2 = crtc_y + crtc_h;
/*
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
if ((crtc_x == 0) && (crtc_y == 0) &&
(crtc_w == primary_w) && (crtc_h == primary_h))
disable_primary = true;
disable_primary = drm_rect_equals(&dst, &clip);
WARN_ON(disable_primary && !visible);
mutex_lock(&dev->struct_mutex);
@ -708,8 +832,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (!disable_primary)
intel_enable_primary(crtc);
intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
crtc_w, crtc_h, x, y, src_w, src_h);
if (visible)
intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
else
intel_plane->disable_plane(plane, crtc);
if (disable_primary)
intel_disable_primary(crtc);
@ -732,7 +860,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
out_unlock:
mutex_unlock(&dev->struct_mutex);
out:
return ret;
}
@ -743,9 +870,14 @@ intel_disable_plane(struct drm_plane *plane)
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
if (plane->crtc)
if (!plane->fb)
return 0;
if (WARN_ON(!plane->crtc))
return -EINVAL;
intel_enable_primary(plane->crtc);
intel_plane->disable_plane(plane);
intel_plane->disable_plane(plane, plane->crtc);
if (!intel_plane->obj)
goto out;
@ -845,6 +977,14 @@ void intel_plane_restore(struct drm_plane *plane)
intel_plane->src_w, intel_plane->src_h);
}
void intel_plane_disable(struct drm_plane *plane)
{
if (!plane->crtc || !plane->fb)
return;
intel_disable_plane(plane);
}
static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,
@ -918,13 +1058,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
break;
case 7:
if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
intel_plane->can_scale = false;
else
if (IS_IVYBRIDGE(dev)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
}
if (IS_VALLEYVIEW(dev)) {
intel_plane->max_downscale = 1;
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
intel_plane->update_colorkey = vlv_update_colorkey;
@ -933,7 +1075,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
} else {
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;

View File

@ -0,0 +1,600 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "i915_drv.h"
#include "intel_drv.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 2
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
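/* For reference, a use such as __raw_i915_read32(dev_priv, FORCEWAKE_ACK)
 * expands to readl(dev_priv->regs + FORCEWAKE_ACK): a raw MMIO access with
 * none of the forcewake or FIFO bookkeeping that the i915_read/i915_write
 * helpers further down layer on top. */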
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
u32 gt_thread_status_mask;
if (IS_HASWELL(dev_priv->dev))
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
else
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
}
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
__raw_i915_write32(dev_priv, FORCEWAKE, 1);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
/* WaRsForcewakeWaitTC0:snb */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
}
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
if (IS_HASWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_MT_ACK;
if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
/* WaRsForcewakeWaitTC0:ivb,hsw */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
u32 gtfifodbg;
gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
"MMIO read or write has been dropped %x\n", gtfifodbg))
__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
dev_priv->uncore.fifo_count = fifo;
}
dev_priv->uncore.fifo_count--;
return ret;
}
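/* Accounting sketch for the FIFO wait above: once the cached fifo_count
 * drops to GT_FIFO_NUM_RESERVED_ENTRIES, the writer polls
 * GT_FIFO_FREE_ENTRIES (up to 500 * 10us) until the hardware reports
 * more free slots than the reserved floor, then consumes one entry per
 * posted write. */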
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
/* WaRsForcewakeWaitTC0:vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
void intel_uncore_early_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
} else if (IS_HASWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
/* IVB configs may use multi-threaded forcewake */
/* A small trick here - if the bios hasn't configured
* MT forcewake, and if the device is in RC6, then
* force_wake_mt_get will not wake the device and the
* ECOBUS read will return zero. Which will be
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
if (ecobus & FORCEWAKE_MT_ENABLE) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_mt_put;
} else {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
} else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
}
static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_VALLEYVIEW(dev)) {
vlv_force_wake_reset(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
}
void intel_uncore_sanitize(struct drm_device *dev)
{
intel_uncore_forcewake_reset(dev);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
}
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
* see gen6_gt_force_wake_get()
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
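/* A minimal usage sketch (not part of the original diff) of the pattern
 * described above: hold forcewake across a multi-register sequence so
 * the GT cannot power down between the accesses. The register chosen is
 * only an assumption for illustration. */
static void __maybe_unused example_forcewake_sequence(struct drm_i915_private *dev_priv)
{
	u32 first, second;

	gen6_gt_force_wake_get(dev_priv);
	first = I915_READ(GEN6_RPSTAT1);
	second = I915_READ(GEN6_RPSTAT1);
	gen6_gt_force_wake_put(dev_priv);

	DRM_DEBUG_DRIVER("sampled 0x%08x then 0x%08x\n", first, second);
}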
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
__raw_i915_write32(dev_priv, MI_MODE, 0);
}
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed write to %x\n", reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
unsigned long irqflags; \
u##x val = 0; \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
if (dev_priv->info->gen == 5) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv); \
val = __raw_i915_read##x(dev_priv, reg); \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_put(dev_priv); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
return val; \
}
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read
#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
unsigned long irqflags; \
u32 __fifo_ret = 0; \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
if (dev_priv->info->gen == 5) \
ilk_dummy_write(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
static const struct register_whitelist {
uint64_t offset;
uint32_t size;
uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
int i;
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (entry->offset == reg->offset &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
switch (entry->size) {
case 8:
reg->val = I915_READ64(reg->offset);
break;
case 4:
reg->val = I915_READ(reg->offset);
break;
case 2:
reg->val = I915_READ16(reg->offset);
break;
case 1:
reg->val = I915_READ8(reg->offset);
break;
default:
WARN_ON(1);
return -EINVAL;
}
return 0;
}
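/* Hedged userspace sketch of the whitelist ioctl above, assuming
 * libdrm's drmIoctl() and the i915 uapi header; 0x2358 is assumed to be
 * RING_TIMESTAMP(RENDER_RING_BASE), the single whitelisted register. */
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int read_render_timestamp(int fd, unsigned long long *ts)
{
	struct drm_i915_reg_read rr = { .offset = 0x2358 };

	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr))
		return -1; /* non-whitelisted offsets fail with -EINVAL */
	*ts = rr.val;
	return 0;
}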
static int i8xx_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_I85X(dev))
return -ENODEV;
I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
POSTING_READ(D_STATE);
if (IS_I830(dev) || IS_845G(dev)) {
I915_WRITE(DEBUG_RESET_I830,
DEBUG_RESET_DISPLAY |
DEBUG_RESET_RENDER |
DEBUG_RESET_FULL);
POSTING_READ(DEBUG_RESET_I830);
msleep(1);
I915_WRITE(DEBUG_RESET_I830, 0);
POSTING_READ(DEBUG_RESET_I830);
}
msleep(1);
I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
POSTING_READ(D_STATE);
return 0;
}
static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
static int i965_do_reset(struct drm_device *dev)
{
int ret;
/*
* Set the domains we want to reset (GRDOM/bits 2 and 3) as
* well as the reset bit (GR/bit 0). Setting the GR bit
* triggers the reset; when done, the hardware will clear it.
*/
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
/* We can't reset render&media without also resetting display ... */
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
pci_write_config_byte(dev->pdev, I965_GDRST, 0);
return 0;
}
static int ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 gdrst;
int ret;
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
if (ret)
return ret;
/* We can't reset render&media without also resetting display ... */
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
static int gen6_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
unsigned long irqflags;
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* Reset the chip */
/* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
/* Spin waiting for the device to ack the reset request */
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
intel_uncore_forcewake_reset(dev);
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
else
dev_priv->uncore.funcs.force_wake_put(dev_priv);
/* Restore fifo count */
dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return ret;
}
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
case 4: return i965_do_reset(dev);
case 2: return i8xx_do_reset(dev);
default: return -ENODEV;
}
}
void intel_uncore_clear_errors(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* XXX needs spinlock around caller's grouping */
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
void intel_uncore_check_errors(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed register before interrupt\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}

View File

@ -17,9 +17,6 @@
#include "bitmap.h"
extern struct drm_device *main_device;
typedef struct
{
kobj_t header;
@ -147,7 +144,6 @@ bool set_mode(struct drm_device *dev, struct drm_connector *connector,
do_set:
encoder = connector->encoder;
crtc = encoder->crtc;
@ -365,8 +361,6 @@ int init_display_kms(struct drm_device *dev)
};
safe_sti(ifl);
main_device = dev;
#ifdef __HWA__
err = init_bitmaps();
#endif
@ -471,16 +465,23 @@ int init_cursor(cursor_t *cursor)
if (unlikely(obj == NULL))
return -ENOMEM;
ret = i915_gem_object_pin(obj, CURSOR_WIDTH*CURSOR_HEIGHT*4, true, true);
ret = i915_gem_obj_ggtt_pin(obj, CURSOR_WIDTH*CURSOR_HEIGHT*4, true, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
}
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
{
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}
/* You don't need to worry about fragmentation issues.
* GTT space is contiguous. I guarantee it. */
-	mapped = bits = (u32*)MapIoMem(dev_priv->gtt.mappable_base + obj->gtt_offset,
+	mapped = bits = (u32*)MapIoMem(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
CURSOR_WIDTH*CURSOR_HEIGHT*4, PG_SW);
if (unlikely(bits == NULL))
@@ -612,7 +613,7 @@ cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
os_display->cursor = cursor;
if (!dev_priv->info->cursor_needs_physical)
-		intel_crtc->cursor_addr = cursor->cobj->gtt_offset;
+		intel_crtc->cursor_addr = i915_gem_obj_ggtt_offset(cursor->cobj);
else
intel_crtc->cursor_addr = (addr_t)cursor->cobj;
@@ -714,7 +715,7 @@ int i915_mask_update(struct drm_device *dev, void *data,
};
#endif
};
};
slot = *((u8*)CURRENT_TASK);
@@ -855,21 +856,27 @@ void getrawmonotonic(struct timespec *ts)
ts->tv_nsec = (tmp - ts->tv_sec*100)*10000000;
}
-void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
+void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
-	while (nsec >= NSEC_PER_SEC) {
-		nsec -= NSEC_PER_SEC;
-		++sec;
-	}
-	while (nsec < 0) {
-		nsec += NSEC_PER_SEC;
-		--sec;
-	}
-	ts->tv_sec = sec;
-	ts->tv_nsec = nsec;
+	while (nsec >= NSEC_PER_SEC) {
+		/*
+		 * The following asm() prevents the compiler from
+		 * optimising this loop into a modulo operation. See
+		 * also __iter_div_u64_rem() in include/linux/time.h
+		 */
+		asm("" : "+rm"(nsec));
+		nsec -= NSEC_PER_SEC;
+		++sec;
+	}
+	while (nsec < 0) {
+		asm("" : "+rm"(nsec));
+		nsec += NSEC_PER_SEC;
+		--sec;
+	}
+	ts->tv_sec = sec;
+	ts->tv_nsec = nsec;
}
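
/* Editor's sketch, illustrative: with the widened s64 nsec parameter the
 * helper can absorb arbitrary nanosecond arithmetic, e.g. a
 * timespec_add_ns()-style wrapper (name hypothetical): */
static void example_timespec_add_ns(struct timespec *ts, s64 delta_ns)
{
	set_normalized_timespec(ts, ts->tv_sec,
				(s64)ts->tv_nsec + delta_ns);
}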
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{

View File

@@ -26,8 +26,8 @@ struct pci_device {
uint8_t revision;
};
-extern struct drm_device *main_device;
-extern struct drm_file *drm_file_handlers[256];
+struct drm_device *main_device;
+struct drm_file *drm_file_handlers[256];
void cpu_detect();
@@ -63,8 +63,7 @@ int i915_modeset = 1;
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
-	int err = 0;
+	int err = 0;
if(action != 1)
{
@@ -80,8 +79,9 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
if(!dbg_open(log))
{
-	strcpy(log, "/tmp1/1/i915.log");
+//	strcpy(log, "/tmp1/1/i915.log");
+//	strcpy(log, "/RD/1/DRIVERS/i915.log");
+	strcpy(log, "/BD1/4/i915.log");
if(!dbg_open(log))
{
@@ -89,7 +89,7 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
return 0;
};
}
dbgprintf(" i915 v3.10\n cmdline: %s\n", cmdline);
dbgprintf(" i915 v3.12-6\n cmdline: %s\n", cmdline);
cpu_detect();
// dbgprintf("\ncache line size %d\n", x86_clflush_size);
@@ -97,12 +97,12 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
enum_pci_devices();
err = i915_init();
if(err)
{
dbgprintf("Epic Fail :(\n");
return 0;
};
init_display_kms(main_device);
err = RegService("DISPLAY", display_handler);
@@ -110,7 +110,9 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
dbgprintf("Set DISPLAY handler\n");
struct drm_i915_private *dev_priv = main_device->dev_private;
driver_wq_state = 1;
run_workqueue(dev_priv->wq);
return err;
@@ -140,10 +142,10 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
#define SRV_GET_PCI_INFO 20
-#define SRV_GET_PARAM 21
-#define SRV_I915_GEM_CREATE 22
-#define SRV_DRM_GEM_CLOSE 23
-#define SRV_I915_GEM_PIN 24
+#define SRV_GET_PARAM 21
+#define SRV_I915_GEM_CREATE 22
+#define SRV_DRM_GEM_CLOSE 23
+#define SRV_I915_GEM_PIN 24
#define SRV_I915_GEM_SET_CACHEING 25
#define SRV_I915_GEM_GET_APERTURE 26
#define SRV_I915_GEM_PWRITE 27
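
/* Editor's sketch, hypothetical: these SRV_* codes are dispatched by the
 * DISPLAY service handler registered in drvEntry(). The handler shape and
 * the ioctl_t fields below are assumptions for illustration only. */
int example_display_handler(ioctl_t *io)
{
	switch (io->io_code) {
	case SRV_GET_PCI_INFO:
		/* copy the probed struct pci_device to the caller */
		return 0;
	case SRV_I915_GEM_CREATE:
		/* create a GEM object and return its handle */
		return 0;
	default:
		return -1; /* unknown service code */
	}
}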

View File

@@ -104,22 +104,6 @@ void shmem_file_delete(struct file *filep)
kfree(filep->pages);
}
-/**
- * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
- * @frame: HDMI AVI infoframe
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
-{
-	memset(frame, 0, sizeof(*frame));
-	frame->type = HDMI_INFOFRAME_TYPE_AVI;
-	frame->version = 2;
-	frame->length = 13;
-	return 0;
-}
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
@@ -509,4 +493,14 @@ void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
buf, len, true);
}
+void *kmemdup(const void *src, size_t len, gfp_t gfp)
+{
+	void *p;
+	p = kmalloc(len, gfp);
+	if (p)
+		memcpy(p, src, len);
+	return p;
+}
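
/* Editor's sketch, illustrative use of the kmemdup() added above:
 * duplicate a caller-owned blob so the source buffer may be released. */
static void *example_dup_blob(const void *src, size_t len)
{
	void *copy = kmemdup(src, len, GFP_KERNEL);

	if (!copy)
		return NULL; /* allocation failed */

	return copy;
}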