forked from KolibriOS/kolibrios

commit d15a13f685 (parent 1cd44bfab6)

i915-v3.9-rc8

git-svn-id: svn://kolibrios.org@3480 a494cfbc-eb01-0410-851d-a64ba20cac60
@ -18,6 +18,9 @@
.global _DestroyEvent
.global _DestroyObject

.global _DiskAdd
.global _DiskMediaChanged

.global _FreeKernelSpace
.global _FreePage

@ -82,6 +85,9 @@
.def _DestroyEvent; .scl 2; .type 32; .endef
.def _DestroyObject; .scl 2; .type 32; .endef

.def _DiskAdd; .scl 2; .type 32; .endef
.def _DiskMediaChanged; .scl 2; .type 32; .endef

.def _FreeKernelSpace; .scl 2; .type 32; .endef
.def _FreePage; .scl 2; .type 32; .endef

@ -146,6 +152,9 @@ _Delay:
_DestroyEvent:
_DestroyObject:

_DiskAdd:
_DiskMediaChanged:

_FreeKernelSpace:
_FreePage:

@ -214,6 +223,9 @@ _WaitEventTimeout:
.ascii " -export:DestroyEvent"
.ascii " -export:DestroyObject"

.ascii " -export:DiskAdd" # stdcall
.ascii " -export:DiskMediaChanged" # stdcall

.ascii " -export:FreeKernelSpace" # stdcall
.ascii " -export:FreePage" #
@ -50,7 +50,7 @@ typedef struct
int out_size;
}ioctl_t;

typedef int (__stdcall *srv_proc_t)(ioctl_t *);
typedef int ( __stdcall *srv_proc_t)(ioctl_t *);

#define ERR_OK 0
#define ERR_PARAM -1
@ -817,7 +817,7 @@ struct drm_mode_config {
/* output poll support */
bool poll_enabled;
bool poll_running;
// struct delayed_work output_poll_work;
struct delayed_work output_poll_work;

/* pointers to standard properties */
struct list_head property_blob_list;
@ -58,6 +58,8 @@

#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)

#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))

#endif
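For reference, the power-of-two bit test works out like this (values illustrative only, assuming a functional BUILD_BUG_ON):

/* n = 8:  8 & 7 == 0, so BUILD_BUG_ON sees 0 and the build proceeds.
 * n = 6:  6 & 5 == 4 != 0, so BUILD_BUG_ON's array gets a negative size
 * and compilation fails. */
BUILD_BUG_ON_NOT_POWER_OF_2(8);  /* compiles */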
@ -5,6 +5,9 @@
/*
 * Common definitions for all gcc versions go here.
 */
#define GCC_VERSION (__GNUC__ * 10000 \
		   + __GNUC_MINOR__ * 100 \
		   + __GNUC_PATCHLEVEL__)


/* Optimization barrier */
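The flattened version number works out as follows (worked example):

/* gcc 4.6.3: GCC_VERSION = 4*10000 + 6*100 + 3 = 40603, so an old
 * gcc-4-only test like "#if __GNUC_MINOR__ >= 6" becomes the
 * version-agnostic "#if GCC_VERSION >= 40600" used in the hunks below. */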
@ -2,22 +2,22 @@
#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
#endif

#if __GNUC_MINOR__ < 2
#if GCC_VERSION < 30200
# error Sorry, your compiler is too old - please upgrade it.
#endif

#if __GNUC_MINOR__ >= 3
#if GCC_VERSION >= 30300
# define __used __attribute__((__used__))
#else
# define __used __attribute__((__unused__))
#endif

#if __GNUC_MINOR__ >= 4
#if GCC_VERSION >= 30400
#define __must_check __attribute__((warn_unused_result))
#endif

#ifdef CONFIG_GCOV_KERNEL
# if __GNUC_MINOR__ < 4
# if GCC_VERSION < 30400
# error "GCOV profiling support for gcc versions below 3.4 not included"
# endif /* __GNUC_MINOR__ */
#endif /* CONFIG_GCOV_KERNEL */
@ -4,7 +4,7 @@

/* GCC 4.1.[01] miscompiles __weak */
#ifdef __KERNEL__
# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
# error Your version of gcc miscompiles the __weak directive
# endif
#endif
@ -13,7 +13,11 @@
#define __must_check __attribute__((warn_unused_result))
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)

#if __GNUC_MINOR__ >= 3
#if GCC_VERSION >= 40100
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif

#if GCC_VERSION >= 40300
/* Mark functions as cold. gcc will assume any path leading to a call
   to them will be unlikely. This means a lot of manual unlikely()s
   are unnecessary now for any paths leading to the usual suspects
@ -29,9 +33,15 @@
   the kernel context */
#define __cold __attribute__((__cold__))

#define __linktime_error(message) __attribute__((__error__(message)))
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

#if __GNUC_MINOR__ >= 5
#ifndef __CHECKER__
# define __compiletime_warning(message) __attribute__((warning(message)))
# define __compiletime_error(message) __attribute__((error(message)))
#endif /* __CHECKER__ */
#endif /* GCC_VERSION >= 40300 */

#if GCC_VERSION >= 40500
/*
 * Mark a position in code as unreachable. This can be used to
 * suppress control flow warnings after asm blocks that transfer
@ -46,30 +56,22 @@
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__))

#endif
#endif
#endif /* GCC_VERSION >= 40500 */

#if __GNUC_MINOR__ >= 6
#if GCC_VERSION >= 40600
/*
 * Tell the optimizer that something else uses this function or variable.
 */
#define __visible __attribute__((externally_visible))
#endif

#if __GNUC_MINOR__ > 0
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
#if __GNUC_MINOR__ >= 3 && !defined(__CHECKER__)
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#endif

#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if __GNUC_MINOR__ >= 4
#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
@ -170,6 +170,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
(typeof(ptr)) (__ptr + (off)); })
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */
@ -302,10 +307,36 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
# define __compiletime_error_fallback(condition) \
do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
#else
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
#ifndef __linktime_error
# define __linktime_error(message)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
bool __cond = !(condition); \
extern void prefix ## suffix(void) __compiletime_error(msg); \
if (__cond) \
prefix ## suffix(); \
__compiletime_error_fallback(__cond); \
} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg: a message to emit if condition is false
 *
 * In tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
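A minimal usage sketch of the assert defined above (caller and message hypothetical):

/* Breaks the build when the constant condition is false; __LINE__ keeps
 * the generated error-function symbol unique per call site. */
compiletime_assert(sizeof(long) >= 4, "long is too narrow for this driver");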
@ -87,8 +87,14 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
return buf;
}

extern int hex_to_bin(char ch);
extern void hex2bin(u8 *dst, const char *src, size_t count);
enum {
DUMP_PREFIX_NONE,
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};

int hex_to_bin(char ch);
int hex2bin(u8 *dst, const char *src, size_t count);


//int printk(const char *fmt, ...);
@ -335,8 +341,8 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
#define dev_info(dev, format, arg...) \
printk("Info %s " format , __func__, ## arg)

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))

//#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON(condition)

struct page
{
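For reference, how the now-stubbed check used to work:

/* sizeof(char[1 - 2*!!(condition)]): condition == 0 yields char[1] and
 * compiles; condition != 0 yields char[-1], a compile-time error. With the
 * empty #define above, these checks are silently dropped in this tree. */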
@ -172,6 +172,13 @@ struct delayed_work {
struct work_struct work;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
return container_of(work, struct delayed_work, work);
}


struct workqueue_struct *alloc_workqueue_key(const char *fmt,
unsigned int flags, int max_active);

@ -182,6 +189,13 @@ struct workqueue_struct *alloc_workqueue_key(const char *fmt,
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);

#define INIT_WORK(_work, _func) \
do { \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = _func; \
} while (0)


#define INIT_DELAYED_WORK(_work, _func) \
do { \
INIT_LIST_HEAD(&(_work)->work.entry); \
@ -207,5 +221,7 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)


#endif
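A short sketch of how the newly added to_delayed_work() helper is typically used from a work callback (handler name hypothetical):

static void my_poll_handler(struct work_struct *work)
{
	/* Recover the embedding delayed_work from its work member. */
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... inspect or re-queue dwork here ... */
}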
File diff suppressed because it is too large
@ -69,6 +69,7 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);

static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);

static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
@ -141,6 +142,12 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
// dbgprintf("status %x\n", connector->status);
}

/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
drm_kms_helper_poll_enable(dev);

dev->mode_config.poll_running = drm_kms_helper_poll;

if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
@ -955,7 +962,13 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);

#if 0
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
/* send a uevent + call fbdev */
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);

#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
@ -1004,15 +1017,15 @@ static void output_poll_execute(struct work_struct *work)
if (changed)
drm_kms_helper_hotplug_event(dev);

if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
// if (repoll)
// schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}

void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
// cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);

@ -1030,8 +1043,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
poll = true;
}

if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
// if (poll)
// schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);

@ -1083,5 +1096,3 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
drm_kms_helper_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);

#endif
File diff suppressed because it is too large
@ -52,9 +52,36 @@ static LIST_HEAD(kernel_fb_helper_list);
 * mode setting driver. They can be used mostly independently from the crtc
 * helper functions used by many drivers to implement the kernel mode setting
 * interfaces.
 *
 * Initialization is done as a three-step process with drm_fb_helper_init(),
 * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
 * Drivers with fancier requirements than the default behaviour can override the
 * second step with their own code. Teardown is done with drm_fb_helper_fini().
 *
 * At runtime drivers should restore the fbdev console by calling
 * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
 * should also notify the fb helper code of updates to the output
 * configuration by calling drm_fb_helper_hotplug_event(). For easier
 * integration with the output polling code in drm_crtc_helper.c the modeset
 * code provides a ->output_poll_changed callback.
 *
 * All other functions exported by the fb helper library can be used to
 * implement the fbdev driver interface by the driver.
 */

/* simple single crtc case helper function */
/**
 * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
 * emulation helper
 * @fb_helper: fbdev initialized with drm_fb_helper_init
 *
 * This function adds all the available connectors for use with the given
 * fb_helper. This is a separate step to allow drivers to freely assign
 * connectors to the fbdev, e.g. if some are reserved for special purposes or
 * not adequate to be used for the fbcon.
 *
 * Since this is part of the initial setup before the fbdev is published, no
 * locking is required.
 */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
@ -110,6 +137,23 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
}

static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
int bound = 0, crtcs_bound = 0;

list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
crtcs_bound++;
if (crtc->fb == fb_helper->fb)
bound++;
}

if (bound < crtcs_bound)
return false;
return true;
}

static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
@ -119,10 +163,21 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
struct drm_connector *connector;
int i, j;

/*
 * fbdev->blank can be called from irq context in case of a panic.
 * Since we already have our own special panic handler which will
 * restore the fbdev console mode completely, just bail out early.
 */

/*
 * For each CRTC in this fb, turn the connectors on/off.
 */
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return;
}

for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;

@ -137,9 +192,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
dev->mode_config.dpms_property, dpms_mode);
}
}
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
}

/**
 * drm_fb_helper_blank - implementation for ->fb_blank
 * @blank: desired blanking state
 * @info: fbdev registered by the helper
 */
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
@ -183,6 +243,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->crtc_info);
}

/**
 * drm_fb_helper_init - initialize a drm_fb_helper structure
 * @dev: drm device
 * @fb_helper: driver-allocated fbdev helper structure to initialize
 * @crtc_count: maximum number of crtcs to support in this fbdev emulation
 * @max_conn_count: max connector count
 *
 * This allocates the structures for the fbdev helper with the given limits.
 * Note that this won't yet touch the hardware (through the driver interfaces)
 * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
 * to allow driver writers more control over the exact init sequence.
 *
 * Drivers must set fb_helper->funcs before calling
 * drm_fb_helper_initial_config().
 *
 * RETURNS:
 * Zero if everything went ok, nonzero otherwise.
 */
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
int crtc_count, int max_conn_count)
@ -294,6 +372,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
return 0;
}

/**
 * drm_fb_helper_setcmap - implementation for ->fb_setcmap
 * @cmap: cmap to set
 * @info: fbdev registered by the helper
 */
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
@ -333,6 +416,11 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);

/**
 * drm_fb_helper_check_var - implementation for ->fb_check_var
 * @var: screeninfo to check
 * @info: fbdev registered by the helper
 */
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@ -425,13 +513,19 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
EXPORT_SYMBOL(drm_fb_helper_check_var);

/* this will let fbcon do the mode init */
/**
 * drm_fb_helper_set_par - implementation for ->fb_set_par
 * @info: fbdev registered by the helper
 *
 * This will let fbcon do the mode init and is called at initialization time by
 * the fbdev core when registering the driver, and later on through the hotplug
 * callback.
 */
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
struct drm_crtc *crtc;
int ret;
int i;
@ -440,25 +534,29 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}

mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
if (ret) {
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
}
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);

if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
// drm_fb_helper_hotplug_event(fb_helper);
drm_fb_helper_hotplug_event(fb_helper);
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);

/**
 * drm_fb_helper_pan_display - implementation for ->fb_pan_display
 * @var: updated screen information
 * @info: fbdev registered by the helper
 */
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@ -469,7 +567,12 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;

mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
}

for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;

@ -479,22 +582,27 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;

if (modeset->num_connectors) {
ret = crtc->funcs->set_config(modeset);
ret = drm_mode_set_config_internal(modeset);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);

int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/*
 * Allocates the backing storage and sets up the fbdev info structure through
 * the ->fb_probe callback and then registers the fbdev and sets up the panic
 * notifier.
 */
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int preferred_bpp)
{
int new_fb = 0;
int ret = 0;
int crtc_count = 0;
int i;
struct fb_info *info;
@ -572,34 +680,44 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}

/* push down into drivers */
new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
if (new_fb < 0)
return new_fb;
ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
if (ret < 0)
return ret;

info = fb_helper->fbdev;

/* set the fb pointer */
/*
 * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
 * events, but at init time drm_setup_crtcs needs to be called before
 * the fb is allocated (since we need to figure out the desired size of
 * the fb before we can allocate it ...). Hence we need to fix things up
 * here again.
 */
for (i = 0; i < fb_helper->crtc_count; i++)
if (fb_helper->crtc_info[i].mode_set.num_connectors)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;

if (new_fb) {

info->var.pixclock = 0;

// dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
// info->node, info->fix.id);

} else {
drm_fb_helper_set_par(info);
}


if (new_fb)
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);

return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);

/**
 * drm_fb_helper_fill_fix - initializes fixed fbdev information
 * @info: fbdev registered by the helper
 * @pitch: desired pitch
 * @depth: desired depth
 *
 * Helper to fill in the fixed fbdev information useful for a non-accelerated
 * fbdev emulations. Drivers which support acceleration methods which impose
 * additional constraints need to set up their own limits.
 *
 * Drivers should call this (or their equivalent setup code) from their
 * ->fb_probe callback.
 */
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@ -620,6 +738,20 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);

/**
 * drm_fb_helper_fill_var - initializes variable fbdev information
 * @info: fbdev instance to set up
 * @fb_helper: fb helper instance to use as template
 * @fb_width: desired fb width
 * @fb_height: desired fb height
 *
 * Sets up the variable fbdev metainformation from the given fb helper instance
 * and the drm framebuffer allocated in fb_helper->fb.
 *
 * Drivers should call this (or their equivalent setup code) from their
 * ->fb_probe callback after having allocated the fbdev backing
 * storage framebuffer.
 */
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
@ -937,6 +1069,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->num_connectors = 0;
modeset->fb = NULL;
}

for (i = 0; i < fb_helper->connector_count; i++) {
@ -953,9 +1086,21 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
modeset->fb = fb_helper->fb;
}
}

/* Clear out any old modes if there are no more connected outputs. */
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
if (modeset->num_connectors == 0) {
BUG_ON(modeset->fb);
BUG_ON(modeset->num_connectors);
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = NULL;
}
}
out:
kfree(crtcs);
kfree(modes);
@ -963,18 +1108,23 @@ out:
}

/**
 * drm_helper_initial_config - setup a sane initial connector configuration
 * drm_fb_helper_initial_config - setup a sane initial connector configuration
 * @fb_helper: fb_helper device struct
 * @bpp_sel: bpp value to use for the framebuffer configuration
 *
 * LOCKING:
 * Called at init time by the driver to set up the @fb_helper initial
 * configuration, must take the mode config lock.
 *
 * Scans the CRTCs and connectors and tries to put together an initial setup.
 * At the moment, this is a cloned configuration across all heads with
 * a new framebuffer object as the backing store.
 *
 * Note that this also registers the fbdev and so allows userspace to call into
 * the driver through the fbdev interfaces.
 *
 * This function will call down into the ->fb_probe callback to let
 * the driver allocate and initialize the fbdev info structure and the drm
 * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
 * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
 * values for the fbdev info structure.
 *
 * RETURNS:
 * Zero if everything went ok, nonzero otherwise.
 */
@ -983,9 +1133,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
struct drm_device *dev = fb_helper->dev;
int count = 0;

/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(fb_helper->dev);

// drm_fb_helper_parse_command_line(fb_helper);

count = drm_fb_helper_probe_connector_modes(fb_helper,
@ -1003,18 +1150,22 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);

#if 0
/**
 * drm_fb_helper_hotplug_event - respond to a hotplug notification by
 * probing all the outputs attached to the fb
 * @fb_helper: the drm_fb_helper
 *
 * LOCKING:
 * Called at runtime, must take mode config lock.
 *
 * Scan the connectors attached to the fb_helper and try to put together a
 * setup after notification of a change in output configuration.
 *
 * Called at runtime, takes the mode config locks to be able to check/change the
 * modeset configuration. Must be run from process context (which usually means
 * either the output polling work or a work item launched from the driver's
 * hotplug interrupt).
 *
 * Note that the driver must ensure that this is only called _after_ the fb has
 * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
 *
 * RETURNS:
 * 0 on success and a non-zero error code otherwise.
 */
@ -1023,23 +1174,14 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
int bound = 0, crtcs_bound = 0;
struct drm_crtc *crtc;

if (!fb_helper->fb)
return 0;

mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
crtcs_bound++;
if (crtc->fb == fb_helper->fb)
bound++;
}

if (bound < crtcs_bound) {
mutex_lock(&fb_helper->dev->mode_config.mutex);
if (!drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&fb_helper->dev->mode_config.mutex);
return 0;
}
DRM_DEBUG_KMS("\n");
@ -1050,13 +1192,16 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)

count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
drm_setup_crtcs(fb_helper);
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&fb_helper->dev->mode_config.mutex);

return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
drm_modeset_lock_all(dev);
drm_setup_crtcs(fb_helper);
drm_modeset_unlock_all(dev);
drm_fb_helper_set_par(fb_helper->fbdev);

return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
#endif
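Putting the documented three-step bring-up together, a hypothetical driver init sketch (names and bpp value illustrative, error handling trimmed):

ret = drm_fb_helper_init(dev, &priv->fb_helper, num_crtc, max_conn);
if (ret)
	return ret;
drm_fb_helper_single_add_all_connectors(&priv->fb_helper);  /* step two */
drm_fb_helper_initial_config(&priv->fb_helper, 32 /* bpp_sel */);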
@ -217,6 +217,9 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 * we may want to use ida for number allocation and a hash table
 * for the pointers, anyway.
 */
if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);

spin_lock(&filp->table_lock);

/* Check if we currently have a reference on the object */
@ -257,21 +260,19 @@ drm_gem_handle_create(struct drm_file *file_priv,
int ret;

/*
 * Get the user-visible handle using idr.
 * Get the user-visible handle using idr. Preload and perform
 * allocation under our spinlock.
 */
again:
/* ensure there is space available to allocate a handle */
if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
return -ENOMEM;

/* do the allocation under our spinlock */
idr_preload(GFP_KERNEL);
spin_lock(&file_priv->table_lock);
ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);

ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
else if (ret)
idr_preload_end();
if (ret < 0)
return ret;
*handlep = ret;

drm_gem_object_handle_reference(obj);

@ -384,6 +385,9 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
{
struct drm_gem_object *obj;

if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);

spin_lock(&filp->table_lock);

/* Check if we currently have a reference on the object */
@ -439,29 +443,25 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;

again:
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
ret = -ENOMEM;
goto err;
}

idr_preload(GFP_KERNEL);
spin_lock(&dev->object_name_lock);
if (!obj->name) {
ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
&obj->name);
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
obj->name = ret;
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
idr_preload_end();

if (ret == -EAGAIN)
goto again;
else if (ret)
if (ret < 0)
goto err;
ret = 0;

/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
} else {
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
idr_preload_end();
ret = 0;
}

@ -488,6 +488,9 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;

if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);

spin_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj)
@ -549,8 +552,6 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, file_private);

idr_remove_all(&file_private->object_idr);
idr_destroy(&file_private->object_idr);
}
#endif
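The idr conversion above follows the kernel's preload pattern; a generic sketch of the idiom (idr and lock names hypothetical):

int id;

idr_preload(GFP_KERNEL);                /* preallocate outside the lock */
spin_lock(&my_lock);
id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);  /* ids from 1 upward */
spin_unlock(&my_lock);
idr_preload_end();
if (id < 0)
	return id;    /* plain error code; no -EAGAIN retry loop anymore */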
@ -111,6 +111,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)

/* Valid dotclock? */
if (dotclock > 0) {
int frame_size;
/* Convert scanline length in pixels and video dot clock to
 * line duration, frame duration and pixel duration in
 * nanoseconds:
@ -118,7 +119,10 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
1000000000), dotclock);
framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
frame_size = crtc->hwmode.crtc_htotal *
crtc->hwmode.crtc_vtotal;
framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
dotclock);
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
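A worked example of the corrected frame-duration math (hypothetical 1080p@60 timing):

/* htotal = 2200, vtotal = 1125, dotclock = 148500000 Hz:
 * frame_size  = 2200 * 1125 = 2475000 pixels
 * framedur_ns = 2475000 * 1000000000 / 148500000 ~= 16666666 ns (60 Hz).
 * Dividing by the dotclock once avoids multiplying the rounding error
 * already present in linedur_ns by vtotal. */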
@ -102,20 +102,6 @@ int drm_mm_pre_get(struct drm_mm *mm)
}
EXPORT_SYMBOL(drm_mm_pre_get);

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
struct drm_mm_node *next_node =
list_entry(hole_node->node_list.next, struct drm_mm_node,
node_list);

return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
@ -127,7 +113,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;

BUG_ON(!hole_node->hole_follows || node->allocated);
BUG_ON(node->allocated);

if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
@ -155,12 +141,57 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
BUG_ON(node->start + node->size > adj_end);

node->hole_follows = 0;
if (node->start + node->size < hole_end) {
if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
}

struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
unsigned long start,
unsigned long size,
bool atomic)
{
struct drm_mm_node *hole, *node;
unsigned long end = start + size;
unsigned long hole_start;
unsigned long hole_end;

drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (hole_start > start || hole_end < end)
continue;

node = drm_mm_kmalloc(mm, atomic);
if (unlikely(node == NULL))
return NULL;

node->start = start;
node->size = size;
node->mm = mm;
node->allocated = 1;

INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole->node_list);

if (start == hole_start) {
hole->hole_follows = 0;
list_del_init(&hole->hole_stack);
}

node->hole_follows = 0;
if (end != hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}

return node;
}

WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
return NULL;
}
EXPORT_SYMBOL(drm_mm_create_block);

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
@ -253,7 +284,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(node->start + node->size > end);

node->hole_follows = 0;
if (node->start + node->size < hole_end) {
if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
@ -327,12 +358,13 @@ void drm_mm_remove_node(struct drm_mm_node *node)
list_entry(node->node_list.prev, struct drm_mm_node, node_list);

if (node->hole_follows) {
BUG_ON(drm_mm_hole_node_start(node)
== drm_mm_hole_node_end(node));
BUG_ON(__drm_mm_hole_node_start(node) ==
__drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
} else
BUG_ON(drm_mm_hole_node_start(node)
!= drm_mm_hole_node_end(node));
BUG_ON(__drm_mm_hole_node_start(node) !=
__drm_mm_hole_node_end(node));


if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
@ -390,6 +422,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long adj_start;
unsigned long adj_end;
unsigned long best_size;

BUG_ON(mm->scanned_blocks);
@ -397,17 +431,13 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;

list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry);

drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}

BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;

@ -434,6 +464,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long adj_start;
unsigned long adj_end;
unsigned long best_size;

BUG_ON(mm->scanned_blocks);
@ -441,13 +473,11 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;

list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
start : drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
end : drm_mm_hole_node_end(entry);

BUG_ON(!entry->hole_follows);
drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
if (adj_start < start)
adj_start = start;
if (adj_end > end)
adj_end = end;

if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
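A sketch of the new fixed-range helper added above (addresses hypothetical):

/* Pin a predetermined range - e.g. firmware-reserved memory - so the
 * allocator never hands it out; returns NULL if the range is not
 * contained in a single free hole. */
struct drm_mm_node *node =
	drm_mm_create_block(&mm, 0x100000 /* start */, 0x10000 /* size */, false);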
@ -504,6 +504,74 @@ drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
}
EXPORT_SYMBOL(drm_gtf_mode);

#if IS_ENABLED(CONFIG_VIDEOMODE)
int drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode)
{
dmode->hdisplay = vm->hactive;
dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
dmode->htotal = dmode->hsync_end + vm->hback_porch;

dmode->vdisplay = vm->vactive;
dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
dmode->vtotal = dmode->vsync_end + vm->vback_porch;

dmode->clock = vm->pixelclock / 1000;

dmode->flags = 0;
if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
dmode->flags |= DRM_MODE_FLAG_PHSYNC;
else if (vm->dmt_flags & VESA_DMT_HSYNC_LOW)
dmode->flags |= DRM_MODE_FLAG_NHSYNC;
if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
dmode->flags |= DRM_MODE_FLAG_PVSYNC;
else if (vm->dmt_flags & VESA_DMT_VSYNC_LOW)
dmode->flags |= DRM_MODE_FLAG_NVSYNC;
if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
dmode->flags |= DRM_MODE_FLAG_INTERLACE;
if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
drm_mode_set_name(dmode);

return 0;
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
#endif

#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
/**
 * of_get_drm_display_mode - get a drm_display_mode from devicetree
 * @np: device_node with the timing specification
 * @dmode: will be set to the return value
 * @index: index into the list of display timings in devicetree
 *
 * This function is expensive and should only be used if only one mode is to be
 * read from DT. To get multiple modes start with of_get_display_timings and
 * work with that instead.
 */
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, int index)
{
struct videomode vm;
int ret;

ret = of_get_videomode(np, &vm, index);
if (ret)
return ret;

drm_display_mode_from_videomode(&vm, dmode);

pr_debug("%s: got %dx%d display mode from %s\n",
of_node_full_name(np), vm.hactive, vm.vactive, np->name);
drm_mode_debug_printmodeline(dmode);

return 0;
}
EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
#endif

/**
 * drm_mode_set_name - set the name on a mode
 * @mode: name will be set in this mode
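A worked sketch of the horizontal half of the conversion (hypothetical 1024x768 panel timing):

/* hactive = 1024, hfront_porch = 24, hsync_len = 136, hback_porch = 160:
 * hdisplay    = 1024
 * hsync_start = 1024 + 24  = 1048
 * hsync_end   = 1048 + 136 = 1184
 * htotal      = 1184 + 160 = 1344
 * pixelclock = 65000000 Hz -> dmode->clock = 65000 (kHz) */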
@ -88,7 +88,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
u32 lnkcap, lnkcap2;

*mask = 0;
@ -103,33 +102,26 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
#if 0
root = dev->pdev->bus->self;

pos = pci_pcie_cap(root);
if (!pos)
/* we've been informed via and serverworks don't make the cut */
if (root->vendor == PCI_VENDOR_ID_VIA ||
root->vendor == PCI_VENDOR_ID_SERVERWORKS)
return -EINVAL;

/* we've been informed via and serverworks don't make the cut */
// if (root->vendor == PCI_VENDOR_ID_VIA ||
// root->vendor == PCI_VENDOR_ID_SERVERWORKS)
// return -EINVAL;
pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);

pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);

lnkcap &= PCI_EXP_LNKCAP_SLS;
lnkcap2 &= 0xfe;

if (lnkcap2) { /* PCIE GEN 3.0 */
if (lnkcap2) { /* PCIe r3.0-compliant */
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
*mask |= DRM_PCIE_SPEED_80;
} else {
if (lnkcap & 1)
} else { /* pre-r3.0 */
if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap & 2)
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
}

DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
@ -15,6 +15,8 @@
 * /fairy-tale-mode off
 */

#include <syscall.h>

#include <linux/module.h>
#include <errno-base.h>
#include <linux/pci.h>
@ -30,7 +32,6 @@
#include "intel-agp.h"
#include <drm/intel-gtt.h>

#include <syscall.h>

struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
@ -86,7 +87,6 @@ struct intel_gtt_driver {
};

static struct _intel_private {
struct intel_gtt base;
const struct intel_gtt_driver *driver;
struct pci_dev *pcidev; /* device one */
struct pci_dev *bridge_dev;
@ -101,7 +101,18 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
phys_addr_t scratch_page_dma;
int refcount;
/* Whether i915 needs to use the dmar apis or not. */
unsigned int needs_dmar : 1;
phys_addr_t gma_bus_addr;
/* Size of memory reserved for graphics by the BIOS */
unsigned int stolen_size;
/* Total number of gtt entries. */
unsigned int gtt_total_entries;
/* Part of the gtt that is mappable by the cpu, for those chips where
 * this is not the full gtt. */
unsigned int gtt_mappable_entries;
} intel_private;

#define INTEL_GTT_GEN intel_private.driver->gen
@ -118,7 +129,7 @@ static int intel_gtt_setup_scratch_page(void)
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
intel_private.base.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page_dma = page_to_phys(page);

intel_private.scratch_page = page;

@ -300,7 +311,7 @@ static unsigned int intel_gtt_total_entries(void)
/* On previous hardware, the GTT size was just what was
 * required to map the aperture.
 */
return intel_private.base.gtt_mappable_entries;
return intel_private.gtt_mappable_entries;
}
}

@ -362,8 +373,8 @@ static int intel_gtt_init(void)
if (ret != 0)
return ret;

intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
intel_private.base.gtt_total_entries = intel_gtt_total_entries();
intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
intel_private.gtt_total_entries = intel_gtt_total_entries();

/* save the PGETBL reg for resume */
intel_private.PGETBL_save =
@ -375,10 +386,10 @@ static int intel_gtt_init(void)

dev_info(&intel_private.bridge_dev->dev,
"detected gtt size: %dK total, %dK mappable\n",
intel_private.base.gtt_total_entries * 4,
intel_private.base.gtt_mappable_entries * 4);
intel_private.gtt_total_entries * 4,
intel_private.gtt_mappable_entries * 4);

gtt_map_size = intel_private.base.gtt_total_entries * 4;
gtt_map_size = intel_private.gtt_total_entries * 4;

intel_private.gtt = NULL;
if (intel_private.gtt == NULL)
@ -389,13 +400,12 @@ static int intel_gtt_init(void)
iounmap(intel_private.registers);
return -ENOMEM;
}
intel_private.base.gtt = intel_private.gtt;

asm volatile("wbinvd");

intel_private.base.stolen_size = intel_gtt_stolen_size();
intel_private.stolen_size = intel_gtt_stolen_size();

intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

ret = intel_gtt_setup_scratch_page();
if (ret != 0) {
@ -410,7 +420,8 @@ static int intel_gtt_init(void)
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);

intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);


return 0;
}
@ -528,7 +539,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
unsigned int i;

for (i = first_entry; i < (first_entry + num_entries); i++) {
intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
@ -594,25 +605,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;

/* Query intel_iommu to see if we need the workaround. Presumably that
 * was loaded first.
 */
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
intel_iommu_gfx_mapped)
return 1;
#endif
return 0;
}

static int i9xx_setup(void)
{
u32 reg_addr, gtt_addr;
@ -640,9 +632,6 @@ static int i9xx_setup(void)
break;
}

if (needs_idle_maps())
intel_private.base.do_idle_maps = 1;

intel_i9xx_setup_flush();

return 0;
@ -794,7 +783,17 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge)
{
int i, mask;
intel_private.driver = NULL;

/*
 * Can be called from the fake agp driver but also directly from
 * drm/i915.ko. Hence we need to check whether everything is set up
 * already.
 */
if (intel_private.driver) {
intel_private.refcount++;
return 1;
}


for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
@ -807,6 +806,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
if (!intel_private.driver)
return 0;

intel_private.refcount++;

if (bridge) {
bridge->dev_private_data = &intel_private;
bridge->dev = bridge_pdev;
@ -834,9 +835,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);

struct intel_gtt *intel_gtt_get(void)
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, unsigned long *mappable_end)
{
return &intel_private.base;
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
*stolen_size = intel_private.stolen_size;
*mappable_base = intel_private.gma_bus_addr;
*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
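A sketch of a caller adapted to the new out-parameter interface (variable names hypothetical):

size_t gtt_total, stolen_size;
phys_addr_t mappable_base;
unsigned long mappable_end;

/* Replaces the old "struct intel_gtt *gtt = intel_gtt_get();" pattern;
 * the private struct is no longer exposed to callers. */
intel_gtt_get(&gtt_total, &stolen_size, &mappable_base, &mappable_end);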
@ -997,6 +997,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
|
||||
case I915_PARAM_HAS_PINNED_BATCHES:
|
||||
value = 1;
|
||||
break;
|
||||
case I915_PARAM_HAS_EXEC_NO_RELOC:
|
||||
value = 1;
|
||||
break;
|
||||
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
|
||||
value = 1;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
|
||||
param->param);
|
||||
@ -1051,53 +1057,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
|
||||
#endif
|
||||
|
||||
|
||||
static int i915_set_status_page(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
drm_i915_hws_addr_t *hws = data;
|
||||
struct intel_ring_buffer *ring;
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
return -ENODEV;
|
||||
|
||||
if (!I915_NEED_GFX_HWS(dev))
|
||||
return -EINVAL;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
WARN(1, "tried to set status page when mode setting active\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
|
||||
|
||||
ring = LP_RING(dev_priv);
|
||||
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
|
||||
|
||||
dev_priv->dri1.gfx_hws_cpu_addr =
|
||||
ioremap(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
|
||||
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
|
||||
i915_dma_cleanup(dev);
|
||||
ring->status_page.gfx_addr = 0;
|
||||
DRM_ERROR("can not ioremap virtual address for"
|
||||
" G33 hw status page\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
|
||||
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
|
||||
|
||||
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
|
||||
ring->status_page.gfx_addr);
|
||||
DRM_DEBUG_DRIVER("load hws at %p\n",
|
||||
ring->status_page.page_addr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_get_bridge_dev(struct drm_device *dev)
|
||||
{
|
||||
@ -1200,40 +1159,61 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_vga_switcheroo;

ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem_stolen;

/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);

ret = i915_gem_init(dev);
if (ret)
goto cleanup_gem_stolen;
goto cleanup_irq;


intel_modeset_gem_init(dev);

ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;

/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;

ret = intel_fbdev_init(dev);
if (ret)
goto cleanup_irq;
goto cleanup_gem;

// drm_kms_helper_poll_init(dev);
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev);

/*
* Some ports require correctly set-up hpd registers for detection to
* work properly (leading to ghost connected connector status), e.g. VGA
* on gm45. Hence we can only set up the initial fbdev config after hpd
* irqs are fully enabled. Now we should scan for the initial config
* only once hotplug handling is enabled, but due to screwed-up locking
* around kms/fbdev init we can't protect the fbdev initial config
* scanning against hotplug events. Hence do this first and ignore the
* tiny window where we will lose hotplug notifications.
*/
intel_fbdev_initial_config(dev);

/* Only enable hotplug handling once the fbdev is fully set up. */
dev_priv->enable_hotplug_processing = true;

drm_kms_helper_poll_init(dev);

/* We're off and running w/KMS */
dev_priv->mm.suspended = 0;

return 0;

cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_irq:
// drm_irq_uninstall(dev);
cleanup_gem:
// mutex_lock(&dev->struct_mutex);
// i915_gem_cleanup_ringbuffer(dev);
// mutex_unlock(&dev->struct_mutex);
// i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
// i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
@ -1336,8 +1316,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_gmch;
}

aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
aperture_size = dev_priv->gtt.mappable_end;



@ -1389,11 +1368,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
*/

spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->dpio_lock);

mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);

if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
@ -1444,7 +1424,7 @@ out_mtrrfree:
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
// intel_gmch_remove();
// dev_priv->gtt.gtt_remove(dev);
put_bridge:
// pci_dev_put(dev_priv->bridge_dev);
free_priv:
@ -1476,11 +1456,11 @@ int i915_driver_unload(struct drm_device *dev)
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);

io_mapping_free(dev_priv->mm.gtt_mapping);
io_mapping_free(dev_priv->gtt.mappable);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
dev_priv->mm.gtt_base_addr,
dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
dev_priv->gtt.mappable_base,
dev_priv->gtt.mappable_end);
dev_priv->mm.gtt_mtrr = -1;
}

@ -1506,8 +1486,8 @@ int i915_driver_unload(struct drm_device *dev)
}

/* Free error state after interrupts are fully disabled. */
del_timer_sync(&dev_priv->hangcheck_timer);
cancel_work_sync(&dev_priv->error_work);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);

if (dev->pdev->msi_enabled)
@ -1526,9 +1506,6 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
drm_mm_takedown(&dev_priv->mm.stolen);

intel_cleanup_overlay(dev);

if (!I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
@ -1541,6 +1518,10 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_mchbar(dev);

destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);

if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);

pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);

@ -52,26 +52,30 @@ struct drm_device *main_device;
struct drm_file *drm_file_handlers[256];

static int i915_modeset __read_mostly = 1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
"1=on, -1=force vga console preference [default])");


int i915_panel_ignore_lid __read_mostly = 0;
int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect [default], 1=lid open, "
"-1=lid closed)");
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");

unsigned int i915_powersave __read_mostly = 0;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");

int i915_semaphores __read_mostly = -1;

module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

int i915_enable_rc6 __read_mostly = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
@ -80,44 +84,53 @@ MODULE_PARM_DESC(i915_enable_rc6,
"default: -1 (use per-chip default)");

int i915_enable_fbc __read_mostly = 0;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");

unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");

int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");

int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");

int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");

static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");

bool i915_enable_hangcheck __read_mostly = false;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");

int i915_enable_ppgtt __read_mostly = false;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");

unsigned int i915_preliminary_hw_support __read_mostly = true;
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support. "
"Enable Haswell and ValleyView Support. "
@ -254,6 +267,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
};

static const struct intel_device_info intel_valleyview_d_info = {
@ -263,6 +277,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
};

static const struct intel_device_info intel_haswell_d_info = {
@ -350,15 +365,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@ -454,7 +469,7 @@ int i915_init(void)
if( unlikely(ent == NULL) )
{
dbgprintf("device not found\n");
return 0;
return -ENODEV;
};

struct intel_device_info *intel_info =
@ -730,8 +745,6 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
val = read##y(dev_priv->regs + reg + 0x180000); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
@ -757,11 +770,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
write##y(val, dev_priv->regs + reg); \
} \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
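The __i915_read/__i915_write macros above are instantiated once per access width (8/16/32/64 bits). Hand-expanded for the 32-bit read path, the generated accessor behaves roughly like the sketch below; the gen6 forcewake get/put under gt_lock is elided, and the helper names are taken from the hunk itself:

/* Rough hand-expansion of the generated i915_read32(); illustrative only. */
static u32 i915_read32_sketch(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg))
		/* VLV display registers sit at a +0x180000 offset */
		val = readl(dev_priv->regs + reg + 0x180000);
	else
		val = readl(dev_priv->regs + reg);
	return val;
}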
@ -30,6 +30,8 @@
#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
@ -96,7 +98,12 @@ enum port {
};
#define port_name(p) ((p) + 'A')

#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
I915_GEM_DOMAIN_COMMAND | \
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)

@ -114,6 +121,19 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2

/* Used by dp and fdi links */
struct intel_link_m_n {
uint32_t tu;
uint32_t gmch_m;
uint32_t gmch_n;
uint32_t link_m;
uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n);
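intel_link_compute_m_n() fills struct intel_link_m_n with the data and link M/N ratios a DP or FDI link needs to pace pixel data against the link clock. A worked example of the underlying relation (illustrative arithmetic, not the driver's exact code):

/*
 * data M/N = (pixel_clock * bpp) / (link_clock * nlanes * 8)
 * link M/N =  pixel_clock / link_clock
 *
 * e.g. a 148500 kHz mode at 24 bpp over 4 lanes at 270000 kHz:
 * data M/N = (148500 * 24) / (270000 * 4 * 8)
 *          = 3564000 / 8640000 = 0.4125
 */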
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
@ -143,7 +163,12 @@ struct intel_ddi_plls {
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)


struct drm_i915_gem_phys_object {
int id;
struct page **page_list;
drm_dma_handle_t *handle;
struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
@ -287,6 +312,7 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@ -326,6 +352,7 @@ struct drm_i915_gt_funcs {
DEV_INFO_FLAG(has_llc)

struct intel_device_info {
u32 display_mmio_offset;
u8 gen;
u8 is_mobile:1;
u8 is_i85x:1;
@ -353,6 +380,50 @@ struct intel_device_info {
u8 has_llc:1;
};

enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
* portion of the GTT which can be mapped by the CPU and remain both coherent
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
struct i915_gtt {
unsigned long start; /* Start offset of used GTT */
size_t total; /* Total size GTT can map */
size_t stolen_size; /* Total size of stolen memory */

unsigned long mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */

/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;

bool do_idle_maps;
dma_addr_t scratch_page_dma;
struct page *scratch_page;

/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
void (*gtt_remove)(struct drm_device *dev);
void (*gtt_clear_range)(struct drm_device *dev,
unsigned int first_entry,
unsigned int num_entries);
void (*gtt_insert_entries)(struct drm_device *dev,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
};
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
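gtt_total_entries() above converts the byte size kept in struct i915_gtt back into a count of global PTEs. A quick sanity check of the arithmetic, assuming 4 KiB GTT pages (PAGE_SHIFT == 12):

/* A 2 GiB GTT maps 2 GiB / 4 KiB = 524288 global PTEs. */
struct i915_gtt example = { .total = 2UL * 1024 * 1024 * 1024 };
size_t entries = gtt_total_entries(example); /* == 524288 */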
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
@ -362,6 +433,16 @@ struct i915_hw_ppgtt {
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;

/* pte functions, mirroring the interface of the global gtt. */
void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
unsigned int first_entry,
unsigned int num_entries);
void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};


@ -588,6 +669,9 @@ struct intel_gen6_power_mgmt {
struct mutex hw_lock;
};

/* defined intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
u8 cur_delay;
u8 min_delay;
@ -628,6 +712,158 @@ struct intel_l3_parity {
struct work_struct error_work;
};

struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
* are idle and not used by the GPU) but still have
* (presumably uncached) pages still attached.
*/
struct list_head unbound_list;

/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */

int gtt_mtrr;

/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;

bool shrinker_no_lock_stealing;

/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;

/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;

/** LRU list of objects with fence regs on them. */
struct list_head fence_list;

/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
struct delayed_work retire_work;

/**
* Are we in a non-interruptible section of code like
* modesetting?
*/
bool interruptible;

/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
* This is set between LeaveVT and EnterVT. It needs to be
* replaced with a semaphore. It also needs to be
* transitioned away from for kernel modesetting.
*/
int suspended;

/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;

/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

/* accounting, useful for userland debugging */
size_t object_memory;
u32 object_count;
};

struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];

/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
struct drm_i915_error_state *first_error;
struct work_struct work;

unsigned long last_reset;

/**
* State variable and reset counter controlling the reset flow
*
* Upper bits are for the reset counter. This counter is used by the
* wait_seqno code to notice, race-free, that a reset event happened and
* that it needs to restart the entire ioctl (since most likely the
* seqno it waited for won't ever signal anytime soon).
*
* This is important for lock-free wait paths, where no contended lock
* naturally enforces the correct ordering between the bail-out of the
* waiter and the gpu reset work code.
*
* Lowest bit controls the reset state machine: Set means a reset is in
* progress. This state will (presuming we don't have any bugs) decay
* into either unset (successful reset) or the special WEDGED value (hw
* terminally sour). All waiters on the reset_queue will be woken when
* that happens.
*/
atomic_t reset_counter;

/**
* Special values/flags for reset_counter
*
* Note that the code relies on
* I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
* being true.
*/
#define I915_RESET_IN_PROGRESS_FLAG 1
#define I915_WEDGED 0xffffffff

/**
* Waitqueue to signal when the reset has completed. Used by clients
* that wait for dev_priv->mm.wedged to settle.
*/
wait_queue_head_t reset_queue;

/* For gpu hang simulation. */
unsigned int stop_rings;
};

enum modeset_restore {
MODESET_ON_LID_OPEN,
MODESET_DONE,
MODESET_SUSPENDED,
};

typedef struct drm_i915_private {
struct drm_device *dev;

@ -644,10 +880,11 @@ typedef struct drm_i915_private {
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
spinlock_t gt_lock;

struct intel_gmbus gmbus[GMBUS_NUM_PORTS];


/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
@ -657,9 +894,11 @@ typedef struct drm_i915_private {
*/
uint32_t gpio_mmio_base;

wait_queue_head_t gmbus_wait_queue;

struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
uint32_t last_seqno, next_seqno;

drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
@ -669,31 +908,24 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;

/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
// struct pm_qos_request pm_qos;

/* DPIO indirect register protection */
spinlock_t dpio_lock;
struct mutex dpio_lock;

/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;

u32 hotplug_supported_mask;
struct work_struct hotplug_work;
bool enable_hotplug_processing;

int num_pipe;
int num_pch_pll;

/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];

unsigned int stop_rings;

unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
@ -704,7 +936,7 @@ typedef struct drm_i915_private {

/* overlay */
struct intel_overlay *overlay;
bool sprite_scaling_enabled;
unsigned int sprite_scaling_enabled;

/* LVDS info */
int backlight_level; /* restore backlight to this value */
@ -721,7 +953,6 @@ typedef struct drm_i915_private {
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
@ -742,11 +973,6 @@ typedef struct drm_i915_private {

unsigned int fsb_freq, mem_freq, is_ddr3;

spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;

/* Display functions */
@ -758,115 +984,12 @@ typedef struct drm_i915_private {

unsigned long quirks;

/* Register state */
bool modeset_on_lid;
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;

struct {
/** Bridge to intel-gtt-ko */
struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
* are idle and not used by the GPU) but still have
* (presumably uncached) pages still attached.
*/
struct list_head unbound_list;
struct i915_gtt gtt;

/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
unsigned long gtt_mappable_end;
unsigned long gtt_end;

// struct io_mapping *gtt_mapping;
phys_addr_t gtt_base_addr;
int gtt_mtrr;

/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;

// struct shrinker inactive_shrinker;
bool shrinker_no_lock_stealing;

/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;

/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;

/** LRU list of objects with fence regs on them. */
struct list_head fence_list;

/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
struct delayed_work retire_work;

/**
* Are we in a non-interruptible section of code like
* modesetting?
*/
bool interruptible;

/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
* This is set between LeaveVT and EnterVT. It needs to be
* replaced with a semaphore. It also needs to be
* transitioned away from for kernel modesetting.
*/
int suspended;

/**
* Flag if the hardware appears to be wedged.
*
* This is set when attempts to idle the device timeout.
* It prevents command submission from occurring and makes
* every pending request fail
*/
atomic_t wedged;

/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;

/* storage for physical objects */
// struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

/* accounting, useful for userland debugging */
size_t gtt_total;
size_t mappable_gtt_total;
size_t object_memory;
u32 object_count;
} mm;
struct i915_gem_mm mm;

/* Kernel Modesetting */

@ -908,7 +1031,7 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;

unsigned long last_gpu_reset;
struct i915_gpu_error gpu_error;

/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
@ -927,7 +1050,7 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;

bool fdi_rx_polarity_reversed;
u32 fdi_rx_config;

struct i915_suspend_saved_registers regfile;

@ -948,11 +1071,7 @@ enum hdmi_force_audio {
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};

enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)

struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
@ -977,10 +1096,10 @@ struct drm_i915_gem_object {

const struct drm_i915_gem_object_ops *ops;

// void *mapped;

/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head gtt_list;

/** This object's place on the active/inactive lists */
@ -1065,7 +1184,6 @@ struct drm_i915_gem_object {
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;

// dma_addr_t *allocated_pages;
struct sg_table *pages;
int pages_pin_count;

@ -1107,13 +1225,6 @@ struct drm_i915_gem_object {

/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;

/**
* Number of crtcs where this object is currently the fb, but
* will be page flipped away on the next vblank. When it
* reaches 0, dev_priv->pending_flip_queue will be woken up.
*/
atomic_t pending_flip;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

@ -1152,7 +1263,7 @@ struct drm_i915_gem_request {

struct drm_i915_file_private {
struct {
struct spinlock lock;
spinlock_t lock;
struct list_head request_list;
} mm;
struct idr context_idr;
@ -1238,6 +1349,8 @@ struct drm_i915_file_private {

#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)

#define HAS_DDI(dev) (IS_HASWELL(dev))

#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@ -1293,6 +1406,7 @@ extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;

extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@ -1329,6 +1443,7 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);

@ -1397,18 +1512,22 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);

int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

@ -1460,8 +1579,8 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}

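i915_seqno_passed() above compares sequence numbers in modular arithmetic: the signed difference stays non-negative for any pair less than 2^31 apart, so the test keeps working when the 32-bit seqno wraps. For example:

/* Wrap-safe: 0x00000002 is "after" 0xfffffffe despite being numerically smaller. */
i915_seqno_passed(0x00000002, 0xfffffffe); /* (int32_t)0x00000004 >= 0 -> true  */
i915_seqno_passed(0xfffffffe, 0x00000002); /* (int32_t)0xfffffffc <  0 -> false */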
extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);

int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

@ -1487,8 +1606,18 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)

void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
& I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
return atomic_read(&error->reset_counter) == I915_WEDGED;
}

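The two inline helpers above decode the reset_counter protocol documented in struct i915_gpu_error: bit 0 flags a reset in progress, the special value 0xffffffff (I915_WEDGED) marks a terminally hung GPU, and the upper bits count completed resets. A sketch of how a lock-free waiter might use them; this is illustrative only, and seqno_completed() is a hypothetical predicate standing in for the real wait_seqno machinery:

/* Illustrative wait-loop skeleton, not the driver's actual code. */
unsigned reset_before = atomic_read(&dev_priv->gpu_error.reset_counter);

while (!seqno_completed(ring, seqno)) {       /* hypothetical predicate */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    reset_before != atomic_read(&dev_priv->gpu_error.reset_counter))
		return -EAGAIN;               /* restart the entire ioctl */
}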
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@ -1529,9 +1658,10 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
uint32_t size,
int tiling_mode);
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@ -1552,7 +1682,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);

/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
@ -1566,12 +1695,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_gtt_fini(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
@ -1589,9 +1716,22 @@ int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
@ -1617,9 +1757,9 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
@ -1676,6 +1816,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@ -1748,6 +1889,21 @@ __i915_write(64, q)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
if (HAS_PCH_SPLIT(dev))
return CPU_VGACNTRL;
else if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL;
else
return VGACNTRL;
}

typedef struct
{
int width;
File diff suppressed because it is too large
@ -128,13 +128,8 @@ static int get_context_size(struct drm_device *dev)

static void do_destroy(struct i915_hw_context *ctx)
{
struct drm_device *dev = ctx->obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;

if (ctx->file_priv)
idr_remove(&ctx->file_priv->context_idr, ctx->id);
else
BUG_ON(ctx != dev_priv->ring[RCS].default_context);

drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
@ -146,7 +141,7 @@ create_hw_context(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
int ret, id;
int ret;

ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
@ -171,22 +166,11 @@ create_hw_context(struct drm_device *dev,

ctx->file_priv = file_priv;

again:
if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
ret = -ENOMEM;
DRM_DEBUG_DRIVER("idr allocation failed\n");
goto err_out;
}

ret = idr_get_new_above(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_ID + 1, &id);
if (ret == 0)
ctx->id = id;

if (ret == -EAGAIN)
goto again;
else if (ret)
ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
GFP_KERNEL);
if (ret < 0)
goto err_out;
ctx->id = ret;

return ctx;

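The hunk above is the mechanical conversion from the old two-step idr_pre_get()/idr_get_new_above() protocol to the single-call idr_alloc() API that landed in v3.9: idr_alloc() returns the allocated id (>= start) or a negative errno, so the preload-and-retry loop disappears. The same pattern in isolation ('my_idr' and 'ptr' are illustrative names):

/* Sketch of the idr_alloc() pattern used above. */
int id = idr_alloc(&my_idr, ptr, 1 /* start */, 0 /* no upper bound */, GFP_KERNEL);
if (id < 0)
	return id;      /* -ENOMEM or -ENOSPC */
ptr->id = id;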
@ -245,10 +229,6 @@ err_destroy:
void i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ctx_size;

dev_priv->hw_contexts_disabled = true;
return;

#if 0
if (!HAS_HW_CONTEXTS(dev)) {
@ -261,11 +241,9 @@ void i915_gem_context_init(struct drm_device *dev)
dev_priv->ring[RCS].default_context)
return;

ctx_size = get_context_size(dev);
dev_priv->hw_context_size = get_context_size(dev);
dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);

if (ctx_size <= 0 || ctx_size > (1<<20)) {
if (dev_priv->hw_context_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
return;
}

@ -63,61 +63,136 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
}

struct eb_objects {
struct list_head objects;
int and;
union {
struct drm_i915_gem_object *lut[0];
struct hlist_head buckets[0];
};
};

static struct eb_objects *
eb_create(int size)
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
struct eb_objects *eb;
struct eb_objects *eb = NULL;

if (args->flags & I915_EXEC_HANDLE_LUT) {
int size = args->buffer_count;
size *= sizeof(struct drm_i915_gem_object *);
size += sizeof(struct eb_objects);
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
}

if (eb == NULL) {
int size = args->buffer_count;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > size)
BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
GFP_KERNEL);
GFP_TEMPORARY);
if (eb == NULL)
return eb;

eb->and = count - 1;
} else
eb->and = -args->buffer_count;

INIT_LIST_HEAD(&eb->objects);
return eb;
}

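eb_create() above sizes its hash table to a power of two so that 'handle & eb->and' can replace a modulo, while a negative eb->and marks the direct look-up-table mode used with I915_EXEC_HANDLE_LUT, where the handle is simply an index. A compact restatement of the bucket math, under the same power-of-two assumption (values are illustrative):

/* count is a power of two, so mask == count - 1 selects a bucket in O(1). */
unsigned int count  = 64;             /* example bucket count */
unsigned int mask   = count - 1;      /* what eb->and holds in hash mode */
unsigned int bucket = handle & mask;  /* equivalent to handle % count */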
static void
eb_reset(struct eb_objects *eb)
{
if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
static int
eb_lookup_objects(struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
const struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file)
{
int i;

spin_lock(&file->table_lock);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;

if(exec[i].handle == -2)
obj = get_fb_obj();
else
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
if (obj == NULL) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
return -ENOENT;
}

if (!list_empty(&obj->exec_list)) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
return -EINVAL;
}

drm_gem_object_reference(&obj->base);
list_add_tail(&obj->exec_list, &eb->objects);

obj->exec_entry = &exec[i];
if (eb->and < 0) {
eb->lut[i] = obj;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
obj->exec_handle = handle;
hlist_add_head(&obj->exec_node,
&eb->buckets[obj->exec_handle & eb->and]);
&eb->buckets[handle & eb->and]);
}
}
spin_unlock(&file->table_lock);

return 0;
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
if (eb->and < 0) {
if (handle >= -eb->and)
return NULL;
return eb->lut[handle];
} else {
struct hlist_head *head;
struct hlist_node *node;
struct drm_i915_gem_object *obj;

head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
struct drm_i915_gem_object *obj;

obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
}

return NULL;
}
}

static void
eb_destroy(struct eb_objects *eb)
{
while (!list_empty(&eb->objects)) {
struct drm_i915_gem_object *obj;

obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
kfree(eb);
}

@ -179,17 +254,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
|
||||
reloc->write_domain);
|
||||
return ret;
|
||||
}
|
||||
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
|
||||
reloc->write_domain != target_obj->pending_write_domain)) {
|
||||
DRM_DEBUG("Write domain conflict: "
|
||||
"obj %p target %d offset %d "
|
||||
"new %08x old %08x\n",
|
||||
obj, reloc->target_handle,
|
||||
(int) reloc->offset,
|
||||
reloc->write_domain,
|
||||
target_obj->pending_write_domain);
|
||||
return ret;
|
||||
}
|
||||
|
||||
target_obj->pending_read_domains |= reloc->read_domains;
|
||||
target_obj->pending_write_domain |= reloc->write_domain;
|
||||
@ -218,9 +282,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
|
||||
}
|
||||
|
||||
/* We can't wait for rendering with pagefaults disabled */
|
||||
// if (obj->active && in_atomic())
|
||||
// return -EFAULT;
|
||||
|
||||
|
||||
reloc->delta += target_offset;
|
||||
if (use_cpu_reloc(obj)) {
|
||||
@ -324,8 +385,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
|
||||
|
||||
static int
|
||||
i915_gem_execbuffer_relocate(struct drm_device *dev,
|
||||
struct eb_objects *eb,
|
||||
struct list_head *objects)
|
||||
struct eb_objects *eb)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
int ret = 0;
|
||||
@ -338,7 +398,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
|
||||
* lockdep complains vehemently.
|
||||
*/
|
||||
// pagefault_disable();
|
||||
list_for_each_entry(obj, objects, exec_list) {
|
||||
list_for_each_entry(obj, &eb->objects, exec_list) {
|
||||
ret = i915_gem_execbuffer_relocate_object(obj, eb);
|
||||
if (ret)
|
||||
break;
|
||||
@ -360,7 +420,8 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
|
||||
|
||||
static int
|
||||
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
|
||||
struct intel_ring_buffer *ring)
|
||||
struct intel_ring_buffer *ring,
|
||||
bool *need_reloc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
|
||||
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
|
||||
@ -409,8 +470,19 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
|
||||
obj->has_aliasing_ppgtt_mapping = 1;
|
||||
}
|
||||
|
||||
if (entry->offset != obj->gtt_offset) {
|
||||
entry->offset = obj->gtt_offset;
|
||||
// LEAVE();
|
||||
*need_reloc = true;
|
||||
}
|
||||
|
||||
if (entry->flags & EXEC_OBJECT_WRITE) {
|
||||
obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
|
||||
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
|
||||
}
|
||||
|
||||
if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
|
||||
!obj->has_global_gtt_mapping)
|
||||
i915_gem_gtt_bind_object(obj, obj->cache_level);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -437,7 +509,8 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
|
||||
static int
|
||||
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
|
||||
struct drm_file *file,
|
||||
struct list_head *objects)
|
||||
struct list_head *objects,
|
||||
bool *need_relocs)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct list_head ordered_objects;
|
||||
@ -467,7 +540,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
|
||||
else
|
||||
list_move_tail(&obj->exec_list, &ordered_objects);
|
||||
|
||||
obj->base.pending_read_domains = 0;
|
||||
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
|
||||
obj->base.pending_write_domain = 0;
|
||||
obj->pending_fenced_gpu_access = false;
|
||||
}
|
||||
@ -507,7 +580,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
|
||||
(need_mappable && !obj->map_and_fenceable))
|
||||
ret = i915_gem_object_unbind(obj);
|
||||
else
|
||||
ret = i915_gem_execbuffer_reserve_object(obj, ring);
|
||||
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
@ -517,7 +590,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
|
||||
if (obj->gtt_space)
|
||||
continue;
|
||||
|
||||
ret = i915_gem_execbuffer_reserve_object(obj, ring);
|
||||
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
@ -540,21 +613,22 @@ err: /* Decrement pin count for bound objects */
|
||||
|
||||
static int
|
||||
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct drm_file *file,
|
||||
struct intel_ring_buffer *ring,
|
||||
struct list_head *objects,
|
||||
struct eb_objects *eb,
|
||||
struct drm_i915_gem_exec_object2 *exec,
|
||||
int count)
|
||||
struct drm_i915_gem_exec_object2 *exec)
|
||||
{
|
||||
struct drm_i915_gem_relocation_entry *reloc;
|
||||
struct drm_i915_gem_object *obj;
|
||||
bool need_relocs;
|
||||
int *reloc_offset;
|
||||
int i, total, ret;
|
||||
int count = args->buffer_count;
|
||||
|
||||
/* We may process another execbuffer during the unlock... */
|
||||
while (!list_empty(objects)) {
|
||||
obj = list_first_entry(objects,
|
||||
while (!list_empty(&eb->objects)) {
|
||||
obj = list_first_entry(&eb->objects,
|
||||
struct drm_i915_gem_object,
|
||||
exec_list);
|
||||
list_del_init(&obj->exec_list);
|
||||
@ -622,34 +696,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
|
||||
|
||||
/* reacquire the objects */
|
||||
eb_reset(eb);
|
||||
for (i = 0; i < count; i++) {
|
||||
|
||||
if(exec[i].handle == -2)
|
||||
{
|
||||
obj = get_fb_obj();
|
||||
drm_gem_object_reference(&obj->base);
|
||||
}
|
||||
else
|
||||
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
|
||||
exec[i].handle));
|
||||
if (&obj->base == NULL) {
|
||||
DRM_DEBUG("Invalid object handle %d at index %d\n",
|
||||
exec[i].handle, i);
|
||||
ret = -ENOENT;
|
||||
ret = eb_lookup_objects(eb, exec, args, file);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
list_add_tail(&obj->exec_list, objects);
|
||||
obj->exec_handle = exec[i].handle;
|
||||
obj->exec_entry = &exec[i];
|
||||
eb_add_object(eb, obj);
|
||||
}
|
||||
|
||||
ret = i915_gem_execbuffer_reserve(ring, file, objects);
|
||||
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
|
||||
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
list_for_each_entry(obj, objects, exec_list) {
|
||||
list_for_each_entry(obj, &eb->objects, exec_list) {
|
||||
int offset = obj->exec_entry - exec;
|
||||
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
|
||||
reloc + reloc_offset[offset]);
|
||||
@ -669,45 +725,12 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
|
||||
{
|
||||
u32 plane, flip_mask;
|
||||
int ret;
|
||||
|
||||
/* Check for any pending flips. As we only maintain a flip queue depth
|
||||
* of 1, we can simply insert a WAIT for the next display flip prior
|
||||
* to executing the batch and avoid stalling the CPU.
|
||||
*/
|
||||
|
||||
for (plane = 0; flips >> plane; plane++) {
|
||||
if (((flips >> plane) & 1) == 0)
|
||||
continue;
|
||||
|
||||
if (plane)
|
||||
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
|
||||
else
|
||||
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
|
||||
struct list_head *objects)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
uint32_t flush_domains = 0;
|
||||
uint32_t flips = 0;
|
||||
int ret;
|
||||
|
||||
list_for_each_entry(obj, objects, exec_list) {
|
||||
@ -718,18 +741,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
|
||||
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
|
||||
i915_gem_clflush_object(obj);
|
||||
|
||||
if (obj->base.pending_write_domain)
|
||||
flips |= atomic_read(&obj->pending_flip);
|
||||
|
||||
flush_domains |= obj->base.write_domain;
|
||||
}
|
||||
|
||||
if (flips) {
|
||||
ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (flush_domains & I915_GEM_DOMAIN_CPU)
|
||||
i915_gem_chipset_flush(ring->dev);
|
||||
|
||||
@ -745,6 +759,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
|
||||
static bool
|
||||
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
|
||||
{
|
||||
if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
|
||||
return false;
|
||||
|
||||
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
|
||||
}
|
||||
|
||||
@ -753,21 +770,26 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
|
||||
int count)
|
||||
{
|
||||
int i;
|
||||
int relocs_total = 0;
|
||||
int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
|
||||
int length; /* limited by fault_in_pages_readable() */
|
||||
|
||||
/* First check for malicious input causing overflow */
|
||||
if (exec[i].relocation_count >
|
||||
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
|
||||
if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
|
||||
return -EINVAL;
|
||||
|
||||
/* First check for malicious input causing overflow in
|
||||
* the worst case where we need to allocate the entire
|
||||
* relocation tree as a single array.
|
||||
*/
|
||||
if (exec[i].relocation_count > relocs_max - relocs_total)
|
||||
return -EINVAL;
|
||||
relocs_total += exec[i].relocation_count;
|
||||
|
||||
length = exec[i].relocation_count *
|
||||
sizeof(struct drm_i915_gem_relocation_entry);
|
||||
// if (!access_ok(VERIFY_READ, ptr, length))
|
||||
// return -EFAULT;
|
||||
|
||||
/* we may also need to update the presumed offsets */
|
||||
// if (!access_ok(VERIFY_WRITE, ptr, length))
|
||||
// return -EFAULT;
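
The relocs_max/relocs_total check above caps the running relocation total so the sum can never exceed what a single worst-case array of relocation entries may occupy. A self-contained sketch of the same reject-before-add pattern, with an illustrative entry type:

#include <limits.h>
#include <stddef.h>

struct reloc { unsigned long offset, delta; };   /* stand-in entry type */

/* Returns 0 and stores the total, or -1 if any prefix sum would
 * overflow the single-array allocation bound. */
static int sum_reloc_counts(const unsigned int *counts, int n, size_t *out)
{
    size_t total = 0;
    const size_t max = INT_MAX / sizeof(struct reloc);

    for (int i = 0; i < n; i++) {
        if (counts[i] > max - total)    /* reject before adding */
            return -1;
        total += counts[i];
    }
    *out = total;
    return 0;
}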
|
||||
@ -789,8 +811,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
|
||||
u32 old_read = obj->base.read_domains;
|
||||
u32 old_write = obj->base.write_domain;
|
||||
|
||||
obj->base.read_domains = obj->base.pending_read_domains;
|
||||
obj->base.write_domain = obj->base.pending_write_domain;
|
||||
if (obj->base.write_domain == 0)
|
||||
obj->base.pending_read_domains |= obj->base.read_domains;
|
||||
obj->base.read_domains = obj->base.pending_read_domains;
|
||||
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
|
||||
|
||||
i915_gem_object_move_to_active(obj, ring);
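
The domain hand-over above has one subtlety: when there is no pending write, the object's existing read domains stay valid and are folded into the pending set rather than discarded. The rule in isolation, assuming plain bitmask domains:

/* Move pending read/write domains into effect; a write invalidates
 * old readers, the absence of one preserves them. */
static void apply_pending_domains(unsigned int *read, unsigned int *write,
                                  unsigned int pending_read,
                                  unsigned int pending_write)
{
    if (pending_write == 0)
        pending_read |= *read;  /* nothing written: old readers stay valid */
    *read  = pending_read;
    *write = pending_write;
}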
|
||||
@ -849,19 +873,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
struct drm_i915_gem_exec_object2 *exec)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
struct list_head objects;
|
||||
struct eb_objects *eb;
|
||||
struct drm_i915_gem_object *batch_obj;
|
||||
struct drm_clip_rect *cliprects = NULL;
|
||||
struct intel_ring_buffer *ring;
|
||||
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
|
||||
u32 exec_start, exec_len;
|
||||
u32 mask;
|
||||
u32 flags;
|
||||
u32 mask, flags;
|
||||
int ret, mode, i;
|
||||
bool need_relocs;
|
||||
|
||||
if (!i915_gem_check_execbuffer(args)) {
|
||||
DRM_DEBUG("execbuf with invalid offset/length\n");
|
||||
if (!i915_gem_check_execbuffer(args))
|
||||
{
|
||||
FAIL();
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -875,8 +898,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
|
||||
flags = 0;
|
||||
if (args->flags & I915_EXEC_SECURE) {
|
||||
// if (!file->is_master || !capable(CAP_SYS_ADMIN))
|
||||
// return -EPERM;
|
||||
|
||||
flags |= I915_DISPATCH_SECURE;
|
||||
}
|
||||
@ -989,7 +1010,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
goto pre_mutex_err;
|
||||
}
|
||||
|
||||
eb = eb_create(args->buffer_count);
|
||||
eb = eb_create(args);
|
||||
if (eb == NULL) {
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
ret = -ENOMEM;
|
||||
@ -997,60 +1018,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
}
|
||||
|
||||
/* Look up object handles */
|
||||
INIT_LIST_HEAD(&objects);
|
||||
for (i = 0; i < args->buffer_count; i++) {
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
if(exec[i].handle == -2)
|
||||
{
|
||||
obj = get_fb_obj();
|
||||
drm_gem_object_reference(&obj->base);
|
||||
}
|
||||
else
|
||||
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
|
||||
exec[i].handle));
|
||||
|
||||
// printf("%s object %p handle %d\n", __FUNCTION__, obj, exec[i].handle);
|
||||
|
||||
if (&obj->base == NULL) {
|
||||
DRM_DEBUG("Invalid object handle %d at index %d\n",
|
||||
exec[i].handle, i);
|
||||
/* prevent error path from reading uninitialized data */
|
||||
ret = -ENOENT;
|
||||
ret = eb_lookup_objects(eb, exec, args, file);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (!list_empty(&obj->exec_list)) {
|
||||
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
|
||||
obj, exec[i].handle, i);
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
list_add_tail(&obj->exec_list, &objects);
|
||||
obj->exec_handle = exec[i].handle;
|
||||
obj->exec_entry = &exec[i];
|
||||
eb_add_object(eb, obj);
|
||||
}
|
||||
|
||||
/* take note of the batch buffer before we might reorder the lists */
|
||||
batch_obj = list_entry(objects.prev,
|
||||
batch_obj = list_entry(eb->objects.prev,
|
||||
struct drm_i915_gem_object,
|
||||
exec_list);
|
||||
|
||||
/* Move the objects en-masse into the GTT, evicting if necessary. */
|
||||
ret = i915_gem_execbuffer_reserve(ring, file, &objects);
|
||||
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
|
||||
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
/* The objects are in their final locations, apply the relocations. */
|
||||
ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
|
||||
if (need_relocs)
|
||||
ret = i915_gem_execbuffer_relocate(dev, eb);
|
||||
if (ret) {
|
||||
if (ret == -EFAULT) {
|
||||
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
|
||||
&objects, eb,
|
||||
exec,
|
||||
args->buffer_count);
|
||||
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
|
||||
eb, exec);
|
||||
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
}
|
||||
if (ret)
|
||||
@ -1072,7 +1061,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
|
||||
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
|
||||
|
||||
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
|
||||
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -1104,18 +1093,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
|
||||
exec_len = args->batch_len;
|
||||
if (cliprects) {
|
||||
// for (i = 0; i < args->num_cliprects; i++) {
|
||||
// ret = i915_emit_box(dev, &cliprects[i],
|
||||
// args->DR1, args->DR4);
|
||||
// if (ret)
|
||||
// goto err;
|
||||
|
||||
// ret = ring->dispatch_execbuffer(ring,
|
||||
// exec_start, exec_len,
|
||||
// flags);
|
||||
// if (ret)
|
||||
// goto err;
|
||||
// }
|
||||
} else {
|
||||
ret = ring->dispatch_execbuffer(ring,
|
||||
exec_start, exec_len,
|
||||
@ -1126,30 +1104,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
|
||||
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
|
||||
|
||||
i915_gem_execbuffer_move_to_active(&objects, ring);
|
||||
i915_gem_execbuffer_move_to_active(&eb->objects, ring);
|
||||
i915_gem_execbuffer_retire_commands(dev, file, ring);
|
||||
|
||||
err:
|
||||
eb_destroy(eb);
|
||||
while (!list_empty(&objects)) {
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
obj = list_first_entry(&objects,
|
||||
struct drm_i915_gem_object,
|
||||
exec_list);
|
||||
list_del_init(&obj->exec_list);
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
pre_mutex_err:
|
||||
kfree(cliprects);
|
||||
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int
|
||||
i915_gem_execbuffer2(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
@ -1167,11 +1136,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count);
|
||||
|
||||
// if (exec2_list == NULL)
|
||||
// exec2_list = drm_malloc_ab(sizeof(*exec2_list),
|
||||
// args->buffer_count);
|
||||
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
|
||||
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
|
||||
if (exec2_list == NULL) {
|
||||
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
|
||||
args->buffer_count);
|
||||
|
@ -24,16 +24,18 @@
|
||||
|
||||
#define iowrite32(v, addr) writel((v), (addr))
|
||||
|
||||
#define AGP_NORMAL_MEMORY 0
|
||||
|
||||
#define AGP_USER_TYPES (1 << 16)
|
||||
#define AGP_USER_MEMORY (AGP_USER_TYPES)
|
||||
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
#include "i915_trace.h"
|
||||
#include "intel_drv.h"
|
||||
|
||||
#define AGP_USER_TYPES (1 << 16)
|
||||
#define AGP_USER_MEMORY (AGP_USER_TYPES)
|
||||
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
|
||||
|
||||
typedef uint32_t gtt_pte_t;
|
||||
|
||||
/* PPGTT stuff */
|
||||
@ -50,7 +52,7 @@ typedef uint32_t gtt_pte_t;
|
||||
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
|
||||
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
|
||||
|
||||
static inline gtt_pte_t pte_encode(struct drm_device *dev,
|
||||
static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
|
||||
dma_addr_t addr,
|
||||
enum i915_cache_level level)
|
||||
{
|
||||
@ -83,7 +85,7 @@ static inline gtt_pte_t pte_encode(struct drm_device *dev,
|
||||
}
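
gen6_pte_encode() packs a page address, cache-control bits, and a valid bit into one 32-bit entry. A minimal sketch of that packing; the mask and cache values follow the defines in the hunk above but should be treated as assumptions, since the exact encodings vary per generation:

#include <stdint.h>

#define PTE_VALID      (1u << 0)      /* GEN6_PTE_VALID */
#define PTE_CACHE_LLC  (2u << 1)      /* assumed LLC encoding */
#define PTE_ADDR_MASK  0xfffff000u    /* 4 KiB-aligned page address */

static inline uint32_t make_pte(uint32_t page_addr)
{
    return (page_addr & PTE_ADDR_MASK) | PTE_CACHE_LLC | PTE_VALID;
}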
|
||||
|
||||
/* PPGTT support for Sandybridge/Gen6 and later */
|
||||
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
|
||||
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
|
||||
unsigned first_entry,
|
||||
unsigned num_entries)
|
||||
{
|
||||
@ -93,15 +95,16 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
|
||||
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
|
||||
unsigned last_pte, i;
|
||||
|
||||
scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
|
||||
scratch_pte = gen6_pte_encode(ppgtt->dev,
|
||||
ppgtt->scratch_page_dma_addr,
|
||||
I915_CACHE_LLC);
|
||||
|
||||
pt_vaddr = AllocKernelSpace(4096);
|
||||
|
||||
if(pt_vaddr != NULL)
|
||||
{
|
||||
while (num_entries)
|
||||
{
|
||||
if(pt_vaddr == NULL)
|
||||
return;
|
||||
|
||||
while (num_entries) {
|
||||
last_pte = first_pte + num_entries;
|
||||
if (last_pte > I915_PPGTT_PT_ENTRIES)
|
||||
last_pte = I915_PPGTT_PT_ENTRIES;
|
||||
@ -114,121 +117,15 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
|
||||
num_entries -= last_pte - first_pte;
|
||||
first_pte = 0;
|
||||
act_pd++;
|
||||
}
|
||||
FreeKernelSpace(pt_vaddr);
|
||||
};
|
||||
|
||||
FreeKernelSpace(pt_vaddr);
|
||||
}
|
||||
|
||||
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_hw_ppgtt *ppgtt;
|
||||
unsigned first_pd_entry_in_global_pt;
|
||||
int i;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
|
||||
* entries. For aliasing ppgtt support we just steal them at the end for
|
||||
* now. */
|
||||
first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
|
||||
|
||||
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
|
||||
if (!ppgtt)
|
||||
return ret;
|
||||
|
||||
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
|
||||
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
|
||||
GFP_KERNEL);
|
||||
if (!ppgtt->pt_pages)
|
||||
goto err_ppgtt;
|
||||
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++) {
|
||||
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
|
||||
if (!ppgtt->pt_pages[i])
|
||||
goto err_pt_alloc;
|
||||
}
|
||||
|
||||
/*
|
||||
if (dev_priv->mm.gtt->needs_dmar) {
|
||||
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
|
||||
*ppgtt->num_pd_entries,
|
||||
GFP_KERNEL);
|
||||
if (!ppgtt->pt_dma_addr)
|
||||
goto err_pt_alloc;
|
||||
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++) {
|
||||
dma_addr_t pt_addr;
|
||||
|
||||
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
|
||||
0, 4096,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
if (pci_dma_mapping_error(dev->pdev,
|
||||
pt_addr)) {
|
||||
ret = -EIO;
|
||||
goto err_pd_pin;
|
||||
|
||||
}
|
||||
ppgtt->pt_dma_addr[i] = pt_addr;
|
||||
}
|
||||
}
|
||||
*/
|
||||
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
|
||||
|
||||
i915_ppgtt_clear_range(ppgtt, 0,
|
||||
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
|
||||
|
||||
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
|
||||
|
||||
dev_priv->mm.aliasing_ppgtt = ppgtt;
|
||||
|
||||
return 0;
|
||||
|
||||
err_pd_pin:
|
||||
// if (ppgtt->pt_dma_addr) {
|
||||
// for (i--; i >= 0; i--)
|
||||
// pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
|
||||
// 4096, PCI_DMA_BIDIRECTIONAL);
|
||||
// }
|
||||
err_pt_alloc:
|
||||
// kfree(ppgtt->pt_dma_addr);
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++) {
|
||||
if (ppgtt->pt_pages[i])
|
||||
FreePage((addr_t)(ppgtt->pt_pages[i]));
|
||||
}
|
||||
kfree(ppgtt->pt_pages);
|
||||
err_ppgtt:
|
||||
kfree(ppgtt);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
int i;
|
||||
|
||||
if (!ppgtt)
|
||||
return;
|
||||
|
||||
// if (ppgtt->pt_dma_addr) {
|
||||
// for (i = 0; i < ppgtt->num_pd_entries; i++)
|
||||
// pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
|
||||
// 4096, PCI_DMA_BIDIRECTIONAL);
|
||||
// }
|
||||
|
||||
// kfree(ppgtt->pt_dma_addr);
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++)
|
||||
FreePage((addr_t)(ppgtt->pt_pages[i]));
|
||||
kfree(ppgtt->pt_pages);
|
||||
kfree(ppgtt);
|
||||
}
|
||||
|
||||
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
|
||||
const struct sg_table *pages,
|
||||
unsigned first_entry,
|
||||
enum i915_cache_level cache_level)
|
||||
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
|
||||
struct sg_table *pages,
|
||||
unsigned first_entry,
|
||||
enum i915_cache_level cache_level)
|
||||
{
|
||||
gtt_pte_t *pt_vaddr;
|
||||
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
|
||||
@ -244,16 +141,17 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
|
||||
m = 0;
|
||||
|
||||
pt_vaddr = AllocKernelSpace(4096);
|
||||
if( pt_vaddr == NULL)
|
||||
return;
|
||||
|
||||
if(pt_vaddr == NULL)
|
||||
return;
|
||||
|
||||
while (i < pages->nents) {
|
||||
MapPage(pt_vaddr,(addr_t)ppgtt->pt_pages[act_pd], 3);
|
||||
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pd]), 3);
|
||||
|
||||
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
|
||||
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
|
||||
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
|
||||
pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
|
||||
cache_level);
|
||||
pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
|
||||
cache_level);
|
||||
|
||||
/* grab the next page */
|
||||
if (++m == segment_len) {
|
||||
@ -266,19 +164,136 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
first_pte = 0;
|
||||
act_pd++;
|
||||
first_pte = 0;
|
||||
act_pd++;
|
||||
}
|
||||
FreeKernelSpace(pt_vaddr);
|
||||
}
|
||||
|
||||
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (ppgtt->pt_dma_addr) {
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++)
|
||||
pci_unmap_page(ppgtt->dev->pdev,
|
||||
ppgtt->pt_dma_addr[i],
|
||||
4096, PCI_DMA_BIDIRECTIONAL);
|
||||
}
|
||||
|
||||
kfree(ppgtt->pt_dma_addr);
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++)
|
||||
__free_page(ppgtt->pt_pages[i]);
|
||||
kfree(ppgtt->pt_pages);
|
||||
kfree(ppgtt);
|
||||
}
|
||||
|
||||
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
|
||||
{
|
||||
struct drm_device *dev = ppgtt->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned first_pd_entry_in_global_pt;
|
||||
int i;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
|
||||
* entries. For aliasing ppgtt support we just steal them at the end for
|
||||
* now. */
|
||||
first_pd_entry_in_global_pt =
|
||||
gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
|
||||
|
||||
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
|
||||
ppgtt->clear_range = gen6_ppgtt_clear_range;
|
||||
ppgtt->insert_entries = gen6_ppgtt_insert_entries;
|
||||
ppgtt->cleanup = gen6_ppgtt_cleanup;
|
||||
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
|
||||
GFP_KERNEL);
|
||||
if (!ppgtt->pt_pages)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++) {
|
||||
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
|
||||
if (!ppgtt->pt_pages[i])
|
||||
goto err_pt_alloc;
|
||||
}
|
||||
|
||||
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
|
||||
GFP_KERNEL);
|
||||
if (!ppgtt->pt_dma_addr)
|
||||
goto err_pt_alloc;
|
||||
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++) {
|
||||
dma_addr_t pt_addr;
|
||||
|
||||
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
ppgtt->pt_dma_addr[i] = pt_addr;
|
||||
}
|
||||
|
||||
ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
|
||||
|
||||
ppgtt->clear_range(ppgtt, 0,
|
||||
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
|
||||
|
||||
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
|
||||
|
||||
return 0;
|
||||
|
||||
err_pd_pin:
|
||||
if (ppgtt->pt_dma_addr) {
|
||||
for (i--; i >= 0; i--)
|
||||
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
|
||||
4096, PCI_DMA_BIDIRECTIONAL);
|
||||
}
|
||||
err_pt_alloc:
|
||||
kfree(ppgtt->pt_dma_addr);
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++) {
|
||||
if (ppgtt->pt_pages[i])
|
||||
__free_page(ppgtt->pt_pages[i]);
|
||||
}
|
||||
kfree(ppgtt->pt_pages);
|
||||
|
||||
return ret;
|
||||
}
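
gen6_ppgtt_init() steals the last I915_PPGTT_PD_ENTRIES slots of the global GTT for the page directory, and pd_offset is just that slot's byte position. Reduced to the arithmetic (512 mirrors the upstream constant, assumed here):

typedef unsigned int gtt_pte_t;
#define PPGTT_PD_ENTRIES 512   /* assumed, mirrors I915_PPGTT_PD_ENTRIES */

/* Byte offset of the first stolen PDE slot at the tail of the GGTT. */
static unsigned long pd_offset_bytes(unsigned int gtt_total_entries)
{
    unsigned int first_pde = gtt_total_entries - PPGTT_PD_ENTRIES;
    return (unsigned long)first_pde * sizeof(gtt_pte_t);
}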
|
||||
|
||||
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_hw_ppgtt *ppgtt;
|
||||
int ret;
|
||||
|
||||
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
|
||||
if (!ppgtt)
|
||||
return -ENOMEM;
|
||||
|
||||
ppgtt->dev = dev;
|
||||
|
||||
ret = gen6_ppgtt_init(ppgtt);
|
||||
if (ret)
|
||||
kfree(ppgtt);
|
||||
else
|
||||
dev_priv->mm.aliasing_ppgtt = ppgtt;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
|
||||
if (!ppgtt)
|
||||
return;
|
||||
|
||||
ppgtt->cleanup(ppgtt);
|
||||
}
|
||||
|
||||
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_gem_object *obj,
|
||||
enum i915_cache_level cache_level)
|
||||
{
|
||||
i915_ppgtt_insert_sg_entries(ppgtt,
|
||||
obj->pages,
|
||||
ppgtt->insert_entries(ppgtt, obj->pages,
|
||||
obj->gtt_space->start >> PAGE_SHIFT,
|
||||
cache_level);
|
||||
}
|
||||
@ -286,7 +301,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
|
||||
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_gem_object *obj)
|
||||
{
|
||||
i915_ppgtt_clear_range(ppgtt,
|
||||
ppgtt->clear_range(ppgtt,
|
||||
obj->gtt_space->start >> PAGE_SHIFT,
|
||||
obj->base.size >> PAGE_SHIFT);
|
||||
}
|
||||
@ -297,7 +312,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
|
||||
uint32_t pd_offset;
|
||||
struct intel_ring_buffer *ring;
|
||||
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
uint32_t __iomem *pd_addr;
|
||||
gtt_pte_t __iomem *pd_addr;
|
||||
uint32_t pd_entry;
|
||||
int i;
|
||||
|
||||
@ -305,15 +320,11 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
|
||||
return;
|
||||
|
||||
|
||||
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
|
||||
pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
|
||||
for (i = 0; i < ppgtt->num_pd_entries; i++) {
|
||||
dma_addr_t pt_addr;
|
||||
|
||||
if (dev_priv->mm.gtt->needs_dmar)
|
||||
pt_addr = ppgtt->pt_dma_addr[i];
|
||||
else
|
||||
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
|
||||
|
||||
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
|
||||
pd_entry |= GEN6_PDE_VALID;
|
||||
|
||||
@ -353,11 +364,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
|
||||
}
|
||||
}
|
||||
|
||||
extern int intel_iommu_gfx_mapped;
|
||||
/* Certain Gen5 chipsets require idling the GPU before
|
||||
* unmapping anything from the GTT when VT-d is enabled.
|
||||
*/
|
||||
static inline bool needs_idle_maps(struct drm_device *dev)
|
||||
{
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
/* Query intel_iommu to see if we need the workaround. Presumably that
|
||||
* was loaded first.
|
||||
*/
|
||||
if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
|
||||
return true;
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool do_idling(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
bool ret = dev_priv->mm.interruptible;
|
||||
|
||||
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
|
||||
if (unlikely(dev_priv->gtt.do_idle_maps)) {
|
||||
dev_priv->mm.interruptible = false;
|
||||
if (i915_gpu_idle(dev_priv->dev)) {
|
||||
DRM_ERROR("Couldn't idle GPU\n");
|
||||
@ -371,47 +398,18 @@ static bool do_idling(struct drm_i915_private *dev_priv)
|
||||
|
||||
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
|
||||
{
|
||||
if (unlikely(dev_priv->mm.gtt->do_idle_maps))
|
||||
if (unlikely(dev_priv->gtt.do_idle_maps))
|
||||
dev_priv->mm.interruptible = interruptible;
|
||||
}
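
do_idling()/undo_idling() bracket an unmap with a save-idle-restore of the interruptible flag on chipsets that need quiesced hardware. The shape of that pairing, with illustrative types standing in for the driver's:

struct gtt_state { int interruptible; int do_idle_maps; };

/* Returns the saved flag; the caller must hand it back to end_unmap(). */
static int begin_unmap(struct gtt_state *s)
{
    int saved = s->interruptible;
    if (s->do_idle_maps)
        s->interruptible = 0;   /* idle the GPU without interruption */
    return saved;
}

static void end_unmap(struct gtt_state *s, int saved)
{
    if (s->do_idle_maps)
        s->interruptible = saved;
}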
|
||||
|
||||
|
||||
static void i915_ggtt_clear_range(struct drm_device *dev,
|
||||
unsigned first_entry,
|
||||
unsigned num_entries)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
gtt_pte_t scratch_pte;
|
||||
gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
|
||||
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
|
||||
int i;
|
||||
|
||||
if (INTEL_INFO(dev)->gen < 6) {
|
||||
intel_gtt_clear_range(first_entry, num_entries);
|
||||
return;
|
||||
}
|
||||
|
||||
if (WARN(num_entries > max_entries,
|
||||
"First entry = %d; Num entries = %d (max=%d)\n",
|
||||
first_entry, num_entries, max_entries))
|
||||
num_entries = max_entries;
|
||||
|
||||
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
|
||||
for (i = 0; i < num_entries; i++)
|
||||
iowrite32(scratch_pte, &gtt_base[i]);
|
||||
readl(gtt_base);
|
||||
}
|
||||
|
||||
|
||||
#if 0
|
||||
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
/* First fill our portion of the GTT with scratch pages */
|
||||
i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
|
||||
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
|
||||
dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
|
||||
dev_priv->gtt.total / PAGE_SIZE);
|
||||
|
||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
|
||||
i915_gem_clflush_object(obj);
|
||||
@ -420,29 +418,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
|
||||
|
||||
i915_gem_chipset_flush(dev);
|
||||
}
|
||||
#endif
|
||||
|
||||
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct scatterlist *sg, *s;
|
||||
unsigned int nents;
|
||||
int i;
|
||||
|
||||
if (obj->has_dma_mapping)
|
||||
return 0;
|
||||
|
||||
sg = obj->pages->sgl;
|
||||
nents = obj->pages->nents;
|
||||
|
||||
|
||||
WARN_ON(nents == 0 || sg[0].length == 0);
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
BUG_ON(!sg_page(s));
|
||||
s->dma_address = sg_phys(s);
|
||||
}
|
||||
|
||||
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
|
||||
if (!dma_map_sg(&obj->base.dev->pdev->dev,
|
||||
obj->pages->sgl, obj->pages->nents,
|
||||
PCI_DMA_BIDIRECTIONAL))
|
||||
return -ENOSPC;
|
||||
|
||||
return 0;
|
||||
}
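
The inline asm above ("lock; addl $0,0(%esp)") is the classic 32-bit x86 full-memory-barrier idiom: a locked no-op read-modify-write of the stack top serializes prior loads and stores, and is often cheaper than mfence. As a reusable helper:

/* Full memory barrier via a locked no-op RMW on the stack slot. */
static inline void full_mb(void)
{
    __asm__ __volatile__("lock; addl $0,0(%%esp)" ::: "memory");
}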
|
||||
@ -453,16 +438,15 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
|
||||
* within the global GTT as well as accessible by the GPU through the GMADR
|
||||
* mapped BAR (dev_priv->mm.gtt->gtt).
|
||||
*/
|
||||
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
|
||||
static void gen6_ggtt_insert_entries(struct drm_device *dev,
|
||||
struct sg_table *st,
|
||||
unsigned int first_entry,
|
||||
enum i915_cache_level level)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct sg_table *st = obj->pages;
|
||||
struct scatterlist *sg = st->sgl;
|
||||
const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
|
||||
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
|
||||
gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
|
||||
gtt_pte_t __iomem *gtt_entries =
|
||||
(gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
|
||||
int unused, i = 0;
|
||||
unsigned int len, m = 0;
|
||||
dma_addr_t addr;
|
||||
@ -471,14 +455,12 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
|
||||
len = sg_dma_len(sg) >> PAGE_SHIFT;
|
||||
for (m = 0; m < len; m++) {
|
||||
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
|
||||
iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
|
||||
iowrite32(gen6_pte_encode(dev, addr, level),
|
||||
&gtt_entries[i]);
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
BUG_ON(i > max_entries);
|
||||
BUG_ON(i != obj->base.size / PAGE_SIZE);
|
||||
|
||||
/* XXX: This serves as a posting read to make sure that the PTE has
|
||||
* actually been updated. There is some concern that even though
|
||||
* registers and PTEs are within the same BAR that they are potentially
|
||||
@ -486,7 +468,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
|
||||
* hardware should work, we must keep this posting read for paranoia.
|
||||
*/
|
||||
if (i != 0)
|
||||
WARN_ON(readl(>t_entries[i-1]) != pte_encode(dev, addr, level));
|
||||
WARN_ON(readl(>t_entries[i-1])
|
||||
!= gen6_pte_encode(dev, addr, level));
|
||||
|
||||
/* This next bit makes the above posting read even more important. We
|
||||
* want to flush the TLBs only after we're certain all the PTE updates
|
||||
@ -496,26 +479,68 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
|
||||
POSTING_READ(GFX_FLSH_CNTL_GEN6);
|
||||
}
|
||||
|
||||
static void gen6_ggtt_clear_range(struct drm_device *dev,
|
||||
unsigned int first_entry,
|
||||
unsigned int num_entries)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
gtt_pte_t scratch_pte;
|
||||
gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
|
||||
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
|
||||
int i;
|
||||
|
||||
if (WARN(num_entries > max_entries,
|
||||
"First entry = %d; Num entries = %d (max=%d)\n",
|
||||
first_entry, num_entries, max_entries))
|
||||
num_entries = max_entries;
|
||||
|
||||
scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
|
||||
I915_CACHE_LLC);
|
||||
for (i = 0; i < num_entries; i++)
|
||||
iowrite32(scratch_pte, >t_base[i]);
|
||||
readl(gtt_base);
|
||||
}
|
||||
|
||||
|
||||
static void i915_ggtt_insert_entries(struct drm_device *dev,
|
||||
struct sg_table *st,
|
||||
unsigned int pg_start,
|
||||
enum i915_cache_level cache_level)
|
||||
{
|
||||
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
|
||||
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
|
||||
|
||||
intel_gtt_insert_sg_entries(st, pg_start, flags);
|
||||
|
||||
}
|
||||
|
||||
static void i915_ggtt_clear_range(struct drm_device *dev,
|
||||
unsigned int first_entry,
|
||||
unsigned int num_entries)
|
||||
{
|
||||
intel_gtt_clear_range(first_entry, num_entries);
|
||||
}
|
||||
|
||||
|
||||
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
|
||||
enum i915_cache_level cache_level)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
if (INTEL_INFO(dev)->gen < 6) {
|
||||
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
|
||||
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
|
||||
intel_gtt_insert_sg_entries(obj->pages,
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
|
||||
obj->gtt_space->start >> PAGE_SHIFT,
|
||||
flags);
|
||||
} else {
|
||||
gen6_ggtt_bind_object(obj, cache_level);
|
||||
}
|
||||
cache_level);
|
||||
|
||||
obj->has_global_gtt_mapping = 1;
|
||||
}
|
||||
|
||||
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
i915_ggtt_clear_range(obj->base.dev,
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
dev_priv->gtt.gtt_clear_range(obj->base.dev,
|
||||
obj->gtt_space->start >> PAGE_SHIFT,
|
||||
obj->base.size >> PAGE_SHIFT);
|
||||
|
||||
@ -530,10 +555,10 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
|
||||
|
||||
interruptible = do_idling(dev_priv);
|
||||
|
||||
// if (!obj->has_dma_mapping)
|
||||
// dma_unmap_sg(&dev->pdev->dev,
|
||||
// obj->pages->sgl, obj->pages->nents,
|
||||
// PCI_DMA_BIDIRECTIONAL);
|
||||
if (!obj->has_dma_mapping)
|
||||
dma_unmap_sg(&dev->pdev->dev,
|
||||
obj->pages->sgl, obj->pages->nents,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
undo_idling(dev_priv, interruptible);
|
||||
}
|
||||
@ -554,27 +579,102 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
|
||||
*end -= 4096;
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_init_global_gtt(struct drm_device *dev,
|
||||
void i915_gem_setup_global_gtt(struct drm_device *dev,
|
||||
unsigned long start,
|
||||
unsigned long mappable_end,
|
||||
unsigned long end)
|
||||
{
|
||||
/* Let GEM Manage all of the aperture.
|
||||
*
|
||||
* However, leave one page at the end still bound to the scratch page.
|
||||
* There are a number of places where the hardware apparently prefetches
|
||||
* past the end of the object, and we've seen multiple hangs with the
|
||||
* GPU head pointer stuck in a batchbuffer bound at the last page of the
|
||||
* aperture. One page should be enough to keep any prefetching inside
|
||||
* of the aperture.
|
||||
*/
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
struct drm_mm_node *entry;
|
||||
struct drm_i915_gem_object *obj;
|
||||
unsigned long hole_start, hole_end;
|
||||
|
||||
/* Substract the guard page ... */
|
||||
BUG_ON(mappable_end > end);
|
||||
|
||||
/* Subtract the guard page ... */
|
||||
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
|
||||
if (!HAS_LLC(dev))
|
||||
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
|
||||
|
||||
dev_priv->mm.gtt_start = start;
|
||||
dev_priv->mm.gtt_mappable_end = mappable_end;
|
||||
dev_priv->mm.gtt_end = end;
|
||||
dev_priv->mm.gtt_total = end - start;
|
||||
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
|
||||
/* Mark any preallocated objects as occupied */
|
||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
|
||||
DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
|
||||
obj->gtt_offset, obj->base.size);
|
||||
|
||||
/* ... but ensure that we clear the entire range. */
|
||||
i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
|
||||
BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
|
||||
obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
|
||||
obj->gtt_offset,
|
||||
obj->base.size,
|
||||
false);
|
||||
obj->has_global_gtt_mapping = 1;
|
||||
}
|
||||
|
||||
dev_priv->gtt.start = start;
|
||||
dev_priv->gtt.total = end - start;
|
||||
|
||||
/* Clear any non-preallocated blocks */
|
||||
drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
|
||||
hole_start, hole_end) {
|
||||
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
|
||||
hole_start, hole_end);
|
||||
dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
|
||||
(hole_end-hole_start) / PAGE_SIZE);
|
||||
}
|
||||
|
||||
/* And finally clear the reserved guard page */
|
||||
dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
|
||||
}
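
The setup above manages end - start - PAGE_SIZE rather than the full span, so the final page stays bound to the scratch page as a prefetch guard. The carve-out itself is just:

#define PAGE_SIZE 4096UL

struct range { unsigned long start, size; };

/* Managed span with a one-page guard left at the top of the aperture,
 * keeping GPU prefetch past the last object inside mapped space. */
static struct range managed_range(unsigned long start, unsigned long end)
{
    struct range r = { start, end - start - PAGE_SIZE };
    return r;
}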
|
||||
|
||||
static bool
|
||||
intel_enable_ppgtt(struct drm_device *dev)
|
||||
{
|
||||
if (i915_enable_ppgtt >= 0)
|
||||
return i915_enable_ppgtt;
|
||||
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
/* Disable ppgtt on SNB if VT-d is on. */
|
||||
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
|
||||
return false;
|
||||
#endif
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void i915_gem_init_global_gtt(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long gtt_size, mappable_size;
|
||||
|
||||
gtt_size = dev_priv->gtt.total;
|
||||
mappable_size = dev_priv->gtt.mappable_end;
|
||||
|
||||
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
|
||||
int ret;
|
||||
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
|
||||
* aperture accordingly when using aliasing ppgtt. */
|
||||
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
|
||||
gtt_size -= LFB_SIZE;
|
||||
|
||||
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size);
|
||||
|
||||
ret = i915_gem_init_aliasing_ppgtt(dev);
|
||||
if (!ret)
|
||||
return;
|
||||
|
||||
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
|
||||
drm_mm_takedown(&dev_priv->mm.gtt_space);
|
||||
gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
|
||||
}
|
||||
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size);
|
||||
}
|
||||
|
||||
static int setup_scratch_page(struct drm_device *dev)
|
||||
@ -586,6 +686,8 @@ static int setup_scratch_page(struct drm_device *dev)
|
||||
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
|
||||
if (page == NULL)
|
||||
return -ENOMEM;
|
||||
get_page(page);
|
||||
set_pages_uc(page, 1);
|
||||
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
|
||||
@ -595,12 +697,21 @@ static int setup_scratch_page(struct drm_device *dev)
|
||||
#else
|
||||
dma_addr = page_to_phys(page);
|
||||
#endif
|
||||
dev_priv->mm.gtt->scratch_page = page;
|
||||
dev_priv->mm.gtt->scratch_page_dma = dma_addr;
|
||||
dev_priv->gtt.scratch_page = page;
|
||||
dev_priv->gtt.scratch_page_dma = dma_addr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void teardown_scratch_page(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
set_pages_wb(dev_priv->gtt.scratch_page, 1);
|
||||
pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
put_page(dev_priv->gtt.scratch_page);
|
||||
__free_page(dev_priv->gtt.scratch_page);
|
||||
}
|
||||
|
||||
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
|
||||
{
|
||||
@ -609,14 +720,14 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
|
||||
return snb_gmch_ctl << 20;
|
||||
}
|
||||
|
||||
static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
|
||||
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
|
||||
{
|
||||
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
|
||||
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
|
||||
return snb_gmch_ctl << 25; /* 32 MB units */
|
||||
}
|
||||
|
||||
static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
|
||||
static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
|
||||
{
|
||||
static const int stolen_decoder[] = {
|
||||
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
|
||||
@ -625,91 +736,130 @@ static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
|
||||
return stolen_decoder[snb_gmch_ctl] << 20;
|
||||
}
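
Both size helpers decode packed fields of the SNB_GMCH_CTRL config word: a field in 1 MiB units for the GTT and one in 32 MiB units for stolen memory. A runnable sketch with a fabricated register value; the shift/mask constants are assumptions matching the i915 headers of this era:

#include <stdio.h>

#define GGMS_SHIFT 8     /* GTT graphics memory size field */
#define GGMS_MASK  0x3
#define GMS_SHIFT  3     /* graphics mode select (stolen size) field */
#define GMS_MASK   0x1f

int main(void)
{
    unsigned short ctl = 0x0151;                        /* fabricated */
    unsigned int gtt    = ((ctl >> GGMS_SHIFT) & GGMS_MASK) << 20;
    unsigned int stolen = ((ctl >> GMS_SHIFT)  & GMS_MASK)  << 25;
    printf("gtt=%u MiB, stolen=%u MiB\n", gtt >> 20, stolen >> 20);
    return 0;
}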
|
||||
|
||||
int i915_gem_gtt_init(struct drm_device *dev)
|
||||
static int gen6_gmch_probe(struct drm_device *dev,
|
||||
size_t *gtt_total,
|
||||
size_t *stolen,
|
||||
phys_addr_t *mappable_base,
|
||||
unsigned long *mappable_end)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
phys_addr_t gtt_bus_addr;
|
||||
unsigned int gtt_size;
|
||||
u16 snb_gmch_ctl;
|
||||
int ret;
|
||||
|
||||
/* On modern platforms we need not worry ourselves with the legacy
|
||||
* hostbridge query stuff. Skip it entirely
|
||||
*mappable_base = pci_resource_start(dev->pdev, 2);
|
||||
*mappable_end = pci_resource_len(dev->pdev, 2);
|
||||
|
||||
/* 64/512MB is the current min/max we actually know of, but this is just
|
||||
* a coarse sanity check.
|
||||
*/
|
||||
if (INTEL_INFO(dev)->gen < 6) {
|
||||
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
|
||||
if (!ret) {
|
||||
DRM_ERROR("failed to set up gmch\n");
|
||||
return -EIO;
|
||||
if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
|
||||
DRM_ERROR("Unknown GMADR size (%lx)\n",
|
||||
dev_priv->gtt.mappable_end);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
dev_priv->mm.gtt = intel_gtt_get();
|
||||
if (!dev_priv->mm.gtt) {
|
||||
DRM_ERROR("Failed to initialize GTT\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
|
||||
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
|
||||
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
|
||||
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
|
||||
|
||||
dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
|
||||
if (!dev_priv->mm.gtt)
|
||||
return -ENOMEM;
|
||||
if (IS_GEN7(dev))
|
||||
*stolen = gen7_get_stolen_size(snb_gmch_ctl);
|
||||
else
|
||||
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
|
||||
|
||||
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
dev_priv->mm.gtt->needs_dmar = 1;
|
||||
#endif
|
||||
*gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;
|
||||
|
||||
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
|
||||
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
|
||||
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
|
||||
|
||||
/* i9xx_setup */
|
||||
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
|
||||
dev_priv->mm.gtt->gtt_total_entries =
|
||||
gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
|
||||
if (INTEL_INFO(dev)->gen < 7)
|
||||
dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
|
||||
else
|
||||
dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
|
||||
|
||||
dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
|
||||
/* 64/512MB is the current min/max we actually know of, but this is just a
|
||||
* coarse sanity check.
|
||||
*/
|
||||
if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
|
||||
dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
|
||||
DRM_ERROR("Unknown GMADR entries (%d)\n",
|
||||
dev_priv->mm.gtt->gtt_mappable_entries);
|
||||
ret = -ENXIO;
|
||||
goto err_out;
|
||||
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
|
||||
if (!dev_priv->gtt.gsm) {
|
||||
DRM_ERROR("Failed to map the gtt page table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = setup_scratch_page(dev);
|
||||
if (ret) {
|
||||
if (ret)
|
||||
DRM_ERROR("Scratch setup failed\n");
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
dev_priv->mm.gtt->gtt = ioremap(gtt_bus_addr,
|
||||
dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
|
||||
if (!dev_priv->mm.gtt->gtt) {
|
||||
DRM_ERROR("Failed to map the gtt page table\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_out;
|
||||
}
|
||||
dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
|
||||
dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
|
||||
|
||||
/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
|
||||
DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
|
||||
DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
|
||||
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
kfree(dev_priv->mm.gtt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void gen6_gmch_remove(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
iounmap(dev_priv->gtt.gsm);
|
||||
teardown_scratch_page(dev_priv->dev);
|
||||
}
|
||||
|
||||
static int i915_gmch_probe(struct drm_device *dev,
|
||||
size_t *gtt_total,
|
||||
size_t *stolen,
|
||||
phys_addr_t *mappable_base,
|
||||
unsigned long *mappable_end)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
|
||||
if (!ret) {
|
||||
DRM_ERROR("failed to set up gmch\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
|
||||
|
||||
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
|
||||
dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
|
||||
dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void i915_gmch_remove(struct drm_device *dev)
|
||||
{
|
||||
// intel_gmch_remove();
|
||||
}
|
||||
|
||||
int i915_gem_gtt_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_gtt *gtt = &dev_priv->gtt;
|
||||
unsigned long gtt_size;
|
||||
int ret;
|
||||
|
||||
if (INTEL_INFO(dev)->gen <= 5) {
|
||||
dev_priv->gtt.gtt_probe = i915_gmch_probe;
|
||||
dev_priv->gtt.gtt_remove = i915_gmch_remove;
|
||||
} else {
|
||||
dev_priv->gtt.gtt_probe = gen6_gmch_probe;
|
||||
dev_priv->gtt.gtt_remove = gen6_gmch_remove;
|
||||
}
|
||||
|
||||
ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
|
||||
&dev_priv->gtt.stolen_size,
|
||||
&gtt->mappable_base,
|
||||
&gtt->mappable_end);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
|
||||
|
||||
/* GMADR is the PCI mmio aperture into the global GTT. */
|
||||
DRM_INFO("Memory usable by graphics device = %zdM\n",
|
||||
dev_priv->gtt.total >> 20);
|
||||
DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
|
||||
dev_priv->gtt.mappable_end >> 20);
|
||||
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
|
||||
dev_priv->gtt.stolen_size >> 20);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct scatterlist *sg_next(struct scatterlist *sg)
|
||||
{
|
||||
|
@ -42,85 +42,73 @@
|
||||
* for is a boon.
|
||||
*/
|
||||
|
||||
#define PTE_ADDRESS_MASK 0xfffff000
|
||||
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
|
||||
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
|
||||
#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
|
||||
#define PTE_MAPPING_TYPE_CACHED (3 << 1)
|
||||
#define PTE_MAPPING_TYPE_MASK (3 << 1)
|
||||
#define PTE_VALID (1 << 0)
|
||||
|
||||
/**
|
||||
* i915_stolen_to_phys - take an offset into stolen memory and turn it into
|
||||
* a physical one
|
||||
* @dev: drm device
|
||||
* @offset: address to translate
|
||||
*
|
||||
* Some chip functions require allocations from stolen space and need the
|
||||
* physical address of the memory in question.
|
||||
*/
|
||||
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
|
||||
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct pci_dev *pdev = dev_priv->bridge_dev;
|
||||
u32 base;
|
||||
|
||||
#if 0
|
||||
/* On the machines I have tested the Graphics Base of Stolen Memory
|
||||
* is unreliable, so compute the base by subtracting the stolen memory
|
||||
* from the Top of Low Usable DRAM which is where the BIOS places
|
||||
* the graphics stolen memory.
|
||||
* is unreliable, so on those compute the base by subtracting the
|
||||
* stolen memory from the Top of Low Usable DRAM which is where the
|
||||
* BIOS places the graphics stolen memory.
|
||||
*
|
||||
* On gen2, the layout is slightly different with the Graphics Segment
|
||||
* immediately following Top of Memory (or Top of Usable DRAM). Note
|
||||
* it appears that TOUD is only reported by 865g, so we just use the
|
||||
* top of memory as determined by the e820 probe.
|
||||
*
|
||||
* XXX gen2 requires an unavailable symbol and 945gm fails with
|
||||
* its value of TOLUD.
|
||||
*/
|
||||
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
|
||||
/* top 32bits are reserved = 0 */
|
||||
base = 0;
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
/* Read Base Data of Stolen Memory Register (BDSM) directly.
|
||||
* Note that there is also a MCHBAR mirror at 0x1080c0 or
|
||||
* we could use device 2:0x5c instead.
|
||||
*/
|
||||
pci_read_config_dword(pdev, 0xB0, &base);
|
||||
base &= ~4095; /* lower bits used for locking register */
|
||||
} else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
|
||||
/* Read Graphics Base of Stolen Memory directly */
|
||||
pci_read_config_dword(pdev, 0xA4, &base);
|
||||
} else {
|
||||
/* XXX presume 8xx is the same as i915 */
|
||||
pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
|
||||
}
|
||||
#else
|
||||
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
|
||||
u16 val;
|
||||
pci_read_config_word(pdev, 0xb0, &val);
|
||||
base = val >> 4 << 20;
|
||||
} else {
|
||||
#if 0
|
||||
} else if (IS_GEN3(dev)) {
|
||||
u8 val;
|
||||
/* Stolen is immediately below Top of Low Usable DRAM */
|
||||
pci_read_config_byte(pdev, 0x9c, &val);
|
||||
base = val >> 3 << 27;
|
||||
}
|
||||
base -= dev_priv->mm.gtt->stolen_size;
|
||||
} else {
|
||||
/* Stolen is immediately above Top of Memory */
|
||||
base = max_low_pfn_mapped << PAGE_SHIFT;
|
||||
#endif
|
||||
}
|
||||
|
||||
return base + offset;
|
||||
return base;
|
||||
}
|
||||
|
||||
static void i915_warn_stolen(struct drm_device *dev)
|
||||
{
|
||||
DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
|
||||
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
|
||||
}
|
||||
|
||||
static void i915_setup_compression(struct drm_device *dev, int size)
|
||||
static int i915_setup_compression(struct drm_device *dev, int size)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
|
||||
unsigned long cfb_base;
|
||||
unsigned long ll_base = 0;
|
||||
|
||||
/* Just in case the BIOS is doing something questionable. */
|
||||
intel_disable_fbc(dev);
|
||||
|
||||
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
|
||||
/* Try to over-allocate to reduce reallocations and fragmentation */
|
||||
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
|
||||
size <<= 1, 4096, 0);
|
||||
if (!compressed_fb)
|
||||
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
|
||||
size >>= 1, 4096, 0);
|
||||
if (compressed_fb)
|
||||
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
|
||||
if (!compressed_fb)
|
||||
goto err;
|
||||
|
||||
cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
|
||||
if (!cfb_base)
|
||||
goto err_fb;
|
||||
|
||||
if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
|
||||
if (HAS_PCH_SPLIT(dev))
|
||||
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
|
||||
else if (IS_GM45(dev)) {
|
||||
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
|
||||
} else {
|
||||
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
|
||||
4096, 4096, 0);
|
||||
if (compressed_llb)
|
||||
@ -129,73 +117,206 @@ static void i915_setup_compression(struct drm_device *dev, int size)
|
||||
if (!compressed_llb)
|
||||
goto err_fb;
|
||||
|
||||
ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
|
||||
if (!ll_base)
|
||||
goto err_llb;
|
||||
}
|
||||
dev_priv->compressed_llb = compressed_llb;
|
||||
|
||||
dev_priv->cfb_size = size;
|
||||
I915_WRITE(FBC_CFB_BASE,
|
||||
dev_priv->mm.stolen_base + compressed_fb->start);
|
||||
I915_WRITE(FBC_LL_BASE,
|
||||
dev_priv->mm.stolen_base + compressed_llb->start);
|
||||
}
|
||||
|
||||
dev_priv->compressed_fb = compressed_fb;
|
||||
if (HAS_PCH_SPLIT(dev))
|
||||
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
|
||||
else if (IS_GM45(dev)) {
|
||||
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
|
||||
} else {
|
||||
I915_WRITE(FBC_CFB_BASE, cfb_base);
|
||||
I915_WRITE(FBC_LL_BASE, ll_base);
|
||||
dev_priv->compressed_llb = compressed_llb;
|
||||
}
|
||||
dev_priv->cfb_size = size;
|
||||
|
||||
DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
|
||||
cfb_base, ll_base, size >> 20);
|
||||
return;
|
||||
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
|
||||
size);
|
||||
|
||||
return 0;
|
||||
|
||||
err_llb:
|
||||
drm_mm_put_block(compressed_llb);
|
||||
err_fb:
|
||||
drm_mm_put_block(compressed_fb);
|
||||
err:
|
||||
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
|
||||
i915_warn_stolen(dev);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
static void i915_cleanup_compression(struct drm_device *dev)
|
||||
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (dev_priv->mm.stolen_base == 0)
|
||||
return -ENODEV;
|
||||
|
||||
if (size < dev_priv->cfb_size)
|
||||
return 0;
|
||||
|
||||
/* Release any current block */
|
||||
i915_gem_stolen_cleanup_compression(dev);
|
||||
|
||||
return i915_setup_compression(dev, size);
|
||||
}
|
||||
|
||||
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (dev_priv->cfb_size == 0)
|
||||
return;
|
||||
|
||||
if (dev_priv->compressed_fb)
|
||||
drm_mm_put_block(dev_priv->compressed_fb);
|
||||
|
||||
if (dev_priv->compressed_llb)
|
||||
drm_mm_put_block(dev_priv->compressed_llb);
|
||||
|
||||
dev_priv->cfb_size = 0;
|
||||
}
|
||||
|
||||
void i915_gem_cleanup_stolen(struct drm_device *dev)
|
||||
{
|
||||
if (I915_HAS_FBC(dev) && i915_powersave)
|
||||
i915_cleanup_compression(dev);
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
i915_gem_stolen_cleanup_compression(dev);
|
||||
drm_mm_takedown(&dev_priv->mm.stolen);
|
||||
}
|
||||
|
||||
int i915_gem_init_stolen(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
|
||||
|
||||
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
|
||||
if (dev_priv->mm.stolen_base == 0)
|
||||
return 0;
|
||||
|
||||
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
|
||||
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
|
||||
|
||||
/* Basic memrange allocator for stolen space */
|
||||
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
|
||||
|
||||
/* Try to set up FBC with a reasonable compressed buffer size */
|
||||
if (I915_HAS_FBC(dev) && i915_powersave) {
|
||||
int cfb_size;
|
||||
|
||||
/* Leave 1M for line length buffer & misc. */
|
||||
|
||||
/* Try to get a 32M buffer... */
|
||||
if (prealloc_size > (36*1024*1024))
|
||||
cfb_size = 32*1024*1024;
|
||||
else /* fall back to 7/8 of the stolen space */
|
||||
cfb_size = prealloc_size * 7 / 8;
|
||||
i915_setup_compression(dev, cfb_size);
|
||||
}
|
||||
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct sg_table *
|
||||
i915_pages_create_for_stolen(struct drm_device *dev,
|
||||
u32 offset, u32 size)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct sg_table *st;
|
||||
struct scatterlist *sg;
|
||||
|
||||
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
|
||||
BUG_ON(offset > dev_priv->gtt.stolen_size - size);
|
||||
|
||||
/* We hide that we have no struct page backing our stolen object
|
||||
* by wrapping the contiguous physical allocation with a fake
|
||||
* dma mapping in a single scatterlist.
|
||||
*/
|
||||
|
||||
st = kmalloc(sizeof(*st), GFP_KERNEL);
|
||||
if (st == NULL)
|
||||
return NULL;
|
||||
|
||||
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
|
||||
kfree(st);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
sg = st->sgl;
|
||||
sg->offset = offset;
|
||||
sg->length = size;
|
||||
|
||||
sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
|
||||
sg_dma_len(sg) = size;
|
||||
|
||||
return st;
|
||||
}
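
The single-entry table built above is the whole trick: one contiguous physical span is described by one fake DMA-mapped entry, so no struct page backing is ever consulted. A stripped-down equivalent; the struct and names are simplified stand-ins for scatterlist:

#include <stdint.h>
#include <stdlib.h>

struct fake_sg { uint32_t offset, length; uint64_t dma_address; };

static struct fake_sg *wrap_contiguous(uint64_t phys_base,
                                       uint32_t offset, uint32_t size)
{
    struct fake_sg *sg = malloc(sizeof(*sg));
    if (!sg)
        return NULL;
    sg->offset = offset;
    sg->length = size;
    sg->dma_address = phys_base + offset;   /* contiguous, so one entry */
    return sg;
}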
|
||||
|
||||
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
BUG();
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
/* Should only be called during free */
|
||||
sg_free_table(obj->pages);
|
||||
kfree(obj->pages);
|
||||
}
|
||||
|
||||
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
|
||||
.get_pages = i915_gem_object_get_pages_stolen,
|
||||
.put_pages = i915_gem_object_put_pages_stolen,
|
||||
};
|
||||
|
||||
static struct drm_i915_gem_object *
|
||||
_i915_gem_object_create_stolen(struct drm_device *dev,
|
||||
struct drm_mm_node *stolen)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
obj = i915_gem_object_alloc(dev);
|
||||
if (obj == NULL)
|
||||
return NULL;
|
||||
|
||||
if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
|
||||
goto cleanup;
|
||||
|
||||
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
|
||||
|
||||
obj->pages = i915_pages_create_for_stolen(dev,
|
||||
stolen->start, stolen->size);
|
||||
if (obj->pages == NULL)
|
||||
goto cleanup;
|
||||
|
||||
obj->has_dma_mapping = true;
|
||||
obj->pages_pin_count = 1;
|
||||
obj->stolen = stolen;
|
||||
|
||||
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
|
||||
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
|
||||
obj->cache_level = I915_CACHE_NONE;
|
||||
|
||||
return obj;
|
||||
|
||||
cleanup:
|
||||
i915_gem_object_free(obj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct drm_i915_gem_object *
|
||||
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct drm_mm_node *stolen;
|
||||
|
||||
if (dev_priv->mm.stolen_base == 0)
|
||||
return NULL;
|
||||
|
||||
DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
|
||||
if (size == 0)
|
||||
return NULL;
|
||||
|
||||
stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
|
||||
if (stolen)
|
||||
stolen = drm_mm_get_block(stolen, size, 4096);
|
||||
if (stolen == NULL)
|
||||
return NULL;
|
||||
|
||||
obj = _i915_gem_object_create_stolen(dev, stolen);
|
||||
if (obj)
|
||||
return obj;
|
||||
|
||||
drm_mm_put_block(stolen);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void
|
||||
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
if (obj->stolen) {
|
||||
drm_mm_put_block(obj->stolen);
|
||||
obj->stolen = NULL;
|
||||
}
|
||||
}
|
||||
|
@ -291,18 +291,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Previous chips need to be aligned to the size of the smallest
|
||||
* fence register that can contain the object.
|
||||
*/
|
||||
if (INTEL_INFO(obj->base.dev)->gen == 3)
|
||||
size = 1024*1024;
|
||||
else
|
||||
size = 512*1024;
|
||||
|
||||
while (size < obj->base.size)
|
||||
size <<= 1;
|
||||
|
||||
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
|
||||
if (obj->gtt_space->size != size)
|
||||
return false;
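
The removed code sized fence regions by doubling from a per-generation minimum until the object fit, since fence registers cover power-of-two ranges; i915_gem_get_gtt_size() now centralizes that. The old round-up in isolation:

/* Round up from the smallest fence size until the object fits. */
static unsigned long fence_size(unsigned long min_size,
                                unsigned long obj_size)
{
    unsigned long size = min_size;   /* 1 MiB on gen3, 512 KiB earlier */
    while (size < obj_size)
        size <<= 1;
    return size;
}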
|
||||
|
||||
@ -387,15 +376,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,

        obj->map_and_fenceable =
            obj->gtt_space == NULL ||
            (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
            (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
             i915_gem_object_fence_ok(obj, args->tiling_mode));

        /* Rebind if we need a change of alignment */
        if (!obj->map_and_fenceable) {
            u32 unfenced_alignment =
                i915_gem_get_unfenced_gtt_alignment(dev,
                                                    obj->base.size,
                                                    args->tiling_mode);
                i915_gem_get_gtt_alignment(dev, obj->base.size,
                                           args->tiling_mode,
                                           false);
            if (obj->gtt_offset & (unfenced_alignment - 1))
                ret = i915_gem_object_unbind(obj);
        }
@ -415,6 +404,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
        /* we have to maintain this existing ABI... */
        args->stride = obj->stride;
        args->tiling_mode = obj->tiling_mode;

        /* Try to preallocate memory required to save swizzling on put-pages */
        if (i915_gem_object_needs_bit17_swizzle(obj)) {
            if (obj->bit_17 == NULL) {
                obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
                                      sizeof(long), GFP_KERNEL);
            }
        } else {
            kfree(obj->bit_17);
            obj->bit_17 = NULL;
        }

        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
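/*
 * BITS_TO_LONGS() above sizes a bitmap holding one bit per page of the
 * object. A freestanding sketch of the same arithmetic (the kernel macro
 * divides the bit count by BITS_PER_LONG and rounds up); the names below
 * are local stand-ins:
 */
#include <limits.h>
#include <stdlib.h>

#define MY_BITS_TO_LONGS(nr) \
    (((nr) + (sizeof(long) * CHAR_BIT) - 1) / (sizeof(long) * CHAR_BIT))

static unsigned long *alloc_page_bitmap(size_t obj_size, size_t page_shift)
{
    size_t pages = obj_size >> page_shift;  /* one bit per page */

    return calloc(MY_BITS_TO_LONGS(pages), sizeof(long));
}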
@ -45,32 +45,7 @@

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX \
    (I915_ASLE_INTERRUPT | \
     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
     I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
     I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
                                 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
                                 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
                                  DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
@ -215,6 +190,33 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
    return I915_READ(reg);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
    drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                hotplug_work);
    struct drm_device *dev = dev_priv->dev;
    struct drm_mode_config *mode_config = &dev->mode_config;
    struct intel_encoder *encoder;

    /* HPD irq before everything is fully set up. */
    if (!dev_priv->enable_hotplug_processing)
        return;

    mutex_lock(&mode_config->mutex);
    DRM_DEBUG_KMS("running encoder hotplug functions\n");

    list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
        if (encoder->hot_plug)
            encoder->hot_plug(encoder);

    mutex_unlock(&mode_config->mutex);

    /* Just fire off a uevent and let userspace tell us what to do */
    drm_helper_hpd_irq_event(dev);
}

static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
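/*
 * i915_hotplug_work_func() recovers its drm_i915_private from the embedded
 * work_struct with container_of(). A self-contained sketch of that idiom,
 * built on offsetof() the way the kernel macro is; the struct names here
 * are stand-ins, not the driver's real layout:
 */
#include <stddef.h>

struct work { int pending; };

struct device_private {
    int irq_count;
    struct work hotplug_work;   /* embedded, not a pointer */
};

#define my_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void hotplug_fn(struct work *w)
{
    struct device_private *priv =
        my_container_of(w, struct device_private, hotplug_work);

    priv->irq_count++;          /* the outer object is now reachable */
}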
@ -402,6 +404,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
//  queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

    wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

    wake_up_all(&dev_priv->gmbus_wait_queue);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
    struct drm_device *dev = (struct drm_device *) arg;
@ -411,7 +427,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
    unsigned long irqflags;
    int pipe;
    u32 pipe_stats[I915_MAX_PIPES];
    bool blc_event;

    atomic_inc(&dev_priv->irq_received);

@ -462,19 +477,19 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)

        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                         hotplug_status);
//      if (hotplug_status & dev_priv->hotplug_supported_mask)
//          queue_work(dev_priv->wq,
//                     &dev_priv->hotplug_work);
        if (hotplug_status & dev_priv->hotplug_supported_mask)
            queue_work(dev_priv->wq,
                       &dev_priv->hotplug_work);

        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
        I915_READ(PORT_HOTPLUG_STAT);
    }

    if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
        blc_event = true;
    if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
        gmbus_irq_handler(dev);

    if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
        gen6_queue_rps_work(dev_priv, pm_iir);
//  if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//      gen6_queue_rps_work(dev_priv, pm_iir);

    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);
@ -490,15 +505,19 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int pipe;

    printf("%s\n", __FUNCTION__);
    if (pch_iir & SDE_HOTPLUG_MASK)
        queue_work(dev_priv->wq, &dev_priv->hotplug_work);

    if (pch_iir & SDE_AUDIO_POWER_MASK)
        DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                         (pch_iir & SDE_AUDIO_POWER_MASK) >>
                         SDE_AUDIO_POWER_SHIFT);

    if (pch_iir & SDE_AUX_MASK)
        dp_aux_irq_handler(dev);

    if (pch_iir & SDE_GMBUS)
        DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
        gmbus_irq_handler(dev);

    if (pch_iir & SDE_AUDIO_HDCP_MASK)
        DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@ -532,16 +551,19 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int pipe;

    if (pch_iir & SDE_HOTPLUG_MASK_CPT)
        queue_work(dev_priv->wq, &dev_priv->hotplug_work);

    if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
        DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                         (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                         SDE_AUDIO_POWER_SHIFT_CPT);

    if (pch_iir & SDE_AUX_MASK_CPT)
        DRM_DEBUG_DRIVER("AUX channel interrupt\n");
        dp_aux_irq_handler(dev);

    if (pch_iir & SDE_GMBUS_CPT)
        DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
        gmbus_irq_handler(dev);

    if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
        DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@ -560,7 +582,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
    struct drm_device *dev = (struct drm_device *) arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 de_iir, gt_iir, de_ier, pm_iir;
    u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
    irqreturn_t ret = IRQ_NONE;
    int i;

@ -570,6 +592,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

    /* Disable south interrupts. We'll only write to SDEIIR once, so further
     * interrupts will be stored on its back queue, and then we'll be
     * able to process them after we restore SDEIER (as soon as we restore
     * it, we'll get an interrupt if SDEIIR still has something to process
     * due to its back queue). */
    sde_ier = I915_READ(SDEIER);
    I915_WRITE(SDEIER, 0);
    POSTING_READ(SDEIER);

    gt_iir = I915_READ(GTIIR);
    if (gt_iir) {
        snb_gt_irq_handler(dev, dev_priv, gt_iir);
@ -579,6 +610,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)

    de_iir = I915_READ(DEIIR);
    if (de_iir) {
        if (de_iir & DE_AUX_CHANNEL_A_IVB)
            dp_aux_irq_handler(dev);
#if 0
        if (de_iir & DE_GSE_IVB)
            intel_opregion_gse_intr(dev);
@ -596,8 +629,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
        if (de_iir & DE_PCH_EVENT_IVB) {
            u32 pch_iir = I915_READ(SDEIIR);

//          if (pch_iir & SDE_HOTPLUG_MASK_CPT)
//              queue_work(dev_priv->wq, &dev_priv->hotplug_work);
            cpt_irq_handler(dev, pch_iir);

            /* clear PCH hotplug event before clearing CPU irq */
@ -618,6 +649,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)

    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);
    I915_WRITE(SDEIER, sde_ier);
    POSTING_READ(SDEIER);

    return ret;
}
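/*
 * Both PCH-split handlers above follow the same shape: mask the master
 * enable, read and ack each IIR, then restore the saved enables so anything
 * still pending re-raises the interrupt. A hardware-free sketch of that
 * ordering; read_reg()/write_reg() and the register names are stand-ins
 * for I915_READ/I915_WRITE and the real registers:
 */
#include <stdint.h>

extern uint32_t read_reg(int reg);
extern void write_reg(int reg, uint32_t val);

enum { MASTER_IER, MASTER_IIR, SOUTH_IER, SOUTH_IIR };
#define MASTER_ENABLE (1u << 31)

int sample_irq_handler(void)
{
    uint32_t master_ier = read_reg(MASTER_IER);
    uint32_t south_ier = read_reg(SOUTH_IER);
    uint32_t iir;
    int handled = 0;

    write_reg(MASTER_IER, master_ier & ~MASTER_ENABLE); /* stop new irqs */
    write_reg(SOUTH_IER, 0);                            /* park south irqs */

    iir = read_reg(MASTER_IIR);
    if (iir) {
        /* ...dispatch per-bit handlers here... */
        write_reg(MASTER_IIR, iir);     /* ack exactly what we saw */
        handled = 1;
    }

    write_reg(SOUTH_IER, south_ier);    /* pending south bits re-trigger */
    write_reg(MASTER_IER, master_ier);
    return handled;
}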
@ -637,7 +670,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
    struct drm_device *dev = (struct drm_device *) arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int ret = IRQ_NONE;
    u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
    u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

    atomic_inc(&dev_priv->irq_received);

@ -646,13 +679,20 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    POSTING_READ(DEIER);

    /* Disable south interrupts. We'll only write to SDEIIR once, so further
     * interrupts will be stored on its back queue, and then we'll be
     * able to process them after we restore SDEIER (as soon as we restore
     * it, we'll get an interrupt if SDEIIR still has something to process
     * due to its back queue). */
    sde_ier = I915_READ(SDEIER);
    I915_WRITE(SDEIER, 0);
    POSTING_READ(SDEIER);

    de_iir = I915_READ(DEIIR);
    gt_iir = I915_READ(GTIIR);
    pch_iir = I915_READ(SDEIIR);
    pm_iir = I915_READ(GEN6_PMIIR);

    if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
        (!IS_GEN6(dev) || pm_iir == 0))
    if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
        goto done;

    ret = IRQ_HANDLED;
@ -661,6 +701,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        ilk_gt_irq_handler(dev, dev_priv, gt_iir);
    else
        snb_gt_irq_handler(dev, dev_priv, gt_iir);

    if (de_iir & DE_AUX_CHANNEL_A)
        dp_aux_irq_handler(dev);

#if 0
    if (de_iir & DE_GSE)
        intel_opregion_gse_intr(dev);
@ -684,12 +728,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)

    /* check event from PCH */
    if (de_iir & DE_PCH_EVENT) {
//      if (pch_iir & hotplug_mask)
//          queue_work(dev_priv->wq, &dev_priv->hotplug_work);
        u32 pch_iir = I915_READ(SDEIIR);

        if (HAS_PCH_CPT(dev))
            cpt_irq_handler(dev, pch_iir);
        else
            ibx_irq_handler(dev, pch_iir);

        /* should clear PCH hotplug event before clearing CPU irq */
        I915_WRITE(SDEIIR, pch_iir);
    }
#if 0
    if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
@ -698,8 +745,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
    if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
        gen6_queue_rps_work(dev_priv, pm_iir);
#endif
    /* should clear PCH hotplug event before clearing CPU irq */
    I915_WRITE(SDEIIR, pch_iir);
    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(DEIIR, de_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);
@ -707,6 +752,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
done:
    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);
    I915_WRITE(SDEIER, sde_ier);
    POSTING_READ(SDEIER);

    return ret;
}
@ -733,7 +780,7 @@ static void i915_get_extra_instdone(struct drm_device *dev,
        instdone[1] = I915_READ(INSTDONE1);
        break;
    default:
        WARN(1, "Unsupported platform\n");
        WARN_ONCE(1, "Unsupported platform\n");
    case 7:
        instdone[0] = I915_READ(GEN7_INSTDONE_1);
        instdone[1] = I915_READ(GEN7_SC_INSTDONE);
@ -771,7 +818,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
            goto unwind;

        local_irq_save(flags);
        if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
        if (reloc_offset < dev_priv->gtt.mappable_end &&
            src->has_global_gtt_mapping) {
            void __iomem *s;

@ -780,10 +827,18 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
             * captures what the GPU read.
             */

            s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
            s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
                                         reloc_offset);
            memcpy_fromio(d, s, PAGE_SIZE);
            io_mapping_unmap_atomic(s);
        } else if (src->stolen) {
            unsigned long offset;

            offset = dev_priv->mm.stolen_base;
            offset += src->stolen->start;
            offset += i << PAGE_SHIFT;

            memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
        } else {
            struct page *page;
            void *s;
@ -930,6 +985,8 @@ static void i915_gem_record_fences(struct drm_device *dev,
            error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
        break;

    default:
        BUG();
    }
}

@ -943,6 +1000,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
    if (!ring->get_seqno)
        return NULL;

    if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
        u32 acthd = I915_READ(ACTHD);

        if (WARN_ON(ring->id != RCS))
            return NULL;

        obj = ring->private;
        if (acthd >= obj->gtt_offset &&
            acthd < obj->gtt_offset + obj->base.size)
            return i915_error_object_create(dev_priv, obj);
    }

    seqno = ring->get_seqno(ring, false);
    list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
        if (obj->ring != ring)
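/*
 * i915_error_object_create() above picks one of three copy paths per page:
 * through the GTT aperture mapping, straight out of stolen memory, or from
 * a plain page. A simplified sketch of that dispatch; the struct and the
 * helpers are assumed stand-ins rather than the driver's real types:
 */
#include <string.h>
#include <stdbool.h>
#include <stddef.h>

struct src_desc {
    bool in_mappable_gtt;   /* reachable through the GTT aperture */
    void *gtt_va;           /* mapped aperture address, if so */
    void *stolen_va;        /* direct stolen-memory address, if any */
    void *page_va;          /* plain kernel mapping otherwise */
};

static void copy_error_page(void *dst, const struct src_desc *src,
                            size_t page_size)
{
    if (src->in_mappable_gtt)
        memcpy(dst, src->gtt_va, page_size);    /* what the GPU read */
    else if (src->stolen_va)
        memcpy(dst, src->stolen_va, page_size);
    else
        memcpy(dst, src->page_va, page_size);
}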
@ -1066,9 +1135,9 @@ static void i915_capture_error_state(struct drm_device *dev)
    unsigned long flags;
    int i, pipe;

    spin_lock_irqsave(&dev_priv->error_lock, flags);
    error = dev_priv->first_error;
    spin_unlock_irqrestore(&dev_priv->error_lock, flags);
    spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
    error = dev_priv->gpu_error.first_error;
    spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
    if (error)
        return;

@ -1079,7 +1148,8 @@ static void i915_capture_error_state(struct drm_device *dev)
        return;
    }

    DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
    DRM_INFO("capturing error event; look for more information in "
             "/sys/kernel/debug/dri/%d/i915_error_state\n",
             dev->primary->index);

    kref_init(&error->ref);
@ -1162,12 +1232,12 @@ static void i915_capture_error_state(struct drm_device *dev)
    error->overlay = intel_overlay_capture_error_state(dev);
    error->display = intel_display_capture_error_state(dev);

    spin_lock_irqsave(&dev_priv->error_lock, flags);
    if (dev_priv->first_error == NULL) {
        dev_priv->first_error = error;
    spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
    if (dev_priv->gpu_error.first_error == NULL) {
        dev_priv->gpu_error.first_error = error;
        error = NULL;
    }
    spin_unlock_irqrestore(&dev_priv->error_lock, flags);
    spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

    if (error)
        i915_error_state_free(&error->ref);
@ -1179,10 +1249,10 @@ void i915_destroy_error_state(struct drm_device *dev)
    struct drm_i915_error_state *error;
    unsigned long flags;

    spin_lock_irqsave(&dev_priv->error_lock, flags);
    error = dev_priv->first_error;
    dev_priv->first_error = NULL;
    spin_unlock_irqrestore(&dev_priv->error_lock, flags);
    spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
    error = dev_priv->gpu_error.first_error;
    dev_priv->gpu_error.first_error = NULL;
    spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

    if (error)
        kref_put(&error->ref, i915_error_state_free);
@ -1303,11 +1373,12 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
    i915_report_and_clear_eir(dev);

    if (wedged) {
//      INIT_COMPLETION(dev_priv->error_completion);
        atomic_set(&dev_priv->mm.wedged, 1);
        atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
                        &dev_priv->gpu_error.reset_counter);

        /*
         * Wakeup waiting processes so they don't hang
         * Wakeup waiting processes so that the reset work item
         * doesn't deadlock trying to grab various locks.
         */
        for_each_ring(ring, dev_priv, i)
            wake_up_all(&ring->irq_queue);
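/*
 * The error-state code above keeps at most one capture alive: the first
 * error wins and later ones are freed. A sketch of that publish-once
 * pattern, with a plain pthread mutex standing in for the gpu_error
 * spinlock:
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;
static void *first_error;       /* owned by whoever published it */

void publish_error(void *error)
{
    pthread_mutex_lock(&error_lock);
    if (first_error == NULL) {
        first_error = error;    /* first capture wins */
        error = NULL;
    }
    pthread_mutex_unlock(&error_lock);

    free(error);        /* lost the race: drop the duplicate capture */
}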
@ -1579,7 +1650,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
 * This register is the same on all known PCH chips.
 */

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
static void ibx_enable_hotplug(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 hotplug;
@ -1592,14 +1663,36 @@ static void ironlake_enable_pch_hotplug(struct drm_device *dev)
    I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 mask;

    if (HAS_PCH_IBX(dev))
        mask = SDE_HOTPLUG_MASK |
               SDE_GMBUS |
               SDE_AUX_MASK;
    else
        mask = SDE_HOTPLUG_MASK_CPT |
               SDE_GMBUS_CPT |
               SDE_AUX_MASK_CPT;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, ~mask);
    I915_WRITE(SDEIER, mask);
    POSTING_READ(SDEIER);

    ibx_enable_hotplug(dev);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    /* enable kind of interrupts always enabled */
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                       DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
                       DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
                       DE_AUX_CHANNEL_A;
    u32 render_irqs;
    u32 hotplug_mask;

    dev_priv->irq_mask = ~display_mask;

@ -1627,33 +1720,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    if (HAS_PCH_CPT(dev)) {
        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                        SDE_PORTB_HOTPLUG_CPT |
                        SDE_PORTC_HOTPLUG_CPT |
                        SDE_PORTD_HOTPLUG_CPT);
    } else {
        hotplug_mask = (SDE_CRT_HOTPLUG |
                        SDE_PORTB_HOTPLUG |
                        SDE_PORTC_HOTPLUG |
                        SDE_PORTD_HOTPLUG |
                        SDE_AUX_MASK);
    }

    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

//  ironlake_enable_pch_hotplug(dev);
    ibx_irq_postinstall(dev);

    if (IS_IRONLAKE_M(dev)) {
        /* Clear & enable PCU event interrupts */
        I915_WRITE(DEIIR, DE_PCU_EVENT);
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
//      ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
        ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
    }

    return 0;
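/*
 * ibx_irq_postinstall() above programs the south interrupt block in a
 * fixed order: ack stale IIR bits, unmask in IMR, then enable in IER,
 * finishing with a posting read. A hardware-free sketch of that sequence;
 * the register names and accessors are stand-ins:
 */
#include <stdint.h>

extern uint32_t read_reg(int reg);
extern void write_reg(int reg, uint32_t val);

enum { XIIR, XIMR, XIER };

void irq_block_enable(uint32_t mask)
{
    write_reg(XIIR, read_reg(XIIR));    /* ack anything stale */
    write_reg(XIMR, ~mask);             /* unmask the wanted sources */
    write_reg(XIER, mask);              /* then enable them */
    (void)read_reg(XIER);               /* posting read: flush the writes */
}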
@ -1667,9 +1740,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
        DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
        DE_PLANEC_FLIP_DONE_IVB |
        DE_PLANEB_FLIP_DONE_IVB |
        DE_PLANEA_FLIP_DONE_IVB;
        DE_PLANEA_FLIP_DONE_IVB |
        DE_AUX_CHANNEL_A_IVB;
    u32 render_irqs;
    u32 hotplug_mask;

    dev_priv->irq_mask = ~display_mask;

@ -1693,18 +1766,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                    SDE_PORTB_HOTPLUG_CPT |
                    SDE_PORTC_HOTPLUG_CPT |
                    SDE_PORTD_HOTPLUG_CPT);
    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

//  ironlake_enable_pch_hotplug(dev);
    ibx_irq_postinstall(dev);

    return 0;
}
@ -1713,7 +1775,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 enable_mask;
    u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
    u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
    u32 render_irqs;
    u16 msid;
@ -1742,6 +1803,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
//  msid |= (1<<14);
//  pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

    I915_WRITE(PORT_HOTPLUG_EN, 0);
    POSTING_READ(PORT_HOTPLUG_EN);

    I915_WRITE(VLV_IMR, dev_priv->irq_mask);
    I915_WRITE(VLV_IER, enable_mask);
    I915_WRITE(VLV_IIR, 0xffffffff);
@ -1750,6 +1814,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
    POSTING_READ(VLV_IER);

    i915_enable_pipestat(dev_priv, 0, pipestat_enable);
    i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
    i915_enable_pipestat(dev_priv, 1, pipestat_enable);

    I915_WRITE(VLV_IIR, 0xffffffff);
@ -1770,14 +1835,22 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif

    I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */

    return 0;
}

static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

    /* Note HDMI and DP share bits */
    if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
        hotplug_en |= HDMIB_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
        hotplug_en |= HDMIC_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
        hotplug_en |= HDMID_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
        hotplug_en |= PORTB_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
        hotplug_en |= PORTC_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
        hotplug_en |= PORTD_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
        hotplug_en |= SDVOC_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@ -1786,11 +1859,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
        hotplug_en |= CRT_HOTPLUG_INT_EN;
        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
    }
#endif

    I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

    return 0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
@ -2022,28 +2092,40 @@ static int i915_irq_postinstall(struct drm_device *dev)
        I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
        I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
        I915_USER_INTERRUPT;
#if 0

    if (I915_HAS_HOTPLUG(dev)) {
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);

        /* Enable in IER... */
        enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
        /* and unmask in IMR */
        dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
    }
#endif

    I915_WRITE(IMR, dev_priv->irq_mask);
    I915_WRITE(IER, enable_mask);
    POSTING_READ(IER);

//  intel_opregion_enable_asle(dev);

    return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 hotplug_en;

    if (I915_HAS_HOTPLUG(dev)) {
        u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
#if 0
        if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
            hotplug_en |= HDMIB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
            hotplug_en |= HDMIC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
            hotplug_en |= HDMID_HOTPLUG_INT_EN;
        hotplug_en = I915_READ(PORT_HOTPLUG_EN);

        if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
            hotplug_en |= PORTB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
            hotplug_en |= PORTC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
            hotplug_en |= PORTD_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
            hotplug_en |= SDVOC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@ -2052,15 +2134,11 @@ static int i915_irq_postinstall(struct drm_device *dev)
            hotplug_en |= CRT_HOTPLUG_INT_EN;
            hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
        }
#endif

        /* Ignore TV since it's buggy */

        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
    }

//  intel_opregion_enable_asle(dev);

    return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
@ -2119,9 +2197,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)

        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                         hotplug_status);
//      if (hotplug_status & dev_priv->hotplug_supported_mask)
//          queue_work(dev_priv->wq,
//                     &dev_priv->hotplug_work);
        if (hotplug_status & dev_priv->hotplug_supported_mask)
            queue_work(dev_priv->wq,
                       &dev_priv->hotplug_work);

        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
        POSTING_READ(PORT_HOTPLUG_STAT);
@ -2220,7 +2298,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 hotplug_en;
    u32 enable_mask;
    u32 error_mask;

@ -2241,6 +2318,7 @@ static int i965_irq_postinstall(struct drm_device *dev)

    dev_priv->pipestat[0] = 0;
    dev_priv->pipestat[1] = 0;
    i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

    /*
     * Enable some error detection, note the instruction error mask
@ -2261,15 +2339,27 @@ static int i965_irq_postinstall(struct drm_device *dev)
    I915_WRITE(IER, enable_mask);
    POSTING_READ(IER);

    I915_WRITE(PORT_HOTPLUG_EN, 0);
    POSTING_READ(PORT_HOTPLUG_EN);

//  intel_opregion_enable_asle(dev);

    return 0;
}

static void i965_hpd_irq_setup(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 hotplug_en;

    /* Note HDMI and DP share hotplug bits */
    hotplug_en = 0;
#if 0
    if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
        hotplug_en |= HDMIB_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
        hotplug_en |= HDMIC_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
        hotplug_en |= HDMID_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
        hotplug_en |= PORTB_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
        hotplug_en |= PORTC_HOTPLUG_INT_EN;
    if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
        hotplug_en |= PORTD_HOTPLUG_INT_EN;
    if (IS_G4X(dev)) {
        if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
            hotplug_en |= SDVOC_HOTPLUG_INT_EN;
@ -2292,14 +2382,10 @@ static int i965_irq_postinstall(struct drm_device *dev)
        hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
    }
#endif

    /* Ignore TV since it's buggy */

    I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

//  intel_opregion_enable_asle(dev);

    return 0;
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
@ -2358,9 +2444,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)

        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                         hotplug_status);
//      if (hotplug_status & dev_priv->hotplug_supported_mask)
//          queue_work(dev_priv->wq,
//                     &dev_priv->hotplug_work);
        if (hotplug_status & dev_priv->hotplug_supported_mask)
            queue_work(dev_priv->wq,
                       &dev_priv->hotplug_work);

        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
        I915_READ(PORT_HOTPLUG_STAT);
@ -2395,6 +2481,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
//  if (blc_event || (iir & I915_ASLE_INTERRUPT))
//      intel_opregion_asle_intr(dev);

    if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
        gmbus_irq_handler(dev);

    /* With MSI, interrupts are only generated when iir
     * transitions from zero to nonzero. If another bit got
     * set while we were handling the existing iir bits, then
@ -2445,20 +2534,22 @@ void intel_irq_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);

//  pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);


    if (IS_VALLEYVIEW(dev)) {
        dev->driver->irq_handler = valleyview_irq_handler;
        dev->driver->irq_preinstall = valleyview_irq_preinstall;
        dev->driver->irq_postinstall = valleyview_irq_postinstall;
    } else if (IS_IVYBRIDGE(dev)) {
        dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
    } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
        /* Share pre & uninstall handlers with ILK/SNB */
        dev->driver->irq_handler = ivybridge_irq_handler;
        dev->driver->irq_preinstall = ironlake_irq_preinstall;
        dev->driver->irq_postinstall = ivybridge_irq_postinstall;
    } else if (IS_HASWELL(dev)) {
        /* Share interrupts handling with IVB */
        dev->driver->irq_handler = ivybridge_irq_handler;
        dev->driver->irq_preinstall = ironlake_irq_preinstall;
        dev->driver->irq_postinstall = ivybridge_irq_postinstall;
    } else if (HAS_PCH_SPLIT(dev)) {
        dev->driver->irq_handler = ironlake_irq_handler;
        dev->driver->irq_preinstall = ironlake_irq_preinstall;
@ -2469,16 +2560,25 @@ void intel_irq_init(struct drm_device *dev)
        dev->driver->irq_preinstall = i915_irq_preinstall;
        dev->driver->irq_postinstall = i915_irq_postinstall;
        dev->driver->irq_handler = i915_irq_handler;
        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
    } else {
        dev->driver->irq_preinstall = i965_irq_preinstall;
        dev->driver->irq_postinstall = i965_irq_postinstall;
        dev->driver->irq_handler = i965_irq_handler;
        dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
    }
}

//  printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
}

void intel_hpd_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (dev_priv->display.hpd_irq_setup)
        dev_priv->display.hpd_irq_setup(dev);
}


irqreturn_t intel_irq_handler(struct drm_device *dev)
{

@ -141,8 +141,15 @@
#define VGA_MSR_MEM_EN (1<<1)
#define VGA_MSR_CGA_MODE (1<<0)

#define VGA_SR_INDEX 0x3c4
#define VGA_SR_DATA 0x3c5
/*
 * SR01 is the only VGA register touched on non-UMS setups.
 * VLV doesn't do UMS, so the sequencer index/data registers
 * are the only VGA registers which need to include
 * display_mmio_offset.
 */
#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
#define SR01 1
#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)

#define VGA_AR_INDEX 0x3c0
#define VGA_AR_VID_EN (1<<5)
@ -301,6 +308,7 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
@ -335,17 +343,19 @@
 * 0x801c/3c: core clock bits
 * 0x8048/68: low pass filter coefficients
 * 0x8100: fast clock controls
 *
 * DPIO is VLV only.
 */
#define DPIO_PKT 0x2100
#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100)
#define DPIO_RID (0<<24)
#define DPIO_OP_WRITE (1<<16)
#define DPIO_OP_READ (0<<16)
#define DPIO_PORTID (0x12<<8)
#define DPIO_BYTE (0xf<<4)
#define DPIO_BUSY (1<<0) /* status only */
#define DPIO_DATA 0x2104
#define DPIO_REG 0x2108
#define DPIO_CTL 0x2110
#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104)
#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108)
#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
@ -556,13 +566,13 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_GUNIT_CLOCK_GATE 0x182060
#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
#define GCFG_DIS (1<<8)
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
#define VLV_IMR 0x1820a8
#define VLV_ISR 0x1820ac
#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
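/*
 * The register churn in this file rebases display registers on
 * dev_priv->info->display_mmio_offset, so one macro serves both classic
 * parts (offset 0) and VLV, whose display block sits at a different base.
 * A compile-only sketch of the trick with made-up names:
 */
#include <stdint.h>

struct dev_info { uint32_t display_mmio_offset; };
struct my_dev_priv { const struct dev_info *info; };

/* Expanded wherever a local `dev_priv` is in scope, as in i915_reg.h. */
#define MY_PIPEACONF(dev_priv) \
    ((dev_priv)->info->display_mmio_offset + 0x70008)

static uint32_t pipeaconf_addr(struct my_dev_priv *dev_priv)
{
    return MY_PIPEACONF(dev_priv);  /* base offset plus fixed register */
}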
@ -735,6 +745,7 @@
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
@ -921,8 +932,8 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
#define _DPLL_A 0x06014
#define _DPLL_B 0x06018
#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
@ -943,23 +954,6 @@
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)

#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
#define SR01_SCREEN_OFF (1<<5)

#define PPCR 0x61204
#define PPCR_ON (1<<0)

#define DVOB 0x61140
#define DVOB_ON (1<<31)
#define DVOC 0x61160
#define DVOC_ON (1<<31)
#define LVDS 0x61180
#define LVDS_ON (1<<31)

/* Scratch pad debug 0 reg:
 */
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/*
 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@ -998,7 +992,7 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
#define _DPLL_A_MD 0x0601c /* 965+ only */
#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
/*
 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
 *
@ -1035,7 +1029,7 @@
 */
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)

#define _FPA0 0x06040
@ -1178,15 +1172,15 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */

#define FW_BLC_SELF_VLV 0x6500
#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
#define FW_CSPWRDWNEN (1<<15)

/*
 * Palette regs
 */

#define _PALETTE_A 0x0a000
#define _PALETTE_B 0x0a800
#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)

/* MCH MMIO space */
@ -1242,6 +1236,10 @@
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)

/** snb MCH registers for priority tuning */
#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc

/* Clocking configuration register */
#define CLKCFG 0x10c00
@ -1551,26 +1549,26 @@
 */

/* Pipe A timing regs */
#define _HTOTAL_A 0x60000
#define _HBLANK_A 0x60004
#define _HSYNC_A 0x60008
#define _VTOTAL_A 0x6000c
#define _VBLANK_A 0x60010
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)

/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
#define _HBLANK_B 0x61004
#define _HSYNC_B 0x61008
#define _VTOTAL_B 0x6100c
#define _VBLANK_B 0x61010
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
#define _VSYNCSHIFT_B 0x61028
#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)


#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
@ -1615,9 +1613,9 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
#define ADPA_VSYNC_CNTL_ENABLE 0
#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
#define ADPA_HSYNC_CNTL_ENABLE 0
#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
#define ADPA_VSYNC_ACTIVE_LOW 0
@ -1631,13 +1629,10 @@


/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN 0x61110
#define HDMIB_HOTPLUG_INT_EN (1 << 29)
#define DPB_HOTPLUG_INT_EN (1 << 29)
#define HDMIC_HOTPLUG_INT_EN (1 << 28)
#define DPC_HOTPLUG_INT_EN (1 << 28)
#define HDMID_HOTPLUG_INT_EN (1 << 27)
#define DPD_HOTPLUG_INT_EN (1 << 27)
#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110)
#define PORTB_HOTPLUG_INT_EN (1 << 29)
#define PORTC_HOTPLUG_INT_EN (1 << 28)
#define PORTD_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
@ -1658,21 +1653,14 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)

#define PORT_HOTPLUG_STAT 0x61114
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
/* HDMI/DP bits are gen4+ */
#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (3 << 21)
#define DPC_HOTPLUG_INT_STATUS (3 << 19)
#define DPB_HOTPLUG_INT_STATUS (3 << 17)
/* HDMI bits are shared with the DP bits */
#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
@ -1877,7 +1865,7 @@
#define PP_DIVISOR 0x61210

/* Panel fitting */
#define PFIT_CONTROL 0x61230
#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
@ -1895,9 +1883,7 @@
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS 0x61234
#define PFIT_VERT_SCALE_MASK 0xfff00000
#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
@ -1909,7 +1895,7 @@
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff

#define PFIT_AUTO_RATIOS 0x61238
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)

/* Backlight control */
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
@ -2639,10 +2625,10 @@
/* Display & cursor control */

/* Pipe A */
#define _PIPEADSL 0x70000
#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@ -2671,18 +2657,19 @@
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
#define PIPECONF_BPP_10 (1<<5)
#define PIPECONF_BPP_6 (2<<5)
#define PIPECONF_BPP_12 (3<<5)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0<<5)
#define PIPECONF_10BPC (1<<5)
#define PIPECONF_6BPC (2<<5)
#define PIPECONF_12BPC (3<<5)
#define PIPECONF_DITHER_EN (1<<4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
#define PIPECONF_DITHER_TYPE_SP (0<<2)
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@ -2693,7 +2680,7 @@
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
@ -2703,7 +2690,7 @@
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
@ -2719,11 +2706,6 @@
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
#define PIPE_8BPC (0 << 5)
#define PIPE_10BPC (1 << 5)
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)

#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
@ -2732,7 +2714,7 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)

#define VLV_DPFLIPSTAT 0x70028
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
@ -2746,7 +2728,7 @@
#define SPRITEA_FLIPDONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)

#define DPINVGTT 0x7002c /* VLV only */
#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
@ -2774,7 +2756,7 @@
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0

#define DSPFW1 0x70034
#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
@ -2782,11 +2764,11 @@
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 0x70038
#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 0x7003c
#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
@ -2798,13 +2780,13 @@
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_16 16
#define VLV_DDL1 0x70050
#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
#define VLV_DDL2 0x70054
#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
@ -2948,10 +2930,10 @@
 * } while (high1 != high2);
 * frame = (high1 << 8) | low1;
 */
#define _PIPEAFRAMEHIGH 0x70040
#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
#define _PIPEAFRAMEPIXEL 0x70044
#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
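/*
 * The comment above _PIPEAFRAMEHIGH describes re-reading the high half of
 * the frame counter until it is stable around the low read. A sketch of
 * that classic split-counter loop, with read_reg() assumed as the MMIO
 * accessor; the low byte lives in bits [31:24] per the masks above:
 */
#include <stdint.h>

extern uint32_t read_reg(int reg);

enum { FRAME_HIGH, FRAME_LOW };

uint32_t read_frame_counter(void)
{
    uint32_t high1, high2, low;

    do {
        high1 = read_reg(FRAME_HIGH);
        low = read_reg(FRAME_LOW) >> 24;    /* low byte in [31:24] */
        high2 = read_reg(FRAME_HIGH);
    } while (high1 != high2);   /* retry if the low byte wrapped mid-read */

    return (high1 << 8) | low;
}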
@ -2962,11 +2944,12 @@
|
||||
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
|
||||
|
||||
/* Cursor A & B regs */
|
||||
#define _CURACNTR 0x70080
|
||||
#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080)
|
||||
/* Old style CUR*CNTR flags (desktop 8xx) */
|
||||
#define CURSOR_ENABLE 0x80000000
|
||||
#define CURSOR_GAMMA_ENABLE 0x40000000
|
||||
#define CURSOR_STRIDE_MASK 0x30000000
|
||||
#define CURSOR_PIPE_CSC_ENABLE (1<<24)
|
||||
#define CURSOR_FORMAT_SHIFT 24
|
||||
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
|
||||
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
|
||||
@ -2983,16 +2966,16 @@
|
||||
#define MCURSOR_PIPE_A 0x00
|
||||
#define MCURSOR_PIPE_B (1 << 28)
|
||||
#define MCURSOR_GAMMA_ENABLE (1 << 26)
|
||||
#define _CURABASE 0x70084
|
||||
#define _CURAPOS 0x70088
|
||||
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
|
||||
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
|
||||
#define CURSOR_POS_MASK 0x007FF
|
||||
#define CURSOR_POS_SIGN 0x8000
|
||||
#define CURSOR_X_SHIFT 0
|
||||
#define CURSOR_Y_SHIFT 16
|
||||
#define CURSIZE 0x700a0
|
||||
#define _CURBCNTR 0x700c0
|
||||
#define _CURBBASE 0x700c4
|
||||
#define _CURBPOS 0x700c8
|
||||
#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0)
|
||||
#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4)
|
||||
#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8)
|
||||
|
||||
#define _CURBCNTR_IVB 0x71080
|
||||
#define _CURBBASE_IVB 0x71084
|
||||
@ -3007,7 +2990,7 @@
|
||||
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
|
||||
|
||||
/* Display A control */
|
||||
#define _DSPACNTR 0x70180
|
||||
#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
|
||||
#define DISPLAY_PLANE_ENABLE (1<<31)
|
||||
#define DISPLAY_PLANE_DISABLE 0
|
||||
#define DISPPLANE_GAMMA_ENABLE (1<<30)
|
||||
@ -3028,6 +3011,7 @@
|
||||
#define DISPPLANE_RGBA888 (0xf<<26)
|
||||
#define DISPPLANE_STEREO_ENABLE (1<<25)
|
||||
#define DISPPLANE_STEREO_DISABLE 0
|
||||
#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
|
||||
#define DISPPLANE_SEL_PIPE_SHIFT 24
|
||||
#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
|
||||
#define DISPPLANE_SEL_PIPE_A 0
|
||||
@ -3040,14 +3024,14 @@
|
||||
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
|
||||
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
|
||||
#define DISPPLANE_TILED (1<<10)
|
||||
#define _DSPAADDR 0x70184
|
||||
#define _DSPASTRIDE 0x70188
|
||||
#define _DSPAPOS 0x7018C /* reserved */
|
||||
#define _DSPASIZE 0x70190
|
||||
#define _DSPASURF 0x7019C /* 965+ only */
|
||||
#define _DSPATILEOFF 0x701A4 /* 965+ only */
|
||||
#define _DSPAOFFSET 0x701A4 /* HSW */
|
||||
#define _DSPASURFLIVE 0x701AC
|
||||
#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
|
||||
#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
|
||||
#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
|
||||
#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
|
||||
#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
|
||||
#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
|
||||
#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
|
||||
#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
|
||||
|
||||
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
|
||||
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
|
||||
@ -3068,44 +3052,44 @@
	(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))

/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
#define SWF02 0x71418
#define SWF03 0x7141c
#define SWF04 0x71420
#define SWF05 0x71424
#define SWF06 0x71428
#define SWF10 0x70410
#define SWF11 0x70414
#define SWF14 0x71420
#define SWF30 0x72414
#define SWF31 0x72418
#define SWF32 0x7241c
#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414)
#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418)
#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c)
#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420)
#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424)
#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428)
#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410)
#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414)
#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420)
#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414)
#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418)
#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c)

/* Pipe B */
#define _PIPEBDSL 0x71000
#define _PIPEBCONF 0x71008
#define _PIPEBSTAT 0x71024
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
#define _PIPEB_FRMCOUNT_GM45 0x71040
#define _PIPEB_FLIPCOUNT_GM45 0x71044

/* Display B control */
#define _DSPBCNTR 0x71180
#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
#define _DSPBADDR 0x71184
#define _DSPBSTRIDE 0x71188
#define _DSPBPOS 0x7118C
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
#define _DSPBOFFSET 0x711A4
#define _DSPBSURFLIVE 0x711AC
#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184)
#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188)
#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C)
#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190)
#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C)
#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4)
#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4)
#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC)

/* Sprite A control */
#define _DVSACNTR 0x72180
@ -3116,6 +3100,7 @@
#define DVS_FORMAT_RGBX101010 (1<<25)
#define DVS_FORMAT_RGBX888 (2<<25)
#define DVS_FORMAT_RGBX161616 (3<<25)
#define DVS_PIPE_CSC_ENABLE (1<<24)
#define DVS_SOURCE_KEY (1<<22)
#define DVS_RGB_ORDER_XBGR (1<<20)
#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
@ -3183,7 +3168,7 @@
#define SPRITE_FORMAT_RGBX161616 (3<<25)
#define SPRITE_FORMAT_YUV444 (4<<25)
#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
#define SPRITE_CSC_ENABLE (1<<24)
#define SPRITE_PIPE_CSC_ENABLE (1<<24)
#define SPRITE_SOURCE_KEY (1<<22)
#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
@ -3254,6 +3239,8 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)

#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)

/* Ironlake */

#define CPU_VGACNTRL 0x41000
@ -3294,41 +3281,41 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff


#define _PIPEA_DATA_M1 0x60030
#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
#define _PIPEA_DATA_N1 0x60034
#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
#define PIPE_DATA_N1_OFFSET 0

#define _PIPEA_DATA_M2 0x60038
#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
#define PIPE_DATA_M2_OFFSET 0
#define _PIPEA_DATA_N2 0x6003c
#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
#define PIPE_DATA_N2_OFFSET 0

#define _PIPEA_LINK_M1 0x60040
#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
#define PIPE_LINK_M1_OFFSET 0
#define _PIPEA_LINK_N1 0x60044
#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
#define PIPE_LINK_N1_OFFSET 0

#define _PIPEA_LINK_M2 0x60048
#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
#define PIPE_LINK_M2_OFFSET 0
#define _PIPEA_LINK_N2 0x6004c
#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
#define PIPE_LINK_N2_OFFSET 0

/* PIPEB timing regs are the same, starting from 0x61000 */

#define _PIPEB_DATA_M1 0x61030
#define _PIPEB_DATA_N1 0x61034
#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)

#define _PIPEB_DATA_M2 0x61038
#define _PIPEB_DATA_N2 0x6103c
#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)

#define _PIPEB_LINK_M1 0x61040
#define _PIPEB_LINK_N1 0x61044
#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)

#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)

#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
@ -3581,27 +3568,30 @@
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18)
#define PORTD_HOTPLUG_NO_DETECT (0)
#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0)
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10)
#define PORTC_HOTPLUG_NO_DETECT (0)
#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0)
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2)
#define PORTB_HOTPLUG_NO_DETECT (0)
#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
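The rewrite above turns each port's hotplug status into a proper two-bit field (0 = no detect, 1 = short pulse, 2 = long pulse) with an explicit mask, replacing the old pair of overlapping single-bit defines. A hedged sketch of how a handler would decode the new encoding; the register sample value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define PORTB_HOTPLUG_STATUS_MASK  (0x3 << 0)
#define PORTB_HOTPLUG_NO_DETECT    (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT  (2 << 0)

int main(void)
{
    uint32_t hotplug_stat = 0x00010002; /* hypothetical sample, not real HW */

    switch (hotplug_stat & PORTB_HOTPLUG_STATUS_MASK) {
    case PORTB_HOTPLUG_LONG_DETECT:
        printf("port B: long pulse (plug/unplug)\n");
        break;
    case PORTB_HOTPLUG_SHORT_DETECT:
        printf("port B: short pulse (e.g. sink IRQ)\n");
        break;
    default:
        printf("port B: nothing detected\n");
        break;
    }
    return 0;
}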

#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
@ -3722,13 +3712,13 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)

#define VLV_VIDEO_DIP_CTL_A 0x60200
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)

#define VLV_VIDEO_DIP_CTL_B 0x61170
#define VLV_VIDEO_DIP_DATA_B 0x61174
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)

#define VLV_TVIDEO_DIP_CTL(pipe) \
	_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
@ -3820,8 +3810,6 @@
#define TRANS_FSYNC_DELAY_HB2 (1<<27)
#define TRANS_FSYNC_DELAY_HB3 (2<<27)
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_VIDEO_AUDIO (0<<26)
#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
#define TRANS_INTERLACED (3<<21)
@ -3927,7 +3915,7 @@
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
#define FDI_12BPC (3<<16)
#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
#define FDI_RX_PLL_ENABLE (1<<13)
#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
@ -4020,17 +4008,17 @@
#define LVDS_DETECTED (1 << 1)

/* vlv has 2 sets of panel control regs. */
#define PIPEA_PP_STATUS 0x61200
#define PIPEA_PP_CONTROL 0x61204
#define PIPEA_PP_ON_DELAYS 0x61208
#define PIPEA_PP_OFF_DELAYS 0x6120c
#define PIPEA_PP_DIVISOR 0x61210
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)

#define PIPEB_PP_STATUS 0x61300
#define PIPEB_PP_CONTROL 0x61304
#define PIPEB_PP_ON_DELAYS 0x61308
#define PIPEB_PP_OFF_DELAYS 0x6130c
#define PIPEB_PP_DIVISOR 0x61310
#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)

#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
@ -4211,7 +4199,9 @@
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
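GEN6_RPSTAT1 reports the current actual GT frequency (CAGF) in a bitfield whose position moved on Haswell, hence the separate HSW shift and mask added above. A hedged sketch of the read-out, with the register value mocked up rather than read from hardware (the 50 MHz unit is the GEN6 convention assumed here):

#include <stdint.h>
#include <stdio.h>

#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT  7
#define GEN6_CAGF_MASK  (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK   (0x7f << HSW_CAGF_SHIFT)

int main(void)
{
    uint32_t rpstat = 0x1a00;   /* hypothetical RPSTAT1 sample value */
    int is_haswell = 0;         /* platform picks the field layout   */
    uint32_t cagf;

    if (is_haswell)
        cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
    else
        cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;

    /* assuming GEN6 units of 50 MHz: 0x1a * 50 = 1300 MHz */
    printf("current GT frequency: %u MHz\n", cagf * 50);
    return 0;
}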
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
@ -4280,8 +4270,8 @@
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
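The old encode macro above was plainly broken: it divided before subtracting, and its dangling `?:` expression discarded the computed value entirely. The corrected pair is the inverse mapping VID = (mV - 245) / 5 and mV = VID * 5 + 245, which a quick round trip confirms:

#include <stdio.h>

#define GEN6_ENCODE_RC6_VID(mv)   (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)

int main(void)
{
    int mv = 450;                       /* example voltage in mV       */
    int vid = GEN6_ENCODE_RC6_VID(mv);  /* (450 - 245) / 5 = 41        */

    /* prints "vid=41 mv=450" -- decode inverts encode exactly */
    printf("vid=%d mv=%d\n", vid, GEN6_DECODE_RC6_VID(vid));
    return 0;
}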
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8

@ -4322,7 +4312,7 @@
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)

#define G4X_AUD_VID_DID 0x62020
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
@ -4438,10 +4428,10 @@
#define AUDIO_CP_READY_C (1<<9)

/* HSW Power Wells */
#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
#define HSW_PWR_WELL_ENABLE (1<<31)
#define HSW_PWR_WELL_STATE (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
@ -4524,6 +4514,7 @@
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
@ -4657,4 +4648,51 @@
#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
#define WM_DBG_DISALLOW_SPRITE (1<<2)

/* pipe CSC */
#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
#define _PIPE_A_CSC_COEFF_BY 0x49014
#define _PIPE_A_CSC_COEFF_RU_GU 0x49018
#define _PIPE_A_CSC_COEFF_BU 0x4901c
#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
#define _PIPE_A_CSC_COEFF_BV 0x49024
#define _PIPE_A_CSC_MODE 0x49028
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
#define _PIPE_A_CSC_PREOFF_LO 0x49038
#define _PIPE_A_CSC_POSTOFF_HI 0x49040
#define _PIPE_A_CSC_POSTOFF_ME 0x49044
#define _PIPE_A_CSC_POSTOFF_LO 0x49048

#define _PIPE_B_CSC_COEFF_RY_GY 0x49110
#define _PIPE_B_CSC_COEFF_BY 0x49114
#define _PIPE_B_CSC_COEFF_RU_GU 0x49118
#define _PIPE_B_CSC_COEFF_BU 0x4911c
#define _PIPE_B_CSC_COEFF_RV_GV 0x49120
#define _PIPE_B_CSC_COEFF_BV 0x49124
#define _PIPE_B_CSC_MODE 0x49128
#define _PIPE_B_CSC_PREOFF_HI 0x49130
#define _PIPE_B_CSC_PREOFF_ME 0x49134
#define _PIPE_B_CSC_PREOFF_LO 0x49138
#define _PIPE_B_CSC_POSTOFF_HI 0x49140
#define _PIPE_B_CSC_POSTOFF_ME 0x49144
#define _PIPE_B_CSC_POSTOFF_LO 0x49148

#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
#define CSC_MODE_YUV_TO_RGB (1 << 0)

#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)

#endif /* _I915_REG_H_ */
@ -44,6 +44,9 @@

struct intel_crt {
	struct intel_encoder base;
	/* DPMS state is stored in the connector, which we need in the
	 * encoder's enable/disable callbacks */
	struct intel_connector *connector;
	bool force_hotplug_required;
	u32 adpa_reg;
};
@ -80,29 +83,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
	return true;
}

static void intel_disable_crt(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct intel_crt *crt = intel_encoder_to_crt(encoder);
	u32 temp;

	temp = I915_READ(crt->adpa_reg);
	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
	temp &= ~ADPA_DAC_ENABLE;
	I915_WRITE(crt->adpa_reg, temp);
}

static void intel_enable_crt(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct intel_crt *crt = intel_encoder_to_crt(encoder);
	u32 temp;

	temp = I915_READ(crt->adpa_reg);
	temp |= ADPA_DAC_ENABLE;
	I915_WRITE(crt->adpa_reg, temp);
}

/* Note: The caller is required to filter out dpms modes not supported by the
 * platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@ -134,6 +114,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
	I915_WRITE(crt->adpa_reg, temp);
}

static void intel_disable_crt(struct intel_encoder *encoder)
{
	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}

static void intel_enable_crt(struct intel_encoder *encoder)
{
	struct intel_crt *crt = intel_encoder_to_crt(encoder);

	intel_crt_set_dpms(encoder, crt->connector->base.dpms);
}

static void intel_crt_dpms(struct drm_connector *connector, int mode)
{
	struct drm_device *dev = connector->dev;
@ -259,6 +252,8 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
	u32 adpa;
	bool ret;

	ENTER();

	/* The first time through, trigger an explicit detection cycle */
	if (crt->force_hotplug_required) {
		bool turn_off_dac = HAS_PCH_SPLIT(dev);
@ -266,59 +261,64 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)

		crt->force_hotplug_required = 0;

		save_adpa = adpa = I915_READ(PCH_ADPA);
		save_adpa = adpa = I915_READ(crt->adpa_reg);
		DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);

		adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
		if (turn_off_dac)
			adpa &= ~ADPA_DAC_ENABLE;

		I915_WRITE(PCH_ADPA, adpa);
		I915_WRITE(crt->adpa_reg, adpa);

		if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
		if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
			     1000))
			DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");

		if (turn_off_dac) {
			I915_WRITE(PCH_ADPA, save_adpa);
			POSTING_READ(PCH_ADPA);
			I915_WRITE(crt->adpa_reg, save_adpa);
			POSTING_READ(crt->adpa_reg);
		}
	}

	/* Check the status to see if both blue and green are on now */
	adpa = I915_READ(PCH_ADPA);
	adpa = I915_READ(crt->adpa_reg);
	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
		ret = true;
	else
		ret = false;
	DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);

	LEAVE();

	return ret;
}

static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct intel_crt *crt = intel_attached_crt(connector);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 adpa;
	bool ret;
	u32 save_adpa;

	save_adpa = adpa = I915_READ(ADPA);
	ENTER();

	save_adpa = adpa = I915_READ(crt->adpa_reg);
	DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);

	adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;

	I915_WRITE(ADPA, adpa);
	I915_WRITE(crt->adpa_reg, adpa);

	if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
	if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
		     1000)) {
		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
		I915_WRITE(ADPA, save_adpa);
		I915_WRITE(crt->adpa_reg, save_adpa);
	}

	/* Check the status to see if both blue and green are on now */
	adpa = I915_READ(ADPA);
	adpa = I915_READ(crt->adpa_reg);
	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
		ret = true;
	else
@ -329,6 +329,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
	/* FIXME: debug force function and remove */
	ret = true;

	LEAVE();

	return ret;
}

@ -348,6 +350,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
	bool ret = false;
	int i, tries = 0;

	ENTER();

	if (HAS_PCH_SPLIT(dev))
		return intel_ironlake_crt_detect_hotplug(connector);

@ -386,6 +390,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
	/* and put the bits back */
	I915_WRITE(PORT_HOTPLUG_EN, orig);

	LEAVE();

	return ret;
}

@ -394,6 +400,8 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector,
{
	struct edid *edid;

	ENTER();

	edid = drm_get_edid(connector, i2c);

	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
@ -403,6 +411,8 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector,
		intel_gmbus_force_bit(i2c, false);
	}

	LEAVE();

	return edid;
}

@ -664,11 +674,11 @@ static void intel_crt_reset(struct drm_connector *connector)
	if (HAS_PCH_SPLIT(dev)) {
		u32 adpa;

		adpa = I915_READ(PCH_ADPA);
		adpa = I915_READ(crt->adpa_reg);
		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
		adpa |= ADPA_HOTPLUG_BITS;
		I915_WRITE(PCH_ADPA, adpa);
		POSTING_READ(PCH_ADPA);
		I915_WRITE(crt->adpa_reg, adpa);
		POSTING_READ(crt->adpa_reg);

		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
		crt->force_hotplug_required = 1;
@ -683,7 +693,6 @@ static void intel_crt_reset(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
	.mode_fixup = intel_crt_mode_fixup,
	.mode_set = intel_crt_mode_set,
	.disable = intel_encoder_noop,
};

static const struct drm_connector_funcs intel_crt_connector_funcs = {
@ -723,6 +732,7 @@ void intel_crt_init(struct drm_device *dev)
	}

	connector = &intel_connector->base;
	crt->connector = intel_connector;
	drm_connector_init(dev, &intel_connector->base,
			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);

@ -753,7 +763,7 @@ void intel_crt_init(struct drm_device *dev)

	crt->base.disable = intel_disable_crt;
	crt->base.enable = intel_enable_crt;
	if (IS_HASWELL(dev))
	if (HAS_DDI(dev))
		crt->base.get_hw_state = intel_ddi_get_hw_state;
	else
		crt->base.get_hw_state = intel_crt_get_hw_state;
@ -777,10 +787,14 @@ void intel_crt_init(struct drm_device *dev)
	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;

	/*
	 * TODO: find a proper way to discover whether we need to set the
	 * polarity reversal bit or not, instead of relying on the BIOS.
	 * TODO: find a proper way to discover whether we need to set the
	 * polarity and link reversal bits or not, instead of relying on the
	 * BIOS.
	 */
	if (HAS_PCH_LPT(dev))
		dev_priv->fdi_rx_polarity_reversed =
		     !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
	if (HAS_PCH_LPT(dev)) {
		u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
				 FDI_RX_LINK_REVERSAL_OVERRIDE;

		dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
	}
}
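Instead of a single polarity bool, the init path above now snapshots both BIOS-programmed FDI RX bits into dev_priv->fdi_rx_config with one masked read; hsw_fdi_link_train (in the next file) ORs the snapshot back in when it rebuilds FDI_RX_CTL. A minimal hedged sketch of the mask-and-capture pattern; the bit positions and register value here are illustrative assumptions, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define FDI_RX_POLARITY_REVERSED_LPT  (1 << 16) /* assumed positions */
#define FDI_RX_LINK_REVERSAL_OVERRIDE (1 << 15)

int main(void)
{
    uint32_t fdi_rxa_ctl = 0x80018000; /* hypothetical BIOS-programmed value */
    uint32_t fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
                          FDI_RX_LINK_REVERSAL_OVERRIDE;

    /* keep only the bits the BIOS decided for this board */
    uint32_t fdi_rx_config = fdi_rxa_ctl & fdi_config;

    /* later, rebuild the register around the preserved bits */
    uint32_t rx_ctl_val = fdi_rx_config /* | PLL enable, lane count, ... */;

    printf("preserved 0x%08x -> 0x%08x\n", fdi_rx_config, rx_ctl_val);
    return 0;
}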
@ -84,7 +84,8 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 * in either FDI or DP modes only, as HDMI connections will work with both
 * of those
 */
void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
				      bool use_fdi_mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;
@ -114,16 +115,17 @@ void intel_prepare_ddi(struct drm_device *dev)
{
	int port;

	if (IS_HASWELL(dev)) {
	if (!HAS_DDI(dev))
		return;

	for (port = PORT_A; port < PORT_E; port++)
		intel_prepare_ddi_buffers(dev, port, false);

	/* DDI E is the suggested one to work in FDI mode, so program is as such by
	 * default. It will have to be re-programmed in case a digital DP output
	 * will be detected on it
	/* DDI E is the suggested one to work in FDI mode, so program is as such
	 * by default. It will have to be re-programmed in case a digital DP
	 * output will be detected on it
	 */
	intel_prepare_ddi_buffers(dev, PORT_E, true);
}
}

static const long hsw_ddi_buf_ctl_values[] = {
@ -178,10 +180,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
		     FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
		     ((intel_crtc->fdi_lanes - 1) << 19);
	if (dev_priv->fdi_rx_polarity_reversed)
		rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
	POSTING_READ(_FDI_RXA_CTL);
	udelay(220);
@ -203,7 +203,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
			   DP_TP_CTL_LINK_TRAIN_PAT1 |
			   DP_TP_CTL_ENABLE);

	/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
	/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
	 * DDI E does not support port reversal, the functionality is
	 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
	 * port reversal bit */
	I915_WRITE(DDI_BUF_CTL(PORT_E),
		   DDI_BUF_CTL_ENABLE |
		   ((intel_crtc->fdi_lanes - 1) << 1) |
@ -675,10 +678,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
	DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
		      port_name(port), pipe_name(pipe));

	intel_crtc->eld_vld = false;
	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_digital_port *intel_dig_port =
			enc_to_dig_port(encoder);

		intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
		intel_dp->DP = intel_dig_port->port_reversal |
			       DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
		switch (intel_dp->lane_count) {
		case 1:
			intel_dp->DP |= DDI_PORT_WIDTH_X1;
@ -985,7 +992,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
	if (cpu_transcoder == TRANSCODER_EDP) {
		switch (pipe) {
		case PIPE_A:
			/* Can only use the always-on power well for eDP when
			 * not using the panel fitter, and when not using motion
			 * blur mitigation (which we don't support). */
			if (dev_priv->pch_pf_size)
				temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
			else
				temp |= TRANS_DDI_EDP_INPUT_A_ON;
			break;
		case PIPE_B:
			temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
@ -1069,7 +1082,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
	if (port == PORT_A)
		cpu_transcoder = TRANSCODER_EDP;
	else
		cpu_transcoder = pipe;
		cpu_transcoder = (enum transcoder) pipe;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));

@ -1285,34 +1298,58 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_ddi_get_encoder_port(intel_encoder);
	int type = intel_encoder->type;
	uint32_t tmp;

	if (type == INTEL_OUTPUT_HDMI) {
		struct intel_digital_port *intel_dig_port =
			enc_to_dig_port(encoder);

		/* In HDMI/DVI mode, the port width, and swing/emphasis values
		 * are ignored so nothing special needs to be done besides
		 * enabling the port.
		 */
		I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
		I915_WRITE(DDI_BUF_CTL(port),
			   intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
	} else if (type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		ironlake_edp_backlight_on(intel_dp);
	}

	if (intel_crtc->eld_vld) {
		tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
		tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
		I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
	}
}

static void intel_disable_ddi(struct intel_encoder *intel_encoder)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int type = intel_encoder->type;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		ironlake_edp_backlight_off(intel_dp);
	}

	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
	tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}

int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
@ -1354,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	enum port port = intel_dig_port->port;
	bool wait;
	uint32_t val;
	bool wait = false;

	if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
		val = I915_READ(DDI_BUF_CTL(port));
@ -1452,11 +1489,11 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
	.mode_fixup = intel_ddi_mode_fixup,
	.mode_set = intel_ddi_mode_set,
	.disable = intel_encoder_noop,
};

void intel_ddi_init(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
@ -1497,6 +1534,8 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
	intel_encoder->get_hw_state = intel_ddi_get_hw_state;

	intel_dig_port->port = port;
	intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
					DDI_BUF_PORT_REVERSAL;
	if (hdmi_connector)
		intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
	else
File diff suppressed because it is too large
@ -148,15 +148,6 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
	return max_link_bw;
}

static int
intel_dp_link_clock(uint8_t link_bw)
{
	if (link_bw == DP_LINK_BW_2_7)
		return 270000;
	else
		return 162000;
}

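The helper removed above is superseded by drm_dp_bw_code_to_link_rate() from the shared DP helpers, which covers the same two rates plus 5.4 GHz. A hedged, standalone sketch of the equivalent mapping (mirroring what the drm helper is believed to do in this kernel, in kHz):

#include <stdio.h>

#define DP_LINK_BW_1_62 0x06
#define DP_LINK_BW_2_7  0x0a
#define DP_LINK_BW_5_4  0x14

/* local stand-in mirroring drm_dp_bw_code_to_link_rate() */
static int bw_code_to_link_rate(unsigned char link_bw)
{
    switch (link_bw) {
    case DP_LINK_BW_2_7:
        return 270000;
    case DP_LINK_BW_5_4:
        return 540000;
    case DP_LINK_BW_1_62:
    default:
        return 162000;
    }
}

int main(void)
{
    /* prints "162000 270000" */
    printf("%d %d\n", bw_code_to_link_rate(DP_LINK_BW_1_62),
           bw_code_to_link_rate(DP_LINK_BW_2_7));
    return 0;
}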
/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
@ -191,7 +182,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
			  struct drm_display_mode *mode,
			  bool adjust_mode)
{
	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
	int max_link_clock =
		drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_rate, mode_rate;

@ -330,6 +322,49 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->output_reg + 0x10;
	uint32_t status;
	bool done;

	if (IS_HASWELL(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
@ -341,11 +376,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	int i, ret, recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
//	pm_qos_update_request(&dev_priv->pm_qos, 0);

	if (IS_HASWELL(dev)) {
		switch (intel_dig_port->port) {
@ -379,7 +420,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_HASWELL(dev))
		if (HAS_DDI(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
@ -399,7 +440,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
@ -408,7 +449,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
		ret = -EBUSY;
		goto out;
	}

	/* Must try at least 3 times according to DP spec */
@ -421,6 +463,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@ -428,12 +471,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
@ -451,7 +490,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
@ -459,14 +499,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
@ -479,7 +521,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
	ret = recv_bytes;
out:
//	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	return ret;
}

/* Write data to the aux channel in native mode */
@ -718,16 +764,35 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;

	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;

	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
			int link_bw_clock =
				drm_dp_bw_code_to_link_rate(bws[clock]);
			int link_avail = intel_dp_max_data_rate(link_bw_clock,
								lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				adjusted_mode->clock = link_bw_clock;
				DRM_DEBUG_KMS("DP link bw %02x lane "
						"count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
@ -742,39 +807,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
	return false;
}

struct intel_dp_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void
intel_dp_compute_m_n(int bpp,
		     int nlanes,
		     int pixel_clock,
		     int link_clock,
		     struct intel_dp_m_n *m_n)
{
	m_n->tu = 64;
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
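The removed helpers compute the data M/N ratio (pixel_clock * bpp / 8) : (link_clock * nlanes) and the link M/N ratio pixel_clock : link_clock, shifting both down until they fit the 24-bit hardware registers; the replacement intel_link_compute_m_n() does the same job from shared code, fed with target_clock. A worked standalone version of the arithmetic; the numbers are an example 1080p-over-HBR case, not values from the patch:

#include <stdint.h>
#include <stdio.h>

static void reduce_ratio(uint32_t *num, uint32_t *den)
{
    while (*num > 0xffffff || *den > 0xffffff) {
        *num >>= 1;
        *den >>= 1;
    }
}

int main(void)
{
    int bpp = 24, nlanes = 4;
    int pixel_clock = 148500, link_clock = 270000; /* kHz */

    uint32_t gmch_m = (pixel_clock * bpp) >> 3;    /* 445500  */
    uint32_t gmch_n = link_clock * nlanes;         /* 1080000 */
    uint32_t link_m = pixel_clock, link_n = link_clock;

    reduce_ratio(&gmch_m, &gmch_n); /* both already < 2^24, unchanged */
    reduce_ratio(&link_m, &link_n);

    printf("data M/N = %u/%u, link M/N = %u/%u\n",
           gmch_m, gmch_n, link_m, link_n);
    return 0;
}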

void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
@ -785,9 +817,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_dp_m_n m_n;
	struct intel_link_m_n m_n;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
	int target_clock;

	/*
	 * Find the lane count in the intel_encoder private
@ -803,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		}
	}

	target_clock = mode->clock;
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			target_clock = intel_edp_target_clock(intel_encoder,
							      mode);
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);
	intel_link_compute_m_n(intel_crtc->bpp, lane_count,
			       target_clock, adjusted_mode->clock, &m_n);

	if (IS_HASWELL(dev)) {
		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@ -851,6 +893,32 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp)
	}
}

static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
@ -926,6 +994,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		if (!HAS_PCH_SPLIT(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@ -950,6 +1019,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (is_cpu_edp(intel_dp))
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}

#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@ -1057,6 +1129,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
@ -1543,7 +1617,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
}

static uint32_t
intel_dp_signal_levels(uint8_t train_set)
intel_gen4_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

@ -1641,7 +1715,7 @@ intel_gen7_edp_signal_levels(uint8_t train_set)

/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_dp_signal_levels_hsw(uint8_t train_set)
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
@ -1673,6 +1747,34 @@ intel_dp_signal_levels_hsw(uint8_t train_set)
	}
}

/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_HASWELL(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}

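With the per-platform swing/pre-emphasis selection folded into intel_dp_set_signal_levels() above, both training loops later in this diff shrink to a single call; the read-modify-write on *DP clears only that platform's field before ORing in the new level. A hedged sketch of the masking step in isolation, with made-up register contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t DP = 0x88000055;            /* hypothetical port register */
    uint32_t mask = 0xf << 24;           /* e.g. DDI_BUF_EMP_MASK      */
    uint32_t signal_levels = 0x6 << 24;  /* newly selected level       */

    /* same update intel_dp_set_signal_levels() performs on *DP */
    DP = (DP & ~mask) | signal_levels;
    printf("DP = 0x%08x\n", DP);         /* prints "DP = 0x86000055" */
    return 0;
}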
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
@ -1696,6 +1798,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:

			if (port != PORT_A) {
			temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
			I915_WRITE(DP_TP_CTL(port), temp);

@ -1704,6 +1808,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
				DRM_ERROR("Timed out waiting for DP idle patterns\n");

			temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
			}

			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
@ -1791,7 +1897,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (IS_HASWELL(dev))
	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
@ -1809,24 +1915,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];
		uint32_t signal_levels;

		if (IS_HASWELL(dev)) {
			signal_levels = intel_dp_signal_levels_hsw(
							intel_dp->train_set[0]);
			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}
		DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
			      signal_levels);
		intel_dp_set_signal_levels(intel_dp, &DP);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
@ -1850,7 +1940,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count && voltage_tries == 5) {
		if (i == intel_dp->lane_count) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
@ -1882,7 +1972,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
@ -1892,8 +1981,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint32_t signal_levels;
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
@ -1902,19 +1989,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
			break;
		}

		if (IS_HASWELL(dev)) {
			signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}
		intel_dp_set_signal_levels(intel_dp, &DP);

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
@ -1964,6 +2039,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
@ -1981,7 +2058,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
	 * intel_ddi_prepare_link_retrain will take care of redoing the link
	 * train.
	 */
	if (IS_HASWELL(dev))
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@ -1998,7 +2075,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
	}
	POSTING_READ(intel_dp->output_reg);

	msleep(17);
	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@ -2018,19 +2096,14 @@ intel_dp_link_down(struct intel_dp *intel_dp)
		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (crtc == NULL) {
			/* We can arrive here never having been attached
			 * to a CRTC, for instance, due to inheriting
			 * random state from the BIOS.
			 *
			 * If the pipe is not running, play safe and
			 * wait for the clocks to stabilise before
			 * continuing.
			 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@ -2042,10 +2115,16 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

@ -2206,6 +2285,8 @@ static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
@ -2216,6 +2297,9 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
		return status;
	}

	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

@ -2224,17 +2308,18 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	uint32_t bit;

	switch (intel_dp->output_reg) {
	case DP_B:
		bit = DPB_HOTPLUG_LIVE_STATUS;
	switch (intel_dig_port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS;
		break;
	case DP_C:
		bit = DPC_HOTPLUG_LIVE_STATUS;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS;
		break;
	case DP_D:
		bit = DPD_HOTPLUG_LIVE_STATUS;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
@ -2290,13 +2375,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
	return intel_ddc_get_modes(connector, adapter);
}


/**
 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
 *
 * \return true if DP port is connected.
 * \return false if DP port is disconnected.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
@ -2306,7 +2384,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	intel_dp->has_audio = false;

@ -2315,10 +2392,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
	else
		status = g4x_dp_detect(intel_dp);

//	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
//			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
//	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (status != connector_status_connected)
		return status;

@ -2396,7 +2469,7 @@ intel_dp_set_property(struct drm_connector *connector,
|
||||
ret = drm_object_property_set_value(&connector->base, property, val);
|
||||
if (ret)
|
||||
return ret;
|
||||
#if 0
|
||||
|
||||
if (property == dev_priv->force_audio_property) {
|
||||
int i = val;
|
||||
bool has_audio;
|
||||
@ -2419,13 +2492,23 @@ intel_dp_set_property(struct drm_connector *connector,
|
||||
}
|
||||
|
||||
if (property == dev_priv->broadcast_rgb_property) {
|
||||
if (val == !!intel_dp->color_range)
|
||||
return 0;
|
||||
|
||||
intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
|
||||
switch (val) {
|
||||
case INTEL_BROADCAST_RGB_AUTO:
|
||||
intel_dp->color_range_auto = true;
|
||||
break;
|
||||
case INTEL_BROADCAST_RGB_FULL:
|
||||
intel_dp->color_range_auto = false;
|
||||
intel_dp->color_range = 0;
|
||||
break;
|
||||
case INTEL_BROADCAST_RGB_LIMITED:
|
||||
intel_dp->color_range_auto = false;
|
||||
intel_dp->color_range = DP_COLOR_RANGE_16_235;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (is_edp(intel_dp) &&
|
||||
property == connector->dev->mode_config.scaling_mode_property) {
|
||||
@ -2446,11 +2529,8 @@ intel_dp_set_property(struct drm_connector *connector,
|
||||
return -EINVAL;
|
||||
|
||||
done:
|
||||
if (intel_encoder->base.crtc) {
|
||||
struct drm_crtc *crtc = intel_encoder->base.crtc;
|
||||
intel_set_mode(crtc, &crtc->mode,
|
||||
crtc->x, crtc->y, crtc->fb);
|
||||
}
|
||||
if (intel_encoder->base.crtc)
|
||||
intel_crtc_restore_mode(intel_encoder->base.crtc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2479,12 +2559,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
|
||||
struct intel_dp *intel_dp = &intel_dig_port->dp;
|
||||
struct drm_device *dev = intel_dp_to_dev(intel_dp);
|
||||
|
||||
i2c_del_adapter(&intel_dp->adapter);
|
||||
drm_encoder_cleanup(encoder);
|
||||
if (is_edp(intel_dp)) {
|
||||
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
|
||||
mutex_lock(&dev->mode_config.mutex);
|
||||
ironlake_panel_vdd_off_sync(intel_dp);
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
}
|
||||
kfree(intel_dig_port);
|
||||
}
|
||||
@ -2492,7 +2575,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
|
||||
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
|
||||
.mode_fixup = intel_dp_mode_fixup,
|
||||
.mode_set = intel_dp_mode_set,
|
||||
.disable = intel_encoder_noop,
|
||||
};
|
||||
|
||||
static const struct drm_connector_funcs intel_dp_connector_funcs = {
|
||||
@ -2567,6 +2649,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
|
||||
|
||||
intel_attach_force_audio_property(connector);
|
||||
intel_attach_broadcast_rgb_property(connector);
|
||||
intel_dp->color_range_auto = true;
|
||||
|
||||
if (is_edp(intel_dp)) {
|
||||
drm_mode_create_scaling_mode_property(connector->dev);
|
||||
@ -2756,7 +2839,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
intel_connector_attach_encoder(intel_connector, intel_encoder);
|
||||
drm_sysfs_connector_add(connector);
|
||||
|
||||
if (IS_HASWELL(dev))
|
||||
if (HAS_DDI(dev))
|
||||
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
|
||||
else
|
||||
intel_connector->get_hw_state = intel_connector_get_hw_state;
|
||||
@ -2768,15 +2851,15 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
name = "DPDDC-A";
|
||||
break;
|
||||
case PORT_B:
|
||||
dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
|
||||
dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
|
||||
name = "DPDDC-B";
|
||||
break;
|
||||
case PORT_C:
|
||||
dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
|
||||
dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
|
||||
name = "DPDDC-C";
|
||||
break;
|
||||
case PORT_D:
|
||||
dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
|
||||
dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
|
||||
name = "DPDDC-D";
|
||||
break;
|
||||
default:
|
||||
|
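The g4x detect path above now keys the hotplug live-status check off intel_dig_port->port rather than the DP output register. A minimal sketch of that pattern, assuming a hypothetical helper name and using only the registers and bits visible in these hunks:

/* Hypothetical helper illustrating the port-based live-status test. */
static bool g4x_port_connected(struct drm_i915_private *dev_priv,
                               enum port port)
{
    uint32_t bit;

    switch (port) {
    case PORT_B:
        bit = PORTB_HOTPLUG_LIVE_STATUS;
        break;
    case PORT_C:
        bit = PORTC_HOTPLUG_LIVE_STATUS;
        break;
    case PORT_D:
        bit = PORTD_HOTPLUG_LIVE_STATUS;
        break;
    default:
        return false; /* no live-status bit for this port */
    }

    return (I915_READ(PORT_HOTPLUG_STAT) & bit) != 0;
}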
@@ -118,6 +118,11 @@
 * timings in the mode to prevent the crtc fixup from overwriting them.
 * Currently only lvds needs that. */
#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
/*
 * Set when limited 16-235 (as opposed to full 0-255) RGB color range is
 * to be used.
 */
#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)

static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -162,6 +167,7 @@ struct intel_encoder {
    bool cloneable;
    bool connectors_active;
    void (*hot_plug)(struct intel_encoder *);
    void (*pre_pll_enable)(struct intel_encoder *);
    void (*pre_enable)(struct intel_encoder *);
    void (*enable)(struct intel_encoder *);
    void (*disable)(struct intel_encoder *);
@@ -214,6 +220,7 @@ struct intel_crtc {
     * some outputs connected to this crtc.
     */
    bool active;
    bool eld_vld;
    bool primary_disabled; /* is the crtc obscured by a plane? */
    bool lowfreq_avail;
    struct intel_overlay *overlay;
@@ -237,6 +244,9 @@ struct intel_crtc {
    /* We can share PLLs across outputs if the timings match */
    struct intel_pch_pll *pch_pll;
    uint32_t ddi_pll_sel;

    /* reset counter value when the last flip was submitted */
    unsigned int reset_counter;
};

struct intel_plane {
@@ -292,6 +302,9 @@ struct cxsr_latency {
#define DIP_LEN_AVI     13
#define DIP_AVI_PR_1    0
#define DIP_AVI_PR_2    1
#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_FULL    (2 << 2)

#define DIP_TYPE_SPD    0x83
#define DIP_VERSION_SPD 0x1
@@ -346,9 +359,11 @@ struct intel_hdmi {
    u32 sdvox_reg;
    int ddc_bus;
    uint32_t color_range;
    bool color_range_auto;
    bool has_hdmi_sink;
    bool has_audio;
    enum hdmi_force_audio force_audio;
    bool rgb_quant_range_selectable;
    void (*write_infoframe)(struct drm_encoder *encoder,
                            struct dip_infoframe *frame);
    void (*set_infoframes)(struct drm_encoder *encoder,
@@ -365,6 +380,7 @@ struct intel_dp {
    bool has_audio;
    enum hdmi_force_audio force_audio;
    uint32_t color_range;
    bool color_range_auto;
    uint8_t link_bw;
    uint8_t lane_count;
    uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
@@ -386,6 +402,7 @@ struct intel_dp {
struct intel_digital_port {
    struct intel_encoder base;
    enum port port;
    u32 port_reversal;
    struct intel_dp dp;
    struct intel_hdmi hdmi;
};
@@ -448,10 +465,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_idle(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern void intel_mark_idle(struct drm_device *dev);
extern bool intel_lvds_init(struct drm_device *dev);
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
                          enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -511,12 +528,12 @@ struct intel_set_config {
    bool mode_changed;
};

extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
                          int x, int y, struct drm_framebuffer *old_fb);
extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
@@ -555,6 +572,9 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
    return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}

bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
                                struct intel_digital_port *port);

extern void intel_connector_attach_encoder(struct intel_connector *connector,
                                           struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -598,6 +618,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
@@ -636,7 +657,8 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
                                             struct drm_display_mode *mode);

extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
                                                    unsigned int tiling_mode,
                                                    unsigned int bpp,
                                                    unsigned int pitch);

@@ -657,7 +679,8 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);

extern void intel_init_power_wells(struct drm_device *dev);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
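hdmi_to_dig_port above recovers the enclosing intel_digital_port from its embedded intel_hdmi via container_of. A sketch of the matching DP-side helper, assuming the same pattern is applied to the dp member:

static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
    /* walk from the embedded member back to the containing struct */
    return container_of(intel_dp, struct intel_digital_port, dp);
}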
@@ -345,7 +345,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
    .mode_fixup = intel_dvo_mode_fixup,
    .mode_set = intel_dvo_mode_set,
    .disable = intel_encoder_noop,
};

static const struct drm_connector_funcs intel_dvo_connector_funcs = {
@@ -91,9 +91,10 @@ static struct fb_ops intelfb_ops = {
//    .fb_debug_leave = drm_fb_helper_debug_leave,
};

static int intelfb_create(struct intel_fbdev *ifbdev,
static int intelfb_create(struct drm_fb_helper *helper,
                          struct drm_fb_helper_surface_size *sizes)
{
    struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
    struct drm_device *dev = ifbdev->helper.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct fb_info *info;
@@ -186,8 +187,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
        goto out_unpin;
    }
    info->apertures->ranges[0].base = dev->mode_config.fb_base;
    info->apertures->ranges[0].size =
        dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
    info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;

    info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
    info->fix.smem_len = size;
@@ -223,26 +223,10 @@ out:
    return ret;
}

static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
                                          struct drm_fb_helper_surface_size *sizes)
{
    struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
    int new_fb = 0;
    int ret;

    if (!helper->fb) {
        ret = intelfb_create(ifbdev, sizes);
        if (ret)
            return ret;
        new_fb = 1;
    }
    return new_fb;
}

static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
    .gamma_set = intel_crtc_fb_gamma_set,
    .gamma_get = intel_crtc_fb_gamma_get,
    .fb_probe = intel_fb_find_or_create_single,
    .fb_probe = intelfb_create,
};


@@ -268,9 +252,20 @@ int intel_fbdev_init(struct drm_device *dev)
    }

    drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
    drm_fb_helper_initial_config(&ifbdev->helper, 32);

    return 0;
}

void intel_fbdev_initial_config(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    /* Due to peculiar init order wrt to hpd handling this is separate. */
    drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
}

void intel_fb_output_poll_changed(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
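Folding intel_fb_find_or_create_single into intelfb_create rests on the fb_probe return convention: 1 for a newly created framebuffer, 0 when an existing one is reused, and a negative errno on failure. A sketch of that contract, with setup_fb() as a hypothetical stand-in for the real allocation:

static int example_fb_probe(struct drm_fb_helper *helper,
                            struct drm_fb_helper_surface_size *sizes)
{
    int ret;

    if (helper->fb)
        return 0;                  /* reuse the framebuffer we already have */

    ret = setup_fb(helper, sizes); /* hypothetical: allocate and pin the fb */
    if (ret)
        return ret;                /* negative errno on failure */

    return 1;                      /* tell the helper core a new fb exists */
}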
@@ -48,7 +48,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t enabled_bits;

    enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
    enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;

    WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
         "HDMI port enabled, expecting disabled\n");
@@ -331,6 +331,7 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                                         struct drm_display_mode *adjusted_mode)
{
    struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
    struct dip_infoframe avi_if = {
        .type = DIP_TYPE_AVI,
        .ver = DIP_VERSION_AVI,
@@ -340,7 +341,14 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
    if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
        avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;

    avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
    if (intel_hdmi->rgb_quant_range_selectable) {
        if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
            avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
        else
            avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
    }

    avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);

    intel_set_infoframe(encoder, &avi_if);
}
@@ -364,7 +372,8 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
                               struct drm_display_mode *adjusted_mode)
{
    struct drm_i915_private *dev_priv = encoder->dev->dev_private;
    struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
    struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
    struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
    u32 reg = VIDEO_DIP_CTL;
    u32 val = I915_READ(reg);
    u32 port;
@@ -391,11 +400,11 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
        return;
    }

    switch (intel_hdmi->sdvox_reg) {
    case SDVOB:
    switch (intel_dig_port->port) {
    case PORT_B:
        port = VIDEO_DIP_PORT_B;
        break;
    case SDVOC:
    case PORT_C:
        port = VIDEO_DIP_PORT_C;
        break;
    default:
@@ -428,7 +437,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
{
    struct drm_i915_private *dev_priv = encoder->dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
    struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
    struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
    struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
    u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
    u32 val = I915_READ(reg);
    u32 port;
@@ -447,14 +457,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
        return;
    }

    switch (intel_hdmi->sdvox_reg) {
    case HDMIB:
    switch (intel_dig_port->port) {
    case PORT_B:
        port = VIDEO_DIP_PORT_B;
        break;
    case HDMIC:
    case PORT_C:
        port = VIDEO_DIP_PORT_C;
        break;
    case HDMID:
    case PORT_D:
        port = VIDEO_DIP_PORT_D;
        break;
    default:
@@ -766,46 +776,38 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
                           const struct drm_display_mode *mode,
                           struct drm_display_mode *adjusted_mode)
{
    return true;
}
    struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);

static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
    struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t bit;

    switch (intel_hdmi->sdvox_reg) {
    case SDVOB:
        bit = HDMIB_HOTPLUG_LIVE_STATUS;
        break;
    case SDVOC:
        bit = HDMIC_HOTPLUG_LIVE_STATUS;
        break;
    default:
        bit = 0;
        break;
    if (intel_hdmi->color_range_auto) {
        /* See CEA-861-E - 5.1 Default Encoding Parameters */
        if (intel_hdmi->has_hdmi_sink &&
            drm_match_cea_mode(adjusted_mode) > 1)
            intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
        else
            intel_hdmi->color_range = 0;
    }

    return I915_READ(PORT_HOTPLUG_STAT) & bit;
    if (intel_hdmi->color_range)
        adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;

    return true;
}

static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
    struct drm_device *dev = connector->dev;
    struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
    struct intel_digital_port *intel_dig_port =
        hdmi_to_dig_port(intel_hdmi);
    struct intel_encoder *intel_encoder = &intel_dig_port->base;
    struct drm_i915_private *dev_priv = connector->dev->dev_private;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct edid *edid;
    enum drm_connector_status status = connector_status_disconnected;

    if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
        return status;

    intel_hdmi->has_hdmi_sink = false;
    intel_hdmi->has_audio = false;
    intel_hdmi->rgb_quant_range_selectable = false;
    edid = drm_get_edid(connector,
                        intel_gmbus_get_adapter(dev_priv,
                                                intel_hdmi->ddc_bus));
@@ -817,6 +819,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
            intel_hdmi->has_hdmi_sink =
                drm_detect_hdmi_monitor(edid);
            intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
            intel_hdmi->rgb_quant_range_selectable =
                drm_rgb_quant_range_selectable(edid);
        }
        kfree(edid);
    }
@@ -879,7 +883,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
    ret = drm_object_property_set_value(&connector->base, property, val);
    if (ret)
        return ret;
#if 0

    if (property == dev_priv->force_audio_property) {
        enum hdmi_force_audio i = val;
        bool has_audio;
@@ -900,24 +904,31 @@ intel_hdmi_set_property(struct drm_connector *connector,
        intel_hdmi->has_audio = has_audio;
        goto done;
    }
#endif

    if (property == dev_priv->broadcast_rgb_property) {
        if (val == !!intel_hdmi->color_range)
            return 0;

        intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
        switch (val) {
        case INTEL_BROADCAST_RGB_AUTO:
            intel_hdmi->color_range_auto = true;
            break;
        case INTEL_BROADCAST_RGB_FULL:
            intel_hdmi->color_range_auto = false;
            intel_hdmi->color_range = 0;
            break;
        case INTEL_BROADCAST_RGB_LIMITED:
            intel_hdmi->color_range_auto = false;
            intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
            break;
        default:
            return -EINVAL;
        }
        goto done;
    }

    return -EINVAL;

done:
    if (intel_dig_port->base.base.crtc) {
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        intel_set_mode(crtc, &crtc->mode,
                       crtc->x, crtc->y, crtc->fb);
    }
    if (intel_dig_port->base.base.crtc)
        intel_crtc_restore_mode(intel_dig_port->base.base.crtc);

    return 0;
}
@@ -932,7 +943,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
    .mode_fixup = intel_hdmi_mode_fixup,
    .mode_set = intel_hdmi_mode_set,
    .disable = intel_encoder_noop,
};

static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -958,6 +968,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
{
    intel_attach_force_audio_property(connector);
    intel_attach_broadcast_rgb_property(connector);
    intel_hdmi->color_range_auto = true;
}

void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -981,15 +992,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
    switch (port) {
    case PORT_B:
        intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
        dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
        dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
        break;
    case PORT_C:
        intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
        dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
        dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
        break;
    case PORT_D:
        intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
        dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
        dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
        break;
    case PORT_A:
        /* Internal port only for eDP. */
@@ -1014,7 +1025,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        intel_hdmi->set_infoframes = cpt_set_infoframes;
    }

    if (IS_HASWELL(dev))
    if (HAS_DDI(dev))
        intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
    else
        intel_connector->get_hw_state = intel_connector_get_hw_state;
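intel_hdmi_mode_fixup above applies the CEA-861-E default encoding rule: on an HDMI sink, every CEA mode except VIC 1 (640x480) defaults to limited 16-235 RGB range. The decision reduces to a one-line predicate; a sketch under that reading:

/* true when the sink should get limited-range RGB by default */
static bool cea_mode_defaults_to_limited(bool has_hdmi_sink, int vic)
{
    return has_hdmi_sink && vic > 1;
}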
@@ -63,6 +63,7 @@ intel_i2c_reset(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
    I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}

static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -202,6 +203,77 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
    algo->data = bus;
}

/*
 * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
 * mode. This results in spurious interrupt warnings if the legacy irq no. is
 * shared with another device. The kernel then disables that interrupt source
 * and so prevents the other device from working properly.
 */
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
                     u32 gmbus2_status,
                     u32 gmbus4_irq_en)
{
    int i;
    int reg_offset = dev_priv->gpio_mmio_base;
    u32 gmbus2 = 0;
    DEFINE_WAIT(wait);

    if (!HAS_GMBUS_IRQ(dev_priv->dev))
        gmbus4_irq_en = 0;

    /* Important: The hw handles only the first bit, so set only one! Since
     * we also need to check for NAKs besides the hw ready/idle signal, we
     * need to wake up periodically and check that ourselves. */
    I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);

    for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
        prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
                        TASK_UNINTERRUPTIBLE);

        gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
        if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
            break;

        schedule_timeout(1);
    }
    finish_wait(&dev_priv->gmbus_wait_queue, &wait);

    I915_WRITE(GMBUS4 + reg_offset, 0);

    if (gmbus2 & GMBUS_SATOER)
        return -ENXIO;
    if (gmbus2 & gmbus2_status)
        return 0;
    return -ETIMEDOUT;
}

static int
gmbus_wait_idle(struct drm_i915_private *dev_priv)
{
    int ret;
    int reg_offset = dev_priv->gpio_mmio_base;

#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)

    if (!HAS_GMBUS_IRQ(dev_priv->dev))
        return wait_for(C, 10);

    /* Important: The hw handles only the first bit, so set only one! */
    I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);

    ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);

    I915_WRITE(GMBUS4 + reg_offset, 0);

    if (ret)
        return 0;
    else
        return -ETIMEDOUT;
#undef C
}

static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
                u32 gmbus1_index)
@@ -219,15 +291,11 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
    while (len) {
        int ret;
        u32 val, loop = 0;
        u32 gmbus2;

        ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
                       (GMBUS_SATOER | GMBUS_HW_RDY),
                       50);
        ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
                                   GMBUS_HW_RDY_EN);
        if (ret)
            return -ETIMEDOUT;
        if (gmbus2 & GMBUS_SATOER)
            return -ENXIO;
            return ret;

        val = I915_READ(GMBUS3 + reg_offset);
        do {
@@ -261,7 +329,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
               GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
    while (len) {
        int ret;
        u32 gmbus2;

        val = loop = 0;
        do {
@@ -270,13 +337,10 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)

        I915_WRITE(GMBUS3 + reg_offset, val);

        ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
                       (GMBUS_SATOER | GMBUS_HW_RDY),
                       50);
        ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
                                   GMBUS_HW_RDY_EN);
        if (ret)
            return -ETIMEDOUT;
        if (gmbus2 & GMBUS_SATOER)
            return -ENXIO;
            return ret;
    }
    return 0;
}
@@ -345,8 +409,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
    I915_WRITE(GMBUS0 + reg_offset, bus->reg0);

    for (i = 0; i < num; i++) {
        u32 gmbus2;

        if (gmbus_is_index_read(msgs, i, num)) {
            ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
            i += 1; /* set i to the index of the read xfer */
@@ -361,13 +423,12 @@ gmbus_xfer(struct i2c_adapter *adapter,
        if (ret == -ENXIO)
            goto clear_err;

        ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
                       (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
                       50);
        ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
                                   GMBUS_HW_WAIT_EN);
        if (ret == -ENXIO)
            goto clear_err;
        if (ret)
            goto timeout;
        if (gmbus2 & GMBUS_SATOER)
            goto clear_err;
    }

    /* Generate a STOP condition on the bus. Note that gmbus can't generate
@@ -380,8 +441,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
     * We will re-enable it at the start of the next xfer,
     * till then let it sleep.
     */
    if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
                 10)) {
    if (gmbus_wait_idle(dev_priv)) {
        DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
                      adapter->name);
        ret = -ETIMEDOUT;
@@ -405,8 +465,7 @@ clear_err:
     * it's slow responding and only answers on the 2nd retry.
     */
    ret = -ENXIO;
    if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
                 10)) {
    if (gmbus_wait_idle(dev_priv)) {
        DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
                      adapter->name);
        ret = -ETIMEDOUT;
@@ -465,10 +524,13 @@ int intel_setup_gmbus(struct drm_device *dev)

    if (HAS_PCH_SPLIT(dev))
        dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
    else if (IS_VALLEYVIEW(dev))
        dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
    else
        dev_priv->gpio_mmio_base = 0;

    mutex_init(&dev_priv->gmbus_mutex);
    init_waitqueue_head(&dev_priv->gmbus_wait_queue);

    for (i = 0; i < GMBUS_NUM_PORTS; i++) {
        struct intel_gmbus *bus = &dev_priv->gmbus[i];
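gmbus_wait_hw_status above uses the classic prepare_to_wait/schedule_timeout idiom: park on a wait queue, re-check the condition every tick, and give up after roughly 50 ms. A generic sketch of the same loop, with cond() as a hypothetical predicate:

static int wait_on_queue_50ms(wait_queue_head_t *q,
                              bool (*cond)(void *), void *data)
{
    DEFINE_WAIT(wait);
    int i;

    for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
        prepare_to_wait(q, &wait, TASK_UNINTERRUPTIBLE);
        if (cond(data))
            break;               /* condition met, stop sleeping */
        schedule_timeout(1);     /* sleep one tick or until woken */
    }
    finish_wait(q, &wait);

    return cond(data) ? 0 : -ETIMEDOUT;
}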
@@ -51,7 +51,8 @@ struct intel_lvds_encoder {

    u32 pfit_control;
    u32 pfit_pgm_ratios;
    bool pfit_dirty;
    bool is_dual_link;
    u32 reg;

    struct intel_lvds_connector *attached_connector;
};
@@ -71,15 +72,10 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
{
    struct drm_device *dev = encoder->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 lvds_reg, tmp;
    struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
    u32 tmp;

    if (HAS_PCH_SPLIT(dev)) {
        lvds_reg = PCH_LVDS;
    } else {
        lvds_reg = LVDS;
    }

    tmp = I915_READ(lvds_reg);
    tmp = I915_READ(lvds_encoder->reg);

    if (!(tmp & LVDS_PORT_EN))
        return false;
@@ -92,6 +88,91 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
    return true;
}

/* The LVDS pin pair needs to be on before the DPLLs are enabled.
 * This is an exception to the general rule that mode_set doesn't turn
 * things on.
 */
static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
{
    struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
    struct drm_device *dev = encoder->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
    struct drm_display_mode *fixed_mode =
        lvds_encoder->attached_connector->base.panel.fixed_mode;
    int pipe = intel_crtc->pipe;
    u32 temp;

    temp = I915_READ(lvds_encoder->reg);
    temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;

    if (HAS_PCH_CPT(dev)) {
        temp &= ~PORT_TRANS_SEL_MASK;
        temp |= PORT_TRANS_SEL_CPT(pipe);
    } else {
        if (pipe == 1) {
            temp |= LVDS_PIPEB_SELECT;
        } else {
            temp &= ~LVDS_PIPEB_SELECT;
        }
    }

    /* set the corresponding LVDS_BORDER bit */
    temp |= dev_priv->lvds_border_bits;
    /* Set the B0-B3 data pairs corresponding to whether we're going to
     * set the DPLLs for dual-channel mode or not.
     */
    if (lvds_encoder->is_dual_link)
        temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
    else
        temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

    /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
     * appropriately here, but we need to look more thoroughly into how
     * panels behave in the two modes.
     */

    /* Set the dithering flag on LVDS as needed, note that there is no
     * special lvds dither control bit on pch-split platforms, dithering is
     * only controlled through the PIPECONF reg. */
    if (INTEL_INFO(dev)->gen == 4) {
        if (dev_priv->lvds_dither)
            temp |= LVDS_ENABLE_DITHER;
        else
            temp &= ~LVDS_ENABLE_DITHER;
    }
    temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
    if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
        temp |= LVDS_HSYNC_POLARITY;
    if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
        temp |= LVDS_VSYNC_POLARITY;

    I915_WRITE(lvds_encoder->reg, temp);
}

static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
    struct drm_device *dev = encoder->base.dev;
    struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
        return;

    /*
     * Enable automatic panel scaling so that non-native modes
     * fill the screen. The panel fitter should only be
     * adjusted whilst the pipe is disabled, according to
     * register description and PRM.
     */
    DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
                  enc->pfit_control,
                  enc->pfit_pgm_ratios);

    I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
    I915_WRITE(PFIT_CONTROL, enc->pfit_control);
}

/**
 * Sets the power state for the panel.
 */
@@ -101,38 +182,20 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
    struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
    struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 ctl_reg, lvds_reg, stat_reg;
    u32 ctl_reg, stat_reg;

    if (HAS_PCH_SPLIT(dev)) {
        ctl_reg = PCH_PP_CONTROL;
        lvds_reg = PCH_LVDS;
        stat_reg = PCH_PP_STATUS;
    } else {
        ctl_reg = PP_CONTROL;
        lvds_reg = LVDS;
        stat_reg = PP_STATUS;
    }

    I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);

    if (lvds_encoder->pfit_dirty) {
        /*
         * Enable automatic panel scaling so that non-native modes
         * fill the screen. The panel fitter should only be
         * adjusted whilst the pipe is disabled, according to
         * register description and PRM.
         */
        DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
                      lvds_encoder->pfit_control,
                      lvds_encoder->pfit_pgm_ratios);

        I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
        I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
        lvds_encoder->pfit_dirty = false;
    }
    I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);

    I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
    POSTING_READ(lvds_reg);
    POSTING_READ(lvds_encoder->reg);
    if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
        DRM_ERROR("timed out waiting for panel to power on\n");

@@ -144,15 +207,13 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
    struct drm_device *dev = encoder->base.dev;
    struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 ctl_reg, lvds_reg, stat_reg;
    u32 ctl_reg, stat_reg;

    if (HAS_PCH_SPLIT(dev)) {
        ctl_reg = PCH_PP_CONTROL;
        lvds_reg = PCH_LVDS;
        stat_reg = PCH_PP_STATUS;
    } else {
        ctl_reg = PP_CONTROL;
        lvds_reg = LVDS;
        stat_reg = PP_STATUS;
    }

@@ -162,13 +223,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
    if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
        DRM_ERROR("timed out waiting for panel to power off\n");

    if (lvds_encoder->pfit_control) {
        I915_WRITE(PFIT_CONTROL, 0);
        lvds_encoder->pfit_dirty = true;
    }

    I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
    POSTING_READ(lvds_reg);
    I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
    POSTING_READ(lvds_encoder->reg);
}

static int intel_lvds_mode_valid(struct drm_connector *connector,
@@ -406,7 +462,6 @@ out:
        pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
        lvds_encoder->pfit_control = pfit_control;
        lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
        lvds_encoder->pfit_dirty = true;
    }
    dev_priv->lvds_border_bits = border;

@@ -493,13 +548,14 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {

#if 0
/*
 * Lid events. Note the use of 'modeset_on_lid':
 * - we set it on lid close, and reset it on open
 * Lid events. Note the use of 'modeset':
 * - we set it to MODESET_ON_LID_OPEN on lid close,
 *   and set it to MODESET_DONE on open
 * - we use it as a "only once" bit (ie we ignore
 *   duplicate events where it was already properly
 *   set/reset)
 * - the suspend/resume paths will also set it to
 *   zero, since they restore the mode ("lid open").
 *   duplicate events where it was already properly set)
 * - the suspend/resume paths will set it to
 *   MODESET_SUSPENDED and ignore the lid open event,
 *   because they restore the mode ("lid open").
 */
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
                            void *unused)
@@ -513,6 +569,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
    if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
        return NOTIFY_OK;

    mutex_lock(&dev_priv->modeset_restore_lock);
    if (dev_priv->modeset_restore == MODESET_SUSPENDED)
        goto exit;
    /*
     * check and update the status of LVDS connector after receiving
     * the LID notification event.
@@ -521,21 +580,24 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,

    /* Don't force modeset on machines where it causes a GPU lockup */
    if (dmi_check_system(intel_no_modeset_on_lid))
        return NOTIFY_OK;
        goto exit;
    if (!acpi_lid_open()) {
        dev_priv->modeset_on_lid = 1;
        return NOTIFY_OK;
        /* do modeset on next lid open event */
        dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
        goto exit;
    }

    if (!dev_priv->modeset_on_lid)
        return NOTIFY_OK;
    if (dev_priv->modeset_restore == MODESET_DONE)
        goto exit;

    dev_priv->modeset_on_lid = 0;

    mutex_lock(&dev->mode_config.mutex);
    drm_modeset_lock_all(dev);
    intel_modeset_setup_hw_state(dev, true);
    mutex_unlock(&dev->mode_config.mutex);
    drm_modeset_unlock_all(dev);

    dev_priv->modeset_restore = MODESET_DONE;

exit:
    mutex_unlock(&dev_priv->modeset_restore_lock);
    return NOTIFY_OK;
}
#endif
@@ -591,8 +653,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
             * If the CRTC is enabled, the display will be changed
             * according to the new panel fitting mode.
             */
            intel_set_mode(crtc, &crtc->mode,
                           crtc->x, crtc->y, crtc->fb);
            intel_crtc_restore_mode(crtc);
        }
    }

@@ -602,7 +663,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
    .mode_fixup = intel_lvds_mode_fixup,
    .mode_set = intel_lvds_mode_set,
    .disable = intel_encoder_noop,
};

static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -895,6 +955,53 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
    return false;
}

static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
    DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
    return 1;
}
bool intel_is_dual_link_lvds(struct drm_device *dev)
{
    struct intel_encoder *encoder;
    struct intel_lvds_encoder *lvds_encoder;

    list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                        base.head) {
        if (encoder->type == INTEL_OUTPUT_LVDS) {
            lvds_encoder = to_lvds_encoder(&encoder->base);

            return lvds_encoder->is_dual_link;
        }
    }

    return false;
}

static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
    struct drm_device *dev = lvds_encoder->base.base.dev;
    unsigned int val;
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* use the module option value if specified */
    if (i915_lvds_channel_mode > 0)
        return i915_lvds_channel_mode == 2;

//    if (dmi_check_system(intel_dual_link_lvds))
//        return true;

    /* BIOS should set the proper LVDS register value at boot, but
     * in reality, it doesn't set the value when the lid is closed;
     * we need to check "the value to be set" in VBT when LVDS
     * register is uninitialized.
     */
    val = I915_READ(lvds_encoder->reg);
    if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
        val = dev_priv->bios_lvds_val;

    return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}

static bool intel_lvds_supported(struct drm_device *dev)
{
    /* With the introduction of the PCH we gained a dedicated
@@ -980,6 +1087,8 @@ bool intel_lvds_init(struct drm_device *dev)
                     DRM_MODE_ENCODER_LVDS);

    intel_encoder->enable = intel_enable_lvds;
    intel_encoder->pre_enable = intel_pre_enable_lvds;
    intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
    intel_encoder->disable = intel_disable_lvds;
    intel_encoder->get_hw_state = intel_lvds_get_hw_state;
    intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1001,6 +1110,12 @@ bool intel_lvds_init(struct drm_device *dev)
    connector->interlace_allowed = false;
    connector->doublescan_allowed = false;

    if (HAS_PCH_SPLIT(dev)) {
        lvds_encoder->reg = PCH_LVDS;
    } else {
        lvds_encoder->reg = LVDS;
    }

    /* create the scaling mode property */
    drm_mode_create_scaling_mode_property(dev);
    drm_object_attach_property(&connector->base,
@@ -1101,6 +1216,10 @@ bool intel_lvds_init(struct drm_device *dev)
        goto failed;

out:
    lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
    DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
                  lvds_encoder->is_dual_link ? "dual" : "single");

    /*
     * Unlock registers and just
     * leave them unlocked
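Once the module option and the closed-lid BIOS fallback are accounted for, compute_is_dual_link_lvds above reduces to a single mask test on the LVDS register value. A sketch of just that test:

/* dual-link panels power up the second channel's clock pair */
static bool lvds_val_is_dual_link(u32 val)
{
    return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}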
@@ -28,7 +28,6 @@
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"

@@ -85,7 +84,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
    struct drm_device *dev = connector->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_property *prop;
#if 0

    prop = dev_priv->force_audio_property;
    if (prop == NULL) {
        prop = drm_property_create_enum(dev, 0,
@@ -98,12 +97,12 @@ intel_attach_force_audio_property(struct drm_connector *connector)
        dev_priv->force_audio_property = prop;
    }
    drm_object_attach_property(&connector->base, prop, 0);
#endif
}

static const struct drm_prop_enum_list broadcast_rgb_names[] = {
    { 0, "Full" },
    { 1, "Limited 16:235" },
    { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
    { INTEL_BROADCAST_RGB_FULL, "Full" },
    { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
};

void
@@ -112,7 +111,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
    struct drm_device *dev = connector->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_property *prop;
#if 0

    prop = dev_priv->broadcast_rgb_property;
    if (prop == NULL) {
        prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
@@ -126,5 +125,4 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
    }

    drm_object_attach_property(&connector->base, prop, 0);
#endif
}
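The broadcast_rgb_names table above feeds drm_property_create_enum when the property block is compiled in. A sketch of the attach path, assuming the #if 0 guard were lifted and a hypothetical wrapper name; the default value of 0 matches what the hunk attaches:

static void attach_broadcast_rgb(struct drm_connector *connector,
                                 struct drm_device *dev)
{
    struct drm_property *prop;

    prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
                                    "Broadcast RGB", broadcast_rgb_names,
                                    ARRAY_SIZE(broadcast_rgb_names));
    if (!prop)
        return;

    /* 0 is INTEL_BROADCAST_RGB_AUTO in the new enum list */
    drm_object_attach_property(&connector->base, prop, 0);
}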
@@ -470,12 +470,6 @@ void intel_update_fbc(struct drm_device *dev)
        dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
        goto out_disable;
    }
    if (intel_fb->obj->base.size > dev_priv->cfb_size) {
        DRM_DEBUG_KMS("framebuffer too large, disabling "
                      "compression\n");
        dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
        goto out_disable;
    }
    if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
        (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
        DRM_DEBUG_KMS("mode incompatible with compression, "
@@ -509,6 +503,14 @@ void intel_update_fbc(struct drm_device *dev)
    if (in_dbg_master())
        goto out_disable;

    if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
        DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
        DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
        DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
        dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
        goto out_disable;
    }

    /* If the scanout has not changed, don't modify the FBC settings.
     * Note that we make the fundamental assumption that the fb->obj
     * cannot be unpinned (and have its GTT offset and fence revoked)
@@ -556,6 +558,7 @@ out_disable:
        DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
        intel_disable_fbc(dev);
    }
    i915_gem_stolen_cleanup_compression(dev);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -2309,7 +2312,6 @@ err_unpin:
    i915_gem_object_unpin(ctx);
err_unref:
    drm_gem_object_unreference(&ctx->base);
    mutex_unlock(&dev->struct_mutex);
    return NULL;
}

@@ -2595,7 +2597,7 @@ static void gen6_enable_rps(struct drm_device *dev)
    I915_WRITE(GEN6_RC_SLEEP, 0);
    I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
    I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
    I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
    I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
    I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

    /* Check if we are enabling RC6 */
@@ -3465,6 +3467,7 @@ void intel_disable_gt_powersave(struct drm_device *dev)
        ironlake_disable_rc6(dev);
    } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
        gen6_disable_rps(dev);
        mutex_unlock(&dev_priv->rps.hw_lock);
    }
}

@@ -3590,6 +3593,19 @@ static void cpt_init_clock_gating(struct drm_device *dev)
    }
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t tmp;

    tmp = I915_READ(MCH_SSKPD);
    if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
        DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
        DRM_INFO("This can cause pipe underruns and display issues.\n");
        DRM_INFO("Please upgrade your BIOS to fix this.\n");
    }
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3682,6 +3698,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
    I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));

    cpt_init_clock_gating(dev);

    gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3693,6 +3711,10 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
    reg |= GEN7_FF_VS_SCHED_HW;
    reg |= GEN7_FF_DS_SCHED_HW;

    /* WaVSRefCountFullforceMissDisable */
    if (IS_HASWELL(dev_priv->dev))
        reg &= ~GEN7_FF_VS_REF_CNT_FFME;

    I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

@@ -3863,6 +3885,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
    I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

    cpt_init_clock_gating(dev);

    gen6_check_mch_setup(dev);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
@@ -4056,35 +4080,57 @@ void intel_init_clock_gating(struct drm_device *dev)
    dev_priv->display.init_clock_gating(dev);
}

/* Starting with Haswell, we have different power wells for
 * different parts of the GPU. This attempts to enable them all.
 */
void intel_init_power_wells(struct drm_device *dev)
void intel_set_power_well(struct drm_device *dev, bool enable)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    unsigned long power_wells[] = {
        HSW_PWR_WELL_CTL1,
        HSW_PWR_WELL_CTL2,
        HSW_PWR_WELL_CTL4
    };
    int i;
    bool is_enabled, enable_requested;
    uint32_t tmp;

    if (!IS_HASWELL(dev))
        return;

    mutex_lock(&dev->struct_mutex);
    tmp = I915_READ(HSW_PWR_WELL_DRIVER);
    is_enabled = tmp & HSW_PWR_WELL_STATE;
    enable_requested = tmp & HSW_PWR_WELL_ENABLE;

    for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
        int well = I915_READ(power_wells[i]);
    if (enable) {
        if (!enable_requested)
            I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);

        if ((well & HSW_PWR_WELL_STATE) == 0) {
            I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
            if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
                DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
        if (!is_enabled) {
            DRM_DEBUG_KMS("Enabling power well\n");
            if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
                          HSW_PWR_WELL_STATE), 20))
                DRM_ERROR("Timeout enabling power well\n");
        }
    } else {
        if (enable_requested) {
            I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
            DRM_DEBUG_KMS("Requesting to disable the power well\n");
        }
    }
}

    mutex_unlock(&dev->struct_mutex);
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
void intel_init_power_well(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (!IS_HASWELL(dev))
        return;

    /* For now, we need the power well to be always enabled. */
    intel_set_power_well(dev, true);

    /* We're taking over the BIOS, so clear any requests made by it since
     * the driver is in charge now. */
    if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
        I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

/* Set up chip specific power management-related functions */
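The Haswell power well handshake above distinguishes a request bit from a status bit: setting HSW_PWR_WELL_ENABLE only asks for power, and HSW_PWR_WELL_STATE reports when it has actually arrived. A condensed sketch of the enable path, using only the names from the hunk:

static void hsw_request_power_well(struct drm_i915_private *dev_priv)
{
    uint32_t tmp = I915_READ(HSW_PWR_WELL_DRIVER);

    if (!(tmp & HSW_PWR_WELL_ENABLE))        /* no request pending yet */
        I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);

    if (!(tmp & HSW_PWR_WELL_STATE) &&       /* wait for the status bit */
        wait_for(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE, 20))
        DRM_ERROR("Timeout enabling power well\n");
}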
@@ -320,6 +320,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
         * TLB invalidate requires a post-sync write.
         */
        flags |= PIPE_CONTROL_QW_WRITE;
        flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

        /* Workaround: we must issue a pipe_control with CS-stall bit
         * set before a pipe_control command that has the state cache
@@ -333,7 +334,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,

    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
    intel_ring_emit(ring, flags);
    intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
    intel_ring_emit(ring, scratch_addr);
    intel_ring_emit(ring, 0);
    intel_ring_advance(ring);

@@ -465,6 +466,9 @@ init_pipe_control(struct intel_ring_buffer *ring)
    if (pc->cpu_page == NULL)
        goto err_unpin;

    DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
                     ring->name, pc->gtt_offset);

    pc->obj = obj;
    ring->private = pc;
    return 0;
@@ -556,6 +560,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;

    if (!ring->private)
        return;

@@ -605,6 +611,13 @@ gen6_add_request(struct intel_ring_buffer *ring)
    return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
                                              u32 seqno)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
@@ -635,11 +648,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
    if (ret)
        return ret;

    /* If seqno wrap happened, omit the wait with no-ops */
    if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
    intel_ring_emit(waiter,
                    dw1 | signaller->semaphore_register[waiter->id]);
                        dw1 |
                        signaller->semaphore_register[waiter->id]);
    intel_ring_emit(waiter, seqno);
    intel_ring_emit(waiter, 0);
    intel_ring_emit(waiter, MI_NOOP);
    } else {
        intel_ring_emit(waiter, MI_NOOP);
        intel_ring_emit(waiter, MI_NOOP);
        intel_ring_emit(waiter, MI_NOOP);
        intel_ring_emit(waiter, MI_NOOP);
    }
    intel_ring_advance(waiter);

    return 0;
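The seqno-wrap guard above relies on seqnos being handed out monotonically: after the counter is reset, a wait target larger than the last allocated seqno can only predate the wrap, so the semaphore wait is padded out with MI_NOOPs instead. A sketch of the test in hypothetical standalone form:

/* hypothetical standalone form of the wrap check used by gen6_ring_sync */
static inline bool seqno_predates_wrap(u32 last_seqno, u32 wait_seqno)
{
    return last_seqno < wait_seqno;
}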
@ -720,6 +742,12 @@ ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
@ -727,6 +755,13 @@ pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
return pc->cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct pipe_control *pc = ring->private;
pc->cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
@ -1156,6 +1191,10 @@ static int intel_init_ring_buffer(struct drm_device *dev,
return ret;
}

obj = NULL;
if (!HAS_LLC(dev))
obj = i915_gem_object_create_stolen(dev, ring->size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, ring->size);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
@ -1174,7 +1213,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;

ring->virtual_start =
ioremap(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@ -1197,7 +1236,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
return 0;

err_unmap:
FreeKernelSpace(ring->virtual_start);
iounmap(ring->virtual_start);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
@ -1225,7 +1264,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)

I915_WRITE_CTL(ring, 0);

// drm_core_ioremapfree(&ring->map, ring->dev);
iounmap(ring->virtual_start);

i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
@ -1334,7 +1373,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)

msleep(1);

ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
} while (!time_after(GetTimerTicks(), end));
@ -1396,14 +1436,35 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}

static int __intel_ring_begin(struct intel_ring_buffer *ring,
int bytes)
{
int ret;

if (unlikely(ring->tail + bytes > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
}

if (unlikely(ring->space < bytes)) {
ret = ring_wait_for_space(ring, bytes);
if (unlikely(ret))
return ret;
}

ring->space -= bytes;
return 0;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;

ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;

@ -1412,20 +1473,21 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;

if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
}

void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;

BUG_ON(ring->outstanding_lazy_request);

if (INTEL_INFO(ring->dev)->gen >= 6) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
}

if (unlikely(ring->space < n)) {
ret = ring_wait_for_space(ring, n);
if (unlikely(ret))
return ret;
}

ring->space -= n;
return 0;
ring->set_seqno(ring, seqno);
}
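The refactor above funnels both the dword count and the wrap handling into __intel_ring_begin(); a typical caller reserves a fixed dword count, emits exactly that many dwords, and closes with intel_ring_advance(). A minimal usage sketch (the MI_FLUSH payload is illustrative only):

static int emit_flush_example(struct intel_ring_buffer *ring)
{
        int ret;

        ret = intel_ring_begin(ring, 2);   /* reserve 2 dwords */
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_FLUSH);   /* illustrative command dword */
        intel_ring_emit(ring, MI_NOOP);    /* pad to the reserved count */
        intel_ring_advance(ring);          /* publish the new tail */
        return 0;
}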
void intel_ring_advance(struct intel_ring_buffer *ring)
@ -1433,7 +1495,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = ring->dev->dev_private;

ring->tail &= ring->size - 1;
if (dev_priv->stop_rings & intel_ring_flag(ring))
if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}
@ -1590,6 +1652,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
@ -1600,6 +1663,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
ring->set_seqno = pc_render_set_seqno;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
@ -1610,6 +1674,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
@ -1682,6 +1747,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
@ -1743,6 +1809,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
@ -1758,6 +1825,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->flush = bsd_ring_flush;
ring->add_request = i9xx_add_request;
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen5_ring_get_irq;
@ -1787,6 +1855,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->flush = blt_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;

@ -90,6 +90,8 @@ struct intel_ring_buffer {
*/
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
void (*set_seqno)(struct intel_ring_buffer *ring,
u32 seqno);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length,
unsigned flags);
@ -178,6 +180,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
int reg, u32 value)
{
ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@ -208,7 +217,7 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
}
void intel_ring_advance(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);

void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
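The new set_seqno hook completes the accessor pair: get_seqno reads the ring's idea of completion from the status page, while set_seqno re-seeds it after a seqno wrap or reset. A minimal sketch of a completion poll built on the vtable (the wrapper name is illustrative):

static bool example_request_done(struct intel_ring_buffer *ring, u32 wanted)
{
        /* lazy_coherency=false requests a fresh read of the status page */
        u32 seen = ring->get_seqno(ring, false);

        return (s32)(seen - wanted) >= 0;  /* wrap-safe, as sketched above */
}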
@ -112,6 +112,7 @@ struct intel_sdvo {
 * It is only valid when using TMDS encoding and 8 bit per color mode.
 */
uint32_t color_range;
bool color_range_auto;

/**
 * This is set if we're going to treat the device as TV-out.
@ -134,6 +135,7 @@ struct intel_sdvo {
bool is_hdmi;
bool has_hdmi_monitor;
bool has_hdmi_audio;
bool rgb_quant_range_selectable;

/**
 * This is set if we detect output of sdvo device as LVDS and
@ -955,7 +957,8 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1);
}

static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@ -964,6 +967,13 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];

if (intel_sdvo->rgb_quant_range_selectable) {
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
}

intel_dip_infoframe_csum(&avi_if);

/* sdvo spec says that the ecc is handled by the hw, and it looks like
@ -1073,6 +1083,18 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);

if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (intel_sdvo->has_hdmi_monitor &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
else
intel_sdvo->color_range = 0;
}

if (intel_sdvo->color_range)
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;

return true;
}

@ -1130,7 +1152,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
intel_sdvo_set_avi_infoframe(intel_sdvo);
intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);

@ -1162,7 +1184,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* The real mode polarity is set by the SDVO commands, using
 * struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (intel_sdvo->is_hdmi)
if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@ -1522,6 +1544,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (intel_sdvo->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
intel_sdvo->rgb_quant_range_selectable =
drm_rgb_quant_range_selectable(edid);
}
} else
status = connector_status_disconnected;
@ -1573,6 +1597,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)

intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
intel_sdvo->rgb_quant_range_selectable = false;

if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
@ -1884,7 +1909,6 @@ intel_sdvo_set_property(struct drm_connector *connector,
if (ret)
return ret;

#if 0
if (property == dev_priv->force_audio_property) {
int i = val;
bool has_audio;
@ -1907,13 +1931,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
}

if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_sdvo->color_range)
return 0;

intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_sdvo->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_sdvo->color_range_auto = false;
intel_sdvo->color_range = 0;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_sdvo->color_range_auto = false;
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
goto done;
}
#endif

#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
@ -2008,11 +2042,8 @@ set_value:


done:
if (intel_sdvo->base.base.crtc) {
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
if (intel_sdvo->base.base.crtc)
intel_crtc_restore_mode(intel_sdvo->base.base.crtc);

return 0;
#undef CHECK_PROPERTY
@ -2021,7 +2052,6 @@ done:
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
.mode_fixup = intel_sdvo_mode_fixup,
.mode_set = intel_sdvo_mode_set,
.disable = intel_encoder_noop,
};

static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
@ -2211,13 +2241,16 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
}

static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
struct drm_device *dev = connector->base.base.dev;

intel_attach_force_audio_property(&connector->base.base);
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
}

static bool
@ -2265,7 +2298,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)

intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);

return true;
}
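The color_range_auto path above encodes the CEA-861-E default: on an HDMI sink, every CEA mode except VIC 1 (640x480) is transmitted as limited-range RGB. Sketched as a standalone predicate (drm_match_cea_mode() returns the CEA VIC, or 0 for non-CEA modes):

static bool want_limited_range(bool has_hdmi_monitor, int cea_vic)
{
        /* VIC 1 is 640x480, the lone full-range default among CEA modes */
        return has_hdmi_monitor && cea_vic > 1;
}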
@ -50,6 +50,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;

sprctl = I915_READ(SPRCTL(pipe));

@ -89,6 +90,9 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;

if (IS_HASWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;

/* Sizes are 0 based */
src_w--;
src_h--;
@ -103,26 +107,22 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 * when scaling is disabled.
 */
if (crtc_w != src_w || crtc_h != src_h) {
if (!dev_priv->sprite_scaling_enabled) {
dev_priv->sprite_scaling_enabled = true;
dev_priv->sprite_scaling_enabled |= 1 << pipe;

if (!scaling_was_enabled) {
intel_update_watermarks(dev);
intel_wait_for_vblank(dev, pipe);
}
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else {
if (dev_priv->sprite_scaling_enabled) {
dev_priv->sprite_scaling_enabled = false;
/* potentially re-enable LP watermarks */
intel_update_watermarks(dev);
}
}
} else
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);

I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);

linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;

@ -141,6 +141,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));

/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
intel_update_watermarks(dev);
}

static void
@ -150,6 +154,7 @@ ivb_disable_plane(struct drm_plane *plane)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;

I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
@ -159,7 +164,10 @@ ivb_disable_plane(struct drm_plane *plane)
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));

dev_priv->sprite_scaling_enabled = false;
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);

/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
intel_update_watermarks(dev);
}

@ -287,7 +295,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,

linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;

@ -591,7 +599,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;

mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);

obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
@ -604,7 +612,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
ret = intel_plane->update_colorkey(plane, set);

out_unlock:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}

@ -618,7 +626,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
int ret = 0;


mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);

obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
@ -631,7 +639,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
intel_plane->get_colorkey(plane, get);

out_unlock:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
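sprite_scaling_enabled changes here from a single bool to a per-pipe bitmask, so disabling the scaler on one pipe no longer clobbers another pipe's state; LP watermarks may only be re-enabled once every bit is clear. The idiom in isolation (names illustrative):

static uint32_t scaling_enabled;                  /* one bit per pipe */

static void mark_pipe_scaling(int pipe, bool on)
{
        if (on)
                scaling_enabled |= 1u << pipe;    /* this pipe scales */
        else
                scaling_enabled &= ~(1u << pipe); /* clear only this pipe */
}

static bool any_pipe_scaling(void)
{
        return scaling_enabled != 0;  /* gate for re-enabling LP watermarks */
}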
@ -480,7 +480,7 @@ int init_cursor(cursor_t *cursor)
/* You don't need to worry about fragmentation issues.
 * GTT space is continuous. I guarantee it. */

mapped = bits = (u32*)MapIoMem(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
mapped = bits = (u32*)MapIoMem(dev_priv->gtt.mappable_base + obj->gtt_offset,
CURSOR_WIDTH*CURSOR_HEIGHT*4, PG_SW);

if (unlikely(bits == NULL))
@ -681,6 +681,12 @@ int i915_mask_update(struct drm_device *dev, void *data,
u32 slot;
int ret;

if(mask->handle == -2)
{
printf("%s handle %d\n", __FUNCTION__, mask->handle);
return 0;
}

obj = drm_gem_object_lookup(dev, file, mask->handle);
if (obj == NULL)
return -ENOENT;
@ -883,6 +889,12 @@ int __queue_work(struct workqueue_struct *wq,
return 1;
};

bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
return __queue_work(wq, work);
}


void __stdcall delayed_work_timer_fn(unsigned long __data)
{
struct delayed_work *dwork = (struct delayed_work *)__data;
@ -962,4 +974,61 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
}


void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;

// wait->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
spin_unlock_irqrestore(&q->lock, flags);
}

/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;

// __set_current_state(TASK_RUNNING);
/*
 * We can check for list emptiness outside the lock
 * IFF:
 *  - we use the "careful" check that verifies both
 *    the next and prev pointers, so that there cannot
 *    be any half-pending updates in progress on other
 *    CPU's that we haven't seen yet (and that might
 *    still change the stack area.
 * and
 *  - all other users take the lock (ie we can only
 *    have _one_ other CPU that looks at or modifies
 *    the list).
 */
if (!list_empty_careful(&wait->task_list)) {
spin_lock_irqsave(&q->lock, flags);
list_del_init(&wait->task_list);
spin_unlock_irqrestore(&q->lock, flags);
}

DestroyEvent(wait->evnt);
}

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
list_del_init(&wait->task_list);
return 1;
}

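prepare_to_wait()/finish_wait() above mirror the kernel wait-queue protocol on top of KolibriOS events. The canonical caller loop looks roughly like the sketch below; the event-blocking call is an assumption about this port (stock Linux would call schedule() instead), and the wake condition is illustrative:

wait_queue_t wait;

wait.func = autoremove_wake_function;
INIT_LIST_HEAD(&wait.task_list);

for (;;) {
        prepare_to_wait(&q, &wait, 0);
        if (done)                     /* illustrative wake condition */
                break;
        WaitEvent(wait.evnt);         /* assumed KolibriOS blocking call */
}
finish_wait(&q, &wait);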
@ -57,7 +57,7 @@ int x86_clflush_size;

int i915_modeset = 1;

u32_t drvEntry(int action, char *cmdline)
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{

int err = 0;
@ -82,10 +82,10 @@ u32_t drvEntry(int action, char *cmdline)
return 0;
};
}
dbgprintf("i915 RC 10.5\n cmdline: %s\n", cmdline);
dbgprintf(" i915 v3.9-rc8\n cmdline: %s\n", cmdline);

cpu_detect();
dbgprintf("\ncache line size %d\n", x86_clflush_size);
// dbgprintf("\ncache line size %d\n", x86_clflush_size);

enum_pci_devices();

@ -105,6 +105,14 @@ u32_t drvEntry(int action, char *cmdline)
return err;
};

//int __declspec(dllexport) DllMain(int, char*) __attribute__ ((weak, alias ("drvEntry")));

//int __declspec(dllexport) DllMain( int hinstDLL, int fdwReason, void *lpReserved )
//{
//
//    return 1;
//}

#define CURRENT_API     0x0200  /* 2.00 */
#define COMPATIBLE_API  0x0100  /* 1.00 */

@ -138,7 +146,7 @@ u32_t drvEntry(int action, char *cmdline)
#define SRV_I915_GEM_BUSY       28
#define SRV_I915_GEM_SET_DOMAIN 29
#define SRV_I915_GEM_MMAP       30

#define SRV_I915_GEM_MMAP_GTT   31
#define SRV_I915_GEM_THROTTLE   32
#define SRV_FBINFO              33
#define SRV_I915_GEM_EXECBUFFER2 34
@ -267,6 +275,11 @@ int _stdcall display_handler(ioctl_t *io)
retval = i915_gem_mmap_ioctl(main_device, inp, file);
break;

case SRV_I915_GEM_MMAP_GTT:
retval = i915_gem_mmap_gtt_ioctl(main_device, inp, file);
break;


case SRV_FBINFO:
retval = i915_fbinfo(inp);
break;
@ -1,4 +0,0 @@
{ 0x0060005a, 0x210077be, 0x00000100, 0x008d0040 },
{ 0x0060005a, 0x212077be, 0x00000100, 0x008d0080 },
{ 0x0060005a, 0x214077be, 0x00000110, 0x008d0040 },
{ 0x0060005a, 0x216077be, 0x00000110, 0x008d0080 },
@ -1,3 +0,0 @@
{ 0x00000201, 0x20080061, 0x00000000, 0x00007000 },
{ 0x00600001, 0x20e00022, 0x008d0000, 0x00000000 },
{ 0x02800031, 0x23801cc9, 0x000000e0, 0x0a2a0102 },
@ -1,4 +0,0 @@
{ 0x00800041, 0x21c077bd, 0x008d01c0, 0x008d0380 },
{ 0x00800041, 0x220077bd, 0x008d0200, 0x008d0380 },
{ 0x00800041, 0x224077bd, 0x008d0240, 0x008d0380 },
{ 0x00800041, 0x228077bd, 0x008d0280, 0x008d0380 },
@ -1,4 +0,0 @@
{ 0x0060005a, 0x204077be, 0x000000c0, 0x008d0040 },
{ 0x0060005a, 0x206077be, 0x000000c0, 0x008d0080 },
{ 0x0060005a, 0x208077be, 0x000000d0, 0x008d0040 },
{ 0x0060005a, 0x20a077be, 0x000000d0, 0x008d0080 },
@ -1,12 +0,0 @@
{ 0x0060005a, 0x23c077bd, 0x000000e0, 0x008d0040 },
{ 0x0060005a, 0x23e077bd, 0x000000e0, 0x008d0080 },
{ 0x01600038, 0x218003bd, 0x008d03c0, 0x00000000 },
{ 0x01600038, 0x21a003bd, 0x008d03e0, 0x00000000 },
{ 0x0060005a, 0x23c077bd, 0x000000c0, 0x008d0040 },
{ 0x0060005a, 0x23e077bd, 0x000000c0, 0x008d0080 },
{ 0x00600041, 0x204077be, 0x008d03c0, 0x008d0180 },
{ 0x00600041, 0x206077be, 0x008d03e0, 0x008d01a0 },
{ 0x0060005a, 0x23c077bd, 0x000000d0, 0x008d0040 },
{ 0x0060005a, 0x23e077bd, 0x000000d0, 0x008d0080 },
{ 0x00600041, 0x208077be, 0x008d03c0, 0x008d0180 },
{ 0x00600041, 0x20a077be, 0x008d03e0, 0x008d01a0 },
@ -1,3 +0,0 @@
{ 0x00000201, 0x20080061, 0x00000000, 0x00000000 },
{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 },
{ 0x02800031, 0x21c01cc9, 0x00000020, 0x0a8a0001 },
@ -1,17 +0,0 @@
{ 0x00600001, 0x204003be, 0x008d01c0, 0x00000000 },
{ 0x00600001, 0x206003be, 0x008d01e0, 0x00000000 },
{ 0x00600001, 0x208003be, 0x008d0200, 0x00000000 },
{ 0x00600001, 0x20a003be, 0x008d0220, 0x00000000 },
{ 0x00600001, 0x20c003be, 0x008d0240, 0x00000000 },
{ 0x00600001, 0x20e003be, 0x008d0260, 0x00000000 },
{ 0x00600001, 0x210003be, 0x008d0280, 0x00000000 },
{ 0x00600001, 0x212003be, 0x008d02a0, 0x00000000 },
{ 0x05800031, 0x24001cc8, 0x00000040, 0x90019000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
@ -1,55 +0,0 @@
/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifndef _SNA_COMPILER_H_
#define _SNA_COMPILER_H_

#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#define noinline __attribute__((noinline))
#define fastcall __attribute__((regparm(3)))
#define must_check __attribute__((warn_unused_result))
#define constant __attribute__((const))
#else
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#define noinline
#define fastcall
#define must_check
#define constant
#endif

#ifdef HAVE_VALGRIND
#define VG(x) x
#else
#define VG(x)
#endif

#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))

#define COMPILE_TIME_ASSERT(E) ((void)sizeof(char[1 - 2*!(E)]))

#endif /* _SNA_COMPILER_H_ */
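COMPILE_TIME_ASSERT above is the classic negative-array-size trick: a false condition produces the invalid type char[-1] and stops the build, while a true one compiles to nothing. Since it expands to an expression, it must sit inside a function; a minimal usage sketch:

static inline void sna_layout_checks(void)
{
        COMPILE_TIME_ASSERT(sizeof(uint32_t) == 4); /* breaks the build if false */
}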
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,538 +0,0 @@
/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifndef KGEM_H
#define KGEM_H

#include "compiler.h"
#include <linux/list.h>
//#include <stdarg.h>

#include <i915_drm.h>


#if DEBUG_KGEM
#define DBG_HDR(x) ErrorF x
#else
#define DBG_HDR(x)
#endif

struct kgem_bo {
struct kgem_bo *proxy;

struct list_head list;
struct list_head request;
struct list_head vma;

void *map;
uint32_t gaddr;

#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
struct kgem_request *rq;
struct drm_i915_gem_exec_object2 *exec;

struct kgem_bo_binding {
struct kgem_bo_binding *next;
uint32_t format;
uint16_t offset;
} binding;

uint32_t unique_id;
uint32_t refcnt;
uint32_t handle;
uint32_t presumed_offset;
uint32_t delta;
union {
struct {
uint32_t count:27;
uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
} pages;
uint32_t bytes;
} size;
uint32_t pitch : 18; /* max 128k */
uint32_t tiling : 2;
uint32_t reusable : 1;
uint32_t dirty : 1;
uint32_t domain : 2;
uint32_t needs_flush : 1;
uint32_t vmap : 1;
uint32_t io : 1;
uint32_t flush : 1;
uint32_t scanout : 1;
uint32_t sync : 1;
uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3

struct kgem_request {
struct list_head list;
struct kgem_bo *bo;
struct list_head buffers;
};

enum {
MAP_GTT = 0,
MAP_CPU,
NUM_MAP_TYPES,
};

struct kgem {
int fd;
int wedged;
int gen;

uint32_t unique_id;

enum kgem_mode {
/* order matches I915_EXEC_RING ordering */
KGEM_NONE = 0,
KGEM_RENDER,
KGEM_BSD,
KGEM_BLT,
} mode, ring;

struct list_head flushing;
struct list_head large;
struct list_head active[NUM_CACHE_BUCKETS][3];
struct list_head inactive[NUM_CACHE_BUCKETS];
struct list_head partial;
struct list_head requests;
struct kgem_request *next_request;

struct {
struct list_head inactive[NUM_CACHE_BUCKETS];
int16_t count;
} vma[NUM_MAP_TYPES];

uint16_t nbatch;
uint16_t surface;
uint16_t nexec;
uint16_t nreloc;
uint16_t nfence;
uint16_t max_batch_size;

uint32_t flush:1;
uint32_t sync:1;
uint32_t need_expire:1;
uint32_t need_purge:1;
uint32_t need_retire:1;
uint32_t scanout:1;
uint32_t flush_now:1;
uint32_t busy:1;

uint32_t has_vmap :1;
uint32_t has_relaxed_fencing :1;
uint32_t has_semaphores :1;
uint32_t has_llc :1;
uint32_t has_cpu_bo :1;

uint16_t fence_max;
uint16_t half_cpu_cache_pages;
uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
uint32_t aperture, aperture_fenced;
uint32_t min_alignment;
uint32_t max_upload_tile_size, max_copy_tile_size;
uint32_t max_gpu_size, max_cpu_size;
uint32_t large_object_size, max_object_size;
uint32_t partial_buffer_size;

// void (*context_switch)(struct kgem *kgem, int new_mode);
void (*retire)(struct kgem *kgem);

uint32_t *batch;
uint32_t *batch_ptr;
int batch_idx;
struct drm_i915_gem_object *batch_obj;

struct drm_i915_gem_exec_object2 exec[256];
struct drm_i915_gem_relocation_entry reloc[384];
};

typedef struct
{
struct drm_i915_gem_object *batch;
struct list_head objects;
u32 exec_start;
u32 exec_len;

}batchbuffer_t;

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED 4
#define KGEM_EXEC_RESERVED 1

#define KGEM_BATCH_SIZE(K) ((K)->max_batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)

void kgem_init(struct kgem *kgem, int gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
void *ptr, uint32_t size,
bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size);
struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
int offset, int length);

//struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
//     const void *data,
//     BoxPtr box,
//     int stride, int bpp);

int kgem_choose_tiling(struct kgem *kgem,
int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU 0x1
#define KGEM_CAN_CREATE_CPU 0x2
#define KGEM_CAN_CREATE_LARGE 0x4

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
struct kgem_bo *src,
uint32_t width,
uint32_t height,
uint32_t pitch,
uint32_t bpp);
enum {
CREATE_EXACT = 0x1,
CREATE_INACTIVE = 0x2,
CREATE_CPU_MAP = 0x4,
CREATE_GTT_MAP = 0x8,
CREATE_SCANOUT = 0x10,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
int width,
int height,
int bpp,
int tiling,
uint32_t flags);

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);

bool kgem_retire(struct kgem *kgem);

void _kgem_submit(struct kgem *kgem, batchbuffer_t *exb);
//static inline void kgem_submit(struct kgem *kgem)
//{
//  if (kgem->nbatch)
//      _kgem_submit(kgem);
//}

/*
static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
if (bo->exec)
_kgem_submit(kgem);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
kgem_bo_submit(kgem, bo);

if (!bo->needs_flush)
return;

__kgem_flush(kgem, bo);

bo->needs_flush = false;
}
*/
static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
bo->refcnt++;
return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
assert(bo->refcnt);
if (--bo->refcnt == 0)
_kgem_bo_destroy(kgem, bo);
}
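kgem_bo_reference()/kgem_bo_destroy() above are plain manual refcounting: every holder takes a reference and must balance it with a destroy, and the final put frees the bo through _kgem_bo_destroy(). A minimal sketch of the ownership rule (the kgem instance and bo are assumed to already exist):

struct kgem_bo *shared = kgem_bo_reference(bo); /* second holder */
/* ... both bo and shared may be used independently here ... */
kgem_bo_destroy(&kgem, shared);  /* drop the extra reference */
kgem_bo_destroy(&kgem, bo);      /* last put frees the bo */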

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
kgem_submit(kgem);
#endif

if (kgem->mode == mode)
return;

// kgem->context_switch(kgem, mode);
kgem->mode = mode;
}


static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
assert(kgem->mode == KGEM_NONE);
// kgem->context_switch(kgem, mode);
kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
int num_dwords,
int num_surfaces)
{
return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
kgem_check_reloc(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem, int num_dwords)
{
//  if (!kgem_check_batch(kgem, num_dwords))
//      _kgem_submit(kgem);

return kgem->batch + kgem->nbatch;
}

static inline void kgem_advance_batch(struct kgem *kgem, int num_dwords)
{
kgem->nbatch += num_dwords;
}

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
if (bo->proxy)
bo = bo->proxy;

if (bo->exec == NULL)
_kgem_add_bo(kgem, bo);
}

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
uint32_t pos,
struct kgem_bo *bo,
uint32_t read_write_domains,
uint32_t delta);

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
int *tile_width, int *tile_height, int *tile_size);

static inline int kgem_bo_size(struct kgem_bo *bo)
{
assert(!(bo->proxy && bo->io));
return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_buffer_size(struct kgem_bo *bo)
{
assert(bo->proxy && bo->io);
return bo->size.bytes;
}

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
struct kgem_bo *bo)
{
int pitch = bo->pitch;
if (kgem->gen >= 40 && bo->tiling)
pitch /= 4;
if (pitch > MAXSHORT) {
DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
__FUNCTION__, pitch));
return false;
}

return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
struct kgem_bo *bo)
{
if (bo->tiling == I915_TILING_Y) {
DBG(("%s: can not blt to handle=%d, tiling=Y\n",
__FUNCTION__, bo->handle));
return false;
}

return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/
static inline bool kgem_bo_is_mappable(struct kgem *kgem,
struct kgem_bo *bo)
{
DBG_HDR(("%s: domain=%d, offset: %d size: %d\n",
__FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));

if (bo->domain == DOMAIN_GTT)
return true;

if (IS_GTT_MAP(bo->map))
return true;

if (kgem->gen < 40 && bo->tiling &&
bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
return false;

if (!bo->presumed_offset)
return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_mapped(struct kgem_bo *bo)
{
DBG_HDR(("%s: map=%p, tiling=%d\n", __FUNCTION__, bo->map, bo->tiling));

if (bo->map == NULL)
return false;

return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
__FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
assert(bo->proxy == NULL);
return bo->rq;
}
/*
static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
{
DBG(("%s? handle=%d, domain=%d, offset=%x, size=%x\n",
__FUNCTION__, bo->handle,
bo->domain, bo->presumed_offset, bo->size));

if (!kgem_bo_is_mappable(kgem, bo))
return true;

if (kgem->wedged)
return false;

if (kgem_bo_is_busy(bo))
return true;

if (bo->presumed_offset == 0)
return !list_is_empty(&kgem->requests);

return false;
}
*/

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
if (bo == NULL)
return FALSE;

return bo->dirty;
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
DBG_HDR(("%s: handle=%d\n", __FUNCTION__, bo->handle));
bo->dirty = true;
}

void kgem_sync(struct kgem *kgem);

#define KGEM_BUFFER_WRITE 0x1
#define KGEM_BUFFER_INPLACE 0x2
#define KGEM_BUFFER_LAST 0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
uint32_t size, uint32_t flags,
void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
int width, int height, int bpp,
uint32_t flags,
void **ret);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

#if HAS_EXTRA_DEBUG
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
(void)kgem;
(void)nbatch;
}
#endif

#undef DBG_HDR

u32 get_buffer_offset(uint32_t handle);


#endif /* KGEM_H */
@ -1,387 +0,0 @@
#include <drmP.h>
#include <drm.h>
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <errno-base.h>
#include <memory.h>

#include <syscall.h>

#include "../bitmap.h"

#include "sna.h"

struct kgem_bo *create_bo(bitmap_t *bitmap);

static Bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

void no_render_init(struct sna *sna)
{
struct sna_render *render = &sna->render;

memset (render,0, sizeof (*render));

render->vertices = render->vertex_data;
render->vertex_size = ARRAY_SIZE(render->vertex_data);

//    render->composite = no_render_composite;

//    render->copy_boxes = no_render_copy_boxes;
//    render->copy = no_render_copy;

//    render->fill_boxes = no_render_fill_boxes;
//    render->fill = no_render_fill;
//    render->fill_one = no_render_fill_one;
//    render->clear = no_render_clear;

//    render->reset = no_render_reset;
//    render->flush = no_render_flush;
//    render->fini = no_render_fini;

//    sna->kgem.context_switch = no_render_context_switch;
//    sna->kgem.retire = no_render_retire;

//    if (sna->kgem.gen >= 60)
sna->kgem.ring = KGEM_RENDER;
}


Bool sna_accel_init(struct sna *sna)
{
const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);


backend = "no";
sna->have_render = false;
sna->default_tiling = 0; //I915_TILING_X;
no_render_init(sna);

if ((sna->have_render = gen6_render_init(sna)))
backend = "SandyBridge";

/*
if (sna->chipset.info->gen >= 80) {
} else if (sna->chipset.info->gen >= 70) {
if ((sna->have_render = gen7_render_init(sna)))
backend = "IvyBridge";
} else if (sna->chipset.info->gen >= 60) {
if ((sna->have_render = gen6_render_init(sna)))
backend = "SandyBridge";
} else if (sna->chipset.info->gen >= 50) {
if ((sna->have_render = gen5_render_init(sna)))
backend = "Ironlake";
} else if (sna->chipset.info->gen >= 40) {
if ((sna->have_render = gen4_render_init(sna)))
backend = "Broadwater";
} else if (sna->chipset.info->gen >= 30) {
if ((sna->have_render = gen3_render_init(sna)))
backend = "gen3";
} else if (sna->chipset.info->gen >= 20) {
if ((sna->have_render = gen2_render_init(sna)))
backend = "gen2";
}
*/
DBG(("%s(backend=%s, have_render=%d)\n",
__FUNCTION__, backend, sna->have_render));

kgem_reset(&sna->kgem);

if (!sna_solid_cache_init(sna))
return FALSE;

sna_device = sna;
#if 0
{
struct kgem_bo *screen_bo;
bitmap_t screen;

screen.pitch = 1024*4;
screen.gaddr = 0;
screen.width = 1024;
screen.height = 768;
screen.obj = (void*)-1;

screen_bo = create_bo(&screen);

sna->render.clear(sna, &screen, screen_bo);
}
#endif

return TRUE;
}

int sna_init()
{
struct sna *sna;

DBG(("%s\n", __FUNCTION__));

sna = kzalloc(sizeof(struct sna), 0);
if (sna == NULL)
return FALSE;

//    sna->mode.cpp = 4;

kgem_init(&sna->kgem, 60);
/*
if (!xf86ReturnOptValBool(sna->Options,
OPTION_RELAXED_FENCING,
sna->kgem.has_relaxed_fencing)) {
xf86DrvMsg(scrn->scrnIndex,
sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
"Disabling use of relaxed fencing\n");
sna->kgem.has_relaxed_fencing = 0;
}
if (!xf86ReturnOptValBool(sna->Options,
OPTION_VMAP,
sna->kgem.has_vmap)) {
xf86DrvMsg(scrn->scrnIndex,
sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
"Disabling use of vmap\n");
sna->kgem.has_vmap = 0;
}
*/

/* Disable tiling by default */
sna->tiling = SNA_TILING_DISABLE;

/* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

sna->flags = 0;
sna->flags |= SNA_NO_THROTTLE;
sna->flags |= SNA_NO_DELAYED_FLUSH;

return sna_accel_init(sna);
}


static Bool sna_solid_cache_init(struct sna *sna)
{
struct sna_solid_cache *cache = &sna->render.solid_cache;

DBG(("%s\n", __FUNCTION__));

cache->cache_bo =
kgem_create_linear(&sna->kgem, sizeof(cache->color));
if (!cache->cache_bo)
return FALSE;

/*
 * Initialise [0] with white since it is very common and filling the
 * zeroth slot simplifies some of the checks.
 */
cache->color[0] = 0xffffffff;
cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
cache->bo[0]->pitch = 4;
cache->dirty = 1;
cache->size = 1;
cache->last = 0;

return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
struct sna_solid_cache *cache = &sna->render.solid_cache;

DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
assert(cache->dirty);
assert(cache->size);

kgem_bo_write(&sna->kgem, cache->cache_bo,
cache->color, cache->size*sizeof(uint32_t));
cache->dirty = 0;
cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
struct sna_solid_cache *cache = &sna->render.solid_cache;
int i;

DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

if (!force && cache->cache_bo->domain != DOMAIN_GPU)
return;

if (cache->dirty)
sna_render_flush_solid(sna);

for (i = 0; i < cache->size; i++) {
if (cache->bo[i] == NULL)
continue;

kgem_bo_destroy(&sna->kgem, cache->bo[i]);
cache->bo[i] = NULL;
}
kgem_bo_destroy(&sna->kgem, cache->cache_bo);

DBG(("sna_render_finish_solid reset\n"));

cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
cache->bo[0]->pitch = 4;
if (force)
cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
struct sna_solid_cache *cache = &sna->render.solid_cache;
int i;

DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

if (color == 0xffffffff) {
DBG(("%s(white)\n", __FUNCTION__));
return kgem_bo_reference(cache->bo[0]);
}

if (cache->color[cache->last] == color) {
DBG(("sna_render_get_solid(%d) = %x (last)\n",
cache->last, color));
return kgem_bo_reference(cache->bo[cache->last]);
}

for (i = 1; i < cache->size; i++) {
if (cache->color[i] == color) {
if (cache->bo[i] == NULL) {
DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
i, color));
goto create;
} else {
DBG(("sna_render_get_solid(%d) = %x (old)\n",
i, color));
goto done;
}
}
}

sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

i = cache->size++;
cache->color[i] = color;
cache->dirty = 1;
DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
cache->bo[i] = kgem_create_proxy(cache->cache_bo,
i*sizeof(uint32_t), sizeof(uint32_t));
cache->bo[i]->pitch = 4;

done:
cache->last = i;
return kgem_bo_reference(cache->bo[i]);
}
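sna_render_get_solid() hands out one-pixel proxy bos into the shared solid-colour buffer, and every returned bo carries a reference the caller must drop once the fill has been emitted. A minimal sketch of that contract (the colour value is illustrative):

struct kgem_bo *solid = sna_render_get_solid(sna, 0x00ff0000); /* illustrative colour */
/* ... bind solid as the source of a fill/composite operation ... */
kgem_bo_destroy(&sna->kgem, solid);  /* balance the reference we received */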

int sna_blit_copy(bitmap_t *dst_bitmap, int dst_x, int dst_y,
int w, int h, bitmap_t *src_bitmap, int src_x, int src_y)

{
batchbuffer_t execbuffer;

struct kgem_bo src_bo, dst_bo;

memset(&execbuffer, 0, sizeof(execbuffer));
memset(&src_bo, 0, sizeof(src_bo));
memset(&dst_bo, 0, sizeof(dst_bo));

INIT_LIST_HEAD(&execbuffer.objects);

src_bo.gaddr = src_bitmap->gaddr;
src_bo.pitch = src_bitmap->pitch;
src_bo.tiling = 0;

dst_bo.gaddr = dst_bitmap->gaddr;
dst_bo.pitch = dst_bitmap->pitch;
dst_bo.tiling = 0;

sna_device->render.copy(sna_device, 0, src_bitmap, &src_bo,
dst_bitmap, &dst_bo, dst_x, dst_y,
src_x, src_y, w, h);

INIT_LIST_HEAD(&execbuffer.objects);
list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects);

_kgem_submit(&sna_device->kgem, &execbuffer);

};


int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,
int w, int h, bitmap_t *src_bitmap, int src_x, int src_y,
bitmap_t *mask_bitmap)

{
struct sna_composite_op cop;
batchbuffer_t execbuffer;
BoxRec box;

struct kgem_bo src_bo, mask_bo, dst_bo;

memset(&cop, 0, sizeof(cop));
memset(&execbuffer, 0, sizeof(execbuffer));
memset(&src_bo, 0, sizeof(src_bo));
memset(&dst_bo, 0, sizeof(dst_bo));
memset(&mask_bo, 0, sizeof(mask_bo));

src_bo.gaddr = src_bitmap->gaddr;
src_bo.pitch = src_bitmap->pitch;
src_bo.tiling = 0;

dst_bo.gaddr = dst_bitmap->gaddr;
dst_bo.pitch = dst_bitmap->pitch;
dst_bo.tiling = 0;

mask_bo.gaddr = mask_bitmap->gaddr;
mask_bo.pitch = mask_bitmap->pitch;
mask_bo.tiling = 0;

box.x1 = dst_x;
box.y1 = dst_y;
box.x2 = dst_x+w;
box.y2 = dst_y+h;

sna_device->render.composite(sna_device, 0,
src_bitmap, &src_bo,
mask_bitmap, &mask_bo,
dst_bitmap, &dst_bo,
src_x, src_y,
src_x, src_y,
dst_x, dst_y,
w, h, &cop);

cop.box(sna_device, &cop, &box);
cop.done(sna_device, &cop);

INIT_LIST_HEAD(&execbuffer.objects);
list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects);
list_add_tail(&mask_bitmap->obj->exec_list, &execbuffer.objects);

_kgem_submit(&sna_device->kgem, &execbuffer);

};
@ -1,125 +0,0 @@
|
||||
|
||||
#define FALSE 0
|
||||
#define TRUE 1
|
||||
|
||||
#define DBG(x)
|
||||
//#define DBG(x) dbgprintf x
|
||||
|
||||
#define assert(x)
|
||||
|
||||
|
||||
#include "compiler.h"
|
||||
#include <linux/kernel.h>
|
||||
|
||||
struct pixman_box16
|
||||
{
|
||||
int16_t x1, y1, x2, y2;
|
||||
};
|
||||
|
||||
typedef struct pixman_box16 BoxRec;
|
||||
typedef unsigned int CARD32;
|
||||
|
||||
#include "sna_render.h"
|
||||
#include "kgem.h"
|
||||
|
||||
#define PictOpClear 0
|
||||
#define PictOpSrc 1
|
||||
#define PictOpDst 2
|
||||
#define PictOpOver 3
|
||||
#define PictOpOverReverse 4
|
||||
#define PictOpIn 5
|
||||
#define PictOpInReverse 6
|
||||
#define PictOpOut 7
|
||||
#define PictOpOutReverse 8
|
||||
#define PictOpAtop 9
|
||||
#define PictOpAtopReverse 10
|
||||
#define PictOpXor 11
|
||||
#define PictOpAdd 12
|
||||
#define PictOpSaturate 13
|
||||
#define PictOpMaximum 13
|
||||
|
||||
struct sna {
|
||||
unsigned flags;
|
||||
#define SNA_NO_THROTTLE 0x1
|
||||
#define SNA_NO_DELAYED_FLUSH 0x2
|
||||
|
||||
// int timer[NUM_TIMERS];
|
||||
|
||||
// uint16_t timer_active;
|
||||
// uint16_t timer_ready;
|
||||
|
||||
// int vblank_interval;
|
||||
|
||||
// struct list deferred_free;
|
||||
// struct list dirty_pixmaps;
|
||||
// struct list active_pixmaps;
|
||||
// struct list inactive_clock[2];
|
||||
|
||||
unsigned int tiling;
|
||||
#define SNA_TILING_DISABLE 0x0
|
||||
#define SNA_TILING_FB 0x1
|
||||
#define SNA_TILING_2D 0x2
|
||||
#define SNA_TILING_3D 0x4
|
||||
#define SNA_TILING_ALL (~0)
|
||||
|
||||
int Chipset;
|
||||
// EntityInfoPtr pEnt;
|
||||
// struct pci_device *PciInfo;
|
||||
// struct intel_chipset chipset;
|
||||
|
||||
// PicturePtr clear;
|
||||
struct {
|
||||
uint32_t fill_bo;
|
||||
uint32_t fill_pixel;
|
||||
uint32_t fill_alu;
|
||||
} blt_state;
|
||||
union {
|
||||
// struct gen2_render_state gen2;
|
||||
// struct gen3_render_state gen3;
|
||||
// struct gen4_render_state gen4;
|
||||
// struct gen5_render_state gen5;
|
||||
struct gen6_render_state gen6;
|
||||
// struct gen7_render_state gen7;
|
||||
} render_state;
|
||||
uint32_t have_render;
|
||||
uint32_t default_tiling;
|
||||
|
||||
// Bool directRenderingOpen;
|
||||
// char *deviceName;
|
||||
|
||||
/* Broken-out options. */
|
||||
// OptionInfoPtr Options;
|
||||
|
||||
/* Driver phase/state information */
|
||||
// Bool suspended;
|
||||
|
||||
struct kgem kgem;
|
||||
struct sna_render render;
|
||||
};
|
||||
|
||||
static inline int vertex_space(struct sna *sna)
|
||||
{
|
||||
return sna->render.vertex_size - sna->render.vertex_used;
|
||||
}
|
||||
|
||||
static inline void vertex_emit(struct sna *sna, float v)
|
||||
{
|
||||
assert(sna->render.vertex_used < sna->render.vertex_size);
|
||||
sna->render.vertices[sna->render.vertex_used++] = v;
|
||||
}
|
||||
|
||||
static inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
|
||||
{
|
||||
int16_t *v = (int16_t *)&sna->render.vertices[sna->render.vertex_used++];
|
||||
assert(sna->render.vertex_used <= sna->render.vertex_size);
|
||||
v[0] = x;
|
||||
v[1] = y;
|
||||
}
|
||||
|
||||
static inline void batch_emit(struct sna *sna, uint32_t dword)
|
||||
{
|
||||
assert(sna->kgem.mode != KGEM_NONE);
|
||||
assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
|
||||
sna->kgem.batch[sna->kgem.nbatch++] = dword;
|
||||
}
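
/*
 * Illustrative sketch, not part of the original header: the emitters above
 * are how the gen backends stream a rectangle as three corner vertices
 * (RECTLIST order).  `sna` is assumed to be a valid, initialised device
 * with an active vertex buffer.
 */
static inline void example_emit_box(struct sna *sna, const BoxRec *box)
{
    if (vertex_space(sna) < 3)
        return;                              /* real callers flush/resize the vbo instead */
    vertex_emit_2s(sna, box->x2, box->y2);   /* bottom-right */
    vertex_emit_2s(sna, box->x1, box->y2);   /* bottom-left  */
    vertex_emit_2s(sna, box->x1, box->y1);   /* top-left     */
}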

@ -1,81 +0,0 @@
#ifndef SNA_REG_H
#define SNA_REG_H

/* Flush */
#define MI_FLUSH                        (0x04<<23)
#define MI_FLUSH_DW                     (0x26<<23)

#define MI_WRITE_DIRTY_STATE            (1<<4)
#define MI_END_SCENE                    (1<<3)
#define MI_GLOBAL_SNAPSHOT_COUNT_RESET  (1<<3)
#define MI_INHIBIT_RENDER_CACHE_FLUSH   (1<<2)
#define MI_STATE_INSTRUCTION_CACHE_FLUSH (1<<1)
#define MI_INVALIDATE_MAP_CACHE         (1<<0)
/* broadwater flush bits */
#define BRW_MI_GLOBAL_SNAPSHOT_RESET    (1 << 3)

#define MI_BATCH_BUFFER_END             (0xA << 23)

/* Noop */
#define MI_NOOP                         0x00
#define MI_NOOP_WRITE_ID                (1<<22)
#define MI_NOOP_ID_MASK                 ((1<<22) - 1)

/* Wait for Events */
#define MI_WAIT_FOR_EVENT               (0x03<<23)
#define MI_WAIT_FOR_PIPEB_SVBLANK       (1<<18)
#define MI_WAIT_FOR_PIPEA_SVBLANK       (1<<17)
#define MI_WAIT_FOR_OVERLAY_FLIP        (1<<16)
#define MI_WAIT_FOR_PIPEB_VBLANK        (1<<7)
#define MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW (1<<5)
#define MI_WAIT_FOR_PIPEA_VBLANK        (1<<3)
#define MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW (1<<1)

/* Set the scan line for MI_WAIT_FOR_PIPE?_SCAN_LINE_WINDOW */
#define MI_LOAD_SCAN_LINES_INCL         (0x12<<23)
#define MI_LOAD_SCAN_LINES_DISPLAY_PIPEA (0)
#define MI_LOAD_SCAN_LINES_DISPLAY_PIPEB (0x1<<20)

/* BLT commands */
#define BLT_WRITE_ALPHA                 (1<<21)
#define BLT_WRITE_RGB                   (1<<20)
#define BLT_SRC_TILED                   (1<<15)
#define BLT_DST_TILED                   (1<<11)

#define COLOR_BLT_CMD                   ((2<<29)|(0x40<<22)|(0x3))
#define XY_COLOR_BLT                    ((2<<29)|(0x50<<22)|(0x4))
#define XY_SETUP_BLT                    ((2<<29)|(1<<22)|6)
#define XY_SETUP_MONO_PATTERN_SL_BLT    ((2<<29)|(0x11<<22)|7)
#define XY_SETUP_CLIP                   ((2<<29)|(3<<22)|1)
#define XY_SCANLINE_BLT                 ((2<<29)|(0x25<<22)|1)
#define XY_TEXT_IMMEDIATE_BLT           ((2<<29)|(0x31<<22)|(1<<16))
#define XY_SRC_COPY_BLT_CMD             ((2<<29)|(0x53<<22)|6)
#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|0x4)
#define XY_PAT_BLT_IMMEDIATE            ((2<<29)|(0x72<<22))
#define XY_MONO_PAT                     ((0x2<<29)|(0x52<<22)|0x7)
#define XY_MONO_SRC_COPY                ((0x2<<29)|(0x54<<22)|(0x6))
#define XY_MONO_SRC_COPY_IMM            ((0x2<<29)|(0x71<<22))
#define XY_FULL_MONO_PATTERN_BLT        ((0x2<<29)|(0x57<<22)|0xa)
#define XY_FULL_MONO_PATTERN_MONO_SRC_BLT ((0x2<<29)|(0x58<<22)|0xa)

/* FLUSH commands */
#define BRW_3D(Pipeline,Opcode,Subopcode) \
    ((3 << 29) | \
     ((Pipeline) << 27) | \
     ((Opcode) << 24) | \
     ((Subopcode) << 16))
#define PIPE_CONTROL                    BRW_3D(3, 2, 0)
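/*
 * Worked expansion (illustrative note): BRW_3D(3, 2, 0) evaluates to
 * (3<<29) | (3<<27) | (2<<24) | (0<<16)
 *   = 0x60000000 | 0x18000000 | 0x02000000
 *   = 0x7a000000,
 * i.e. the PIPE_CONTROL opcode dword before any length/flag bits are ORed in.
 */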
#define PIPE_CONTROL_NOWRITE            (0 << 14)
#define PIPE_CONTROL_WRITE_QWORD        (1 << 14)
#define PIPE_CONTROL_WRITE_DEPTH        (2 << 14)
#define PIPE_CONTROL_WRITE_TIME         (3 << 14)
#define PIPE_CONTROL_DEPTH_STALL        (1 << 13)
#define PIPE_CONTROL_WC_FLUSH           (1 << 12)
#define PIPE_CONTROL_IS_FLUSH           (1 << 11)
#define PIPE_CONTROL_TC_FLUSH           (1 << 10)
#define PIPE_CONTROL_NOTIFY_ENABLE      (1 << 8)
#define PIPE_CONTROL_GLOBAL_GTT         (1 << 2)
#define PIPE_CONTROL_LOCAL_PGTT         (0 << 2)
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH  (1 << 0)

#endif

@ -1,478 +0,0 @@
#ifndef SNA_RENDER_H
#define SNA_RENDER_H

typedef int Bool;

#define GRADIENT_CACHE_SIZE 16

struct sna;

struct sna_composite_rectangles {
    struct sna_coordinate {
        int16_t x, y;
    } src, mask, dst;
    int16_t width, height;
};

struct sna_composite_op {
    fastcall void (*blt)(struct sna *sna, const struct sna_composite_op *op,
                         const struct sna_composite_rectangles *r);
    fastcall void (*box)(struct sna *sna,
                         const struct sna_composite_op *op,
                         const BoxRec *box);
    void (*boxes)(struct sna *sna, const struct sna_composite_op *op,
                  const BoxRec *box, int nbox);
    void (*done)(struct sna *sna, const struct sna_composite_op *op);

    struct sna_damage **damage;

    uint32_t op;

    struct {
        bitmap_t *pixmap;
        CARD32 format;
        struct kgem_bo *bo;
        int16_t x, y;
        uint16_t width, height;
    } dst;

    struct sna_composite_channel {
        struct kgem_bo *bo;
//        PictTransform *transform;
        uint16_t width;
        uint16_t height;
        uint32_t pict_format;
        uint32_t card_format;
        uint32_t filter;
        uint32_t repeat;
        uint32_t is_affine : 1;
        uint32_t is_solid : 1;
        uint32_t is_linear : 1;
        uint32_t is_opaque : 1;
        uint32_t alpha_fixup : 1;
        uint32_t rb_reversed : 1;
        int16_t offset[2];
        float scale[2];

//        pixman_transform_t embedded_transform;

        union {
            struct {
                uint32_t pixel;
                float linear_dx;
                float linear_dy;
                float linear_offset;
            } gen2;
            struct gen3_shader_channel {
                int type;
                uint32_t mode;
                uint32_t constants;
            } gen3;
        } u;
    } src, mask;
    uint32_t is_affine : 1;
    uint32_t has_component_alpha : 1;
    uint32_t need_magic_ca_pass : 1;
    uint32_t rb_reversed : 1;

    int16_t floats_per_vertex;
    int16_t floats_per_rect;
    fastcall void (*prim_emit)(struct sna *sna,
                               const struct sna_composite_op *op,
                               const struct sna_composite_rectangles *r);

    struct sna_composite_redirect {
        struct kgem_bo *real_bo;
        struct sna_damage **real_damage, *damage;
        BoxRec box;
    } redirect;

    union {
        struct sna_blt_state {
            bitmap_t *src_pixmap;
            int16_t sx, sy;

            uint32_t inplace :1;
            uint32_t overwrites:1;
            uint32_t bpp : 6;

            uint32_t cmd;
            uint32_t br13;
            uint32_t pitch[2];
            uint32_t pixel;
            struct kgem_bo *bo[2];
        } blt;

        struct {
            float constants[8];
            uint32_t num_constants;
        } gen3;

        struct {
            int wm_kernel;
            int ve_id;
        } gen4;

        struct {
            int wm_kernel;
            int ve_id;
        } gen5;

        struct {
            int wm_kernel;
            int nr_surfaces;
            int nr_inputs;
            int ve_id;
        } gen6;

        struct {
            int wm_kernel;
            int nr_surfaces;
            int nr_inputs;
            int ve_id;
        } gen7;

        void *priv;
    } u;
};


struct sna_render {
    int max_3d_size;
    int max_3d_pitch;

    Bool (*composite)(struct sna *sna, uint8_t op,
                      bitmap_t *src, struct kgem_bo *src_bo,
                      bitmap_t *mask, struct kgem_bo *mask_bo,
                      bitmap_t *dst, struct kgem_bo *dst_bo,
                      int16_t src_x, int16_t src_y,
                      int16_t msk_x, int16_t msk_y,
                      int16_t dst_x, int16_t dst_y,
                      int16_t w, int16_t h,
                      struct sna_composite_op *tmp);
/*
    Bool (*composite_spans)(struct sna *sna, uint8_t op,
                            PicturePtr dst, PicturePtr src,
                            int16_t src_x, int16_t src_y,
                            int16_t dst_x, int16_t dst_y,
                            int16_t w, int16_t h,
                            unsigned flags,
                            struct sna_composite_spans_op *tmp);
#define COMPOSITE_SPANS_RECTILINEAR 0x1

    Bool (*video)(struct sna *sna,
                  struct sna_video *video,
                  struct sna_video_frame *frame,
                  RegionPtr dstRegion,
                  short src_w, short src_h,
                  short drw_w, short drw_h,
                  PixmapPtr pixmap);

    Bool (*fill_boxes)(struct sna *sna,
                       CARD8 op,
                       PictFormat format,
                       const xRenderColor *color,
                       PixmapPtr dst, struct kgem_bo *dst_bo,
                       const BoxRec *box, int n);
    Bool (*fill)(struct sna *sna, uint8_t alu,
                 PixmapPtr dst, struct kgem_bo *dst_bo,
                 uint32_t color,
                 struct sna_fill_op *tmp);
    Bool (*fill_one)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo,
                     uint32_t color,
                     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
                     uint8_t alu);
*/
    Bool (*clear)(struct sna *sna, bitmap_t *dst, struct kgem_bo *dst_bo);
/*
    Bool (*copy_boxes)(struct sna *sna, uint8_t alu,
                       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
                       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
                       const BoxRec *box, int n);
*/
    Bool (*copy)(struct sna *sna, uint8_t alu,
                 bitmap_t *src, struct kgem_bo *src_bo,
                 bitmap_t *dst, struct kgem_bo *dst_bo,
                 int dst_x, int dst_y, int src_x, int src_y,
                 int w, int h);

    void (*flush)(struct sna *sna);
    void (*reset)(struct sna *sna);
    void (*fini)(struct sna *sna);

//    struct sna_alpha_cache {
//        struct kgem_bo *cache_bo;
//        struct kgem_bo *bo[256];
//    } alpha_cache;

    struct sna_solid_cache {
        struct kgem_bo *cache_bo;
        uint32_t color[1024];
        struct kgem_bo *bo[1024];
        int last;
        int size;
        int dirty;
    } solid_cache;

//    struct {
//        struct sna_gradient_cache {
//            struct kgem_bo *bo;
//            int nstops;
//            PictGradientStop *stops;
//        } cache[GRADIENT_CACHE_SIZE];
//        int size;
//    } gradient_cache;

//    struct sna_glyph_cache{
//        PicturePtr picture;
//        struct sna_glyph **glyphs;
//        uint16_t count;
//        uint16_t evict;
//    } glyph[2];

    uint16_t vertex_start;
    uint16_t vertex_index;
    uint16_t vertex_used;
    uint16_t vertex_size;
    uint16_t vertex_reloc[8];

    struct kgem_bo *vbo;
    float *vertices;

    float vertex_data[1024];
};

enum {
    GEN6_WM_KERNEL_NOMASK = 0,
    GEN6_WM_KERNEL_MASK,

    GEN6_KERNEL_COUNT
};

struct gen6_render_state {
    struct kgem_bo *general_bo;

    uint32_t vs_state;
    uint32_t sf_state;
    uint32_t sf_mask_state;
    uint32_t wm_state;
    uint32_t wm_kernel[GEN6_KERNEL_COUNT];

    uint32_t cc_vp;
    uint32_t cc_blend;

    uint32_t drawrect_offset;
    uint32_t drawrect_limit;
    uint32_t blend;
    uint32_t samplers;
    uint32_t kernel;

    uint16_t num_sf_outputs;
    uint16_t vb_id;
    uint16_t ve_id;
    uint16_t vertex_offset;
    uint16_t last_primitive;
    int16_t floats_per_vertex;
    uint16_t surface_table;

    Bool needs_invariant;
};



struct sna_static_stream {
    uint32_t size, used;
    uint8_t *data;
};

int sna_static_stream_init(struct sna_static_stream *stream);
uint32_t sna_static_stream_add(struct sna_static_stream *stream,
                               const void *data, uint32_t len, uint32_t align);
void *sna_static_stream_map(struct sna_static_stream *stream,
                            uint32_t len, uint32_t align);
uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream,
                                    void *ptr);
struct kgem_bo *sna_static_stream_fini(struct sna *sna,
                                       struct sna_static_stream *stream);

/*
struct kgem_bo *
sna_render_get_solid(struct sna *sna,
                     uint32_t color);

void
sna_render_flush_solid(struct sna *sna);

struct kgem_bo *
sna_render_get_gradient(struct sna *sna,
                        PictGradient *pattern);

uint32_t sna_rgba_for_color(uint32_t color, int depth);
Bool sna_picture_is_solid(PicturePtr picture, uint32_t *color);
*/
void no_render_init(struct sna *sna);

Bool gen2_render_init(struct sna *sna);
Bool gen3_render_init(struct sna *sna);
Bool gen4_render_init(struct sna *sna);
Bool gen5_render_init(struct sna *sna);
Bool gen6_render_init(struct sna *sna);
Bool gen7_render_init(struct sna *sna);
/*
Bool sna_tiling_composite(uint32_t op,
                          PicturePtr src,
                          PicturePtr mask,
                          PicturePtr dst,
                          int16_t src_x, int16_t src_y,
                          int16_t mask_x, int16_t mask_y,
                          int16_t dst_x, int16_t dst_y,
                          int16_t width, int16_t height,
                          struct sna_composite_op *tmp);
Bool sna_tiling_fill_boxes(struct sna *sna,
                           CARD8 op,
                           PictFormat format,
                           const xRenderColor *color,
                           PixmapPtr dst, struct kgem_bo *dst_bo,
                           const BoxRec *box, int n);

Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
                           PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
                           PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
                           const BoxRec *box, int n);

Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
                               struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
                               struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
                               int bpp, const BoxRec *box, int nbox);

Bool sna_blt_composite(struct sna *sna,
                       uint32_t op,
                       PicturePtr src,
                       PicturePtr dst,
                       int16_t src_x, int16_t src_y,
                       int16_t dst_x, int16_t dst_y,
                       int16_t width, int16_t height,
                       struct sna_composite_op *tmp);

bool sna_blt_fill(struct sna *sna, uint8_t alu,
                  struct kgem_bo *bo,
                  int bpp,
                  uint32_t pixel,
                  struct sna_fill_op *fill);

bool sna_blt_copy(struct sna *sna, uint8_t alu,
                  struct kgem_bo *src,
                  struct kgem_bo *dst,
                  int bpp,
                  struct sna_copy_op *copy);

Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
                        struct kgem_bo *bo,
                        int bpp,
                        uint32_t pixel,
                        const BoxRec *box, int n);

Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
                        struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
                        struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
                        int bpp,
                        const BoxRec *box, int n);
Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
                                 PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
                                 PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
                                 const BoxRec *box, int nbox);

Bool _sna_get_pixel_from_rgba(uint32_t *pixel,
                              uint16_t red,
                              uint16_t green,
                              uint16_t blue,
                              uint16_t alpha,
                              uint32_t format);

static inline Bool
sna_get_pixel_from_rgba(uint32_t *pixel,
                        uint16_t red,
                        uint16_t green,
                        uint16_t blue,
                        uint16_t alpha,
                        uint32_t format)
{
    switch (format) {
    case PICT_x8r8g8b8:
        alpha = 0xffff;
    case PICT_a8r8g8b8:
        *pixel = ((alpha >> 8 << 24) |
                  (red >> 8 << 16) |
                  (green & 0xff00) |
                  (blue >> 8));
        return TRUE;
    case PICT_a8:
        *pixel = alpha >> 8;
        return TRUE;
    }

    return _sna_get_pixel_from_rgba(pixel, red, green, blue, alpha, format);
}
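
   Worked example for the packing above (illustrative, added note): with
   16-bit components red = 0xffff, green = 0x0000, blue = 0x0000 and
   alpha = 0xffff in PICT_a8r8g8b8, each component keeps its top 8 bits,
   giving (0xff<<24) | (0xff<<16) | 0x00 | 0x00 = 0xffff0000 -- opaque red.
   Note the PICT_x8r8g8b8 case deliberately falls through after forcing
   alpha to 0xffff.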

int
sna_render_pixmap_bo(struct sna *sna,
                     struct sna_composite_channel *channel,
                     PixmapPtr pixmap,
                     int16_t x, int16_t y,
                     int16_t w, int16_t h,
                     int16_t dst_x, int16_t dst_y);

bool
sna_render_pixmap_partial(struct sna *sna,
                          PixmapPtr pixmap,
                          struct kgem_bo *bo,
                          struct sna_composite_channel *channel,
                          int16_t x, int16_t y,
                          int16_t w, int16_t h);

int
sna_render_picture_extract(struct sna *sna,
                           PicturePtr picture,
                           struct sna_composite_channel *channel,
                           int16_t x, int16_t y,
                           int16_t w, int16_t h,
                           int16_t dst_x, int16_t dst_y);

int
sna_render_picture_fixup(struct sna *sna,
                         PicturePtr picture,
                         struct sna_composite_channel *channel,
                         int16_t x, int16_t y,
                         int16_t w, int16_t h,
                         int16_t dst_x, int16_t dst_y);

int
sna_render_picture_convert(struct sna *sna,
                           PicturePtr picture,
                           struct sna_composite_channel *channel,
                           PixmapPtr pixmap,
                           int16_t x, int16_t y,
                           int16_t w, int16_t h,
                           int16_t dst_x, int16_t dst_y);

inline static void sna_render_composite_redirect_init(struct sna_composite_op *op)
{
    struct sna_composite_redirect *t = &op->redirect;
    t->real_bo = NULL;
    t->damage = NULL;
}

Bool
sna_render_composite_redirect(struct sna *sna,
                              struct sna_composite_op *op,
                              int x, int y, int width, int height);

void
sna_render_composite_redirect_done(struct sna *sna,
                                   const struct sna_composite_op *op);

bool
sna_composite_mask_is_opaque(PicturePtr mask);
*/

#endif /* SNA_RENDER_H */

@ -1,108 +0,0 @@
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/kernel.h>
#include "../bitmap.h"

#include "sna.h"
#include "sna_render.h"
#include <memory.h>

#if DEBUG_STREAM
#undef DBG
#define DBG(x) ErrorF x
#endif

int sna_static_stream_init(struct sna_static_stream *stream)
{
    stream->used = 0;
    stream->size = 64*1024;

    stream->data = malloc(stream->size);
    return stream->data != NULL;
}

static uint32_t sna_static_stream_alloc(struct sna_static_stream *stream,
                                        uint32_t len, uint32_t align)
{
    uint32_t offset = ALIGN(stream->used, align);
    uint32_t size = offset + len;

    if (size > stream->size) {
/*
        do
            stream->size *= 2;
        while (stream->size < size);

        stream->data = realloc(stream->data, stream->size);
*/
        dbgprintf("%s: EPIC FAIL\n", __FUNCTION__);
        return 0;
    }

    stream->used = size;
    return offset;
}

uint32_t sna_static_stream_add(struct sna_static_stream *stream,
                               const void *data, uint32_t len, uint32_t align)
{
    uint32_t offset = sna_static_stream_alloc(stream, len, align);
    memcpy(stream->data + offset, data, len);
    return offset;
}

void *sna_static_stream_map(struct sna_static_stream *stream,
                            uint32_t len, uint32_t align)
{
    uint32_t offset = sna_static_stream_alloc(stream, len, align);
    return memset(stream->data + offset, 0, len);
}

uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream, void *ptr)
{
    return (uint8_t *)ptr - stream->data;
}


struct kgem_bo *sna_static_stream_fini(struct sna *sna,
                                       struct sna_static_stream *stream)
{
    struct kgem_bo *bo;

    DBG(("uploaded %d bytes of static state\n", stream->used));

    bo = kgem_create_linear(&sna->kgem, stream->used);
    if (bo && !kgem_bo_write(&sna->kgem, bo, stream->data, stream->used)) {
//        kgem_bo_destroy(&sna->kgem, bo);
        return NULL;
    }

    free(stream->data);
    LEAVE();
    return bo;
}
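
/*
 * Illustrative usage sketch, not part of the original file: the static
 * stream collects immutable GPU state in system memory, then uploads the
 * whole blob to one linear bo.  `sna` is assumed to be a valid device
 * pointer and `kernel`/`kernel_size` some shader program blob; the 64-byte
 * alignment is a typical choice, not a value taken from this file.
 */
static struct kgem_bo *example_upload_static_state(struct sna *sna,
                                                   const void *kernel,
                                                   uint32_t kernel_size,
                                                   uint32_t *kernel_offset)
{
    struct sna_static_stream stream;

    if (!sna_static_stream_init(&stream))
        return NULL;

    /* append the blob; the returned offset is later used in relocations */
    *kernel_offset = sna_static_stream_add(&stream, kernel, kernel_size, 64);

    /* hand everything to kgem; this also frees the CPU-side copy */
    return sna_static_stream_fini(sna, &stream);
}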

@ -4,6 +4,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/hdmi.h>


struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
@ -102,3 +103,410 @@ void shmem_file_delete(struct file *filep)
    if (filep->pages)
        kfree(filep->pages);
}

/**
 * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
 * @frame: HDMI AVI infoframe
 *
 * Returns 0 on success or a negative error code on failure.
 */
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
    memset(frame, 0, sizeof(*frame));

    frame->type = HDMI_INFOFRAME_TYPE_AVI;
    frame->version = 2;
    frame->length = 13;

    return 0;
}
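
/*
 * Minimal usage sketch (illustrative, not from the original): callers
 * initialise the frame first, then fill in stream-specific fields defined
 * by <linux/hdmi.h> such as the video identification code.
 */
static int example_setup_avi(struct hdmi_avi_infoframe *frame)
{
    int ret = hdmi_avi_infoframe_init(frame);  /* sets type/version/length */
    if (ret < 0)
        return ret;
    /* e.g. frame->video_code = 16; 1080p60 per CEA-861, shown as an example */
    return 0;
}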

static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
    while (bytes) {
        if (*start != value)
            return (void *)start;
        start++;
        bytes--;
    }
    return NULL;
}

/**
 * memchr_inv - Find an unmatching character in an area of memory.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
    u8 value = c;
    u64 value64;
    unsigned int words, prefix;

    if (bytes <= 16)
        return check_bytes8(start, value, bytes);

    value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
    value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
    value64 *= 0x01010101;
    value64 |= value64 << 32;
#else
    value64 |= value64 << 8;
    value64 |= value64 << 16;
    value64 |= value64 << 32;
#endif

    prefix = (unsigned long)start % 8;
    if (prefix) {
        u8 *r;

        prefix = 8 - prefix;
        r = check_bytes8(start, value, prefix);
        if (r)
            return r;
        start += prefix;
        bytes -= prefix;
    }

    words = bytes / 8;

    while (words) {
        if (*(u64 *)start != value64)
            return check_bytes8(start, value, 8);
        start += 8;
        words--;
    }

    return check_bytes8(start, value, bytes % 8);
}
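
/*
 * Illustrative usage (not in the original): verify that a buffer is still
 * zero-filled.  memchr_inv() returns the first offending byte, so a NULL
 * result means every byte matches.
 */
static bool example_buffer_is_clear(const void *buf, size_t size)
{
    return memchr_inv(buf, 0, size) == NULL;
}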

/* Identity mapping for this port: no IOMMU, so the bus address of each
 * segment is simply its physical address. */
int dma_map_sg(struct device *dev, struct scatterlist *sglist,
               int nelems, int dir)
{
    struct scatterlist *s;
    int i;

    for_each_sg(sglist, s, nelems, i) {
        s->dma_address = (dma_addr_t)sg_phys(s);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
        s->dma_length = s->length;
#endif
    }

    return nelems;
}
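
/*
 * Illustrative caller sketch (not in the original): with the identity
 * mapping above, the mapped entries are just physical addresses.  `sgl`
 * and `nents` are assumed to describe an already-populated scatterlist.
 */
static void example_dump_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
    struct scatterlist *sg;
    int i, n;

    n = dma_map_sg(dev, sgl, nents, 0);
    for_each_sg(sgl, sg, n, i)
        printk("seg %d: %08lx + %u\n", i,
               (unsigned long)sg->dma_address, sg->length);
}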

int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
    int i;

    i = vsnprintf(buf, size, fmt, args);

    if (likely(i < size))
        return i;
    if (size != 0)
        return size - 1;
    return 0;
}


int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
    va_list args;
    int i;

    va_start(args, fmt);
    i = vscnprintf(buf, size, fmt, args);
    va_end(args);

    return i;
}
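
/*
 * Illustrative example (not in the original): unlike snprintf(), which
 * returns the length the string *would* have had, scnprintf() returns the
 * number of characters actually stored, so results can be chained safely:
 */
static int example_fill(char *buf, size_t len)
{
    int n = 0;

    n += scnprintf(buf + n, len - n, "pipe %c", 'A');
    n += scnprintf(buf + n, len - n, " [%s]", "enabled");
    return n;    /* total bytes written, never past len - 1 */
}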

#define _U   0x01    /* upper */
#define _L   0x02    /* lower */
#define _D   0x04    /* digit */
#define _C   0x08    /* cntrl */
#define _P   0x10    /* punct */
#define _S   0x20    /* white space (space/lf/tab) */
#define _X   0x40    /* hex digit */
#define _SP  0x80    /* hard space (0x20) */

extern const unsigned char _ctype[];

#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)  ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)  ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)  ((__ismask(c)&(_C)) != 0)
#define isdigit(c)  ((__ismask(c)&(_D)) != 0)
#define isgraph(c)  ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)  ((__ismask(c)&(_L)) != 0)
#define isprint(c)  ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)  ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)  ((__ismask(c)&(_S)) != 0)
#define isupper(c)  ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)

#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)

static inline unsigned char __tolower(unsigned char c)
{
    if (isupper(c))
        c -= 'A'-'a';
    return c;
}

static inline unsigned char __toupper(unsigned char c)
{
    if (islower(c))
        c -= 'a'-'A';
    return c;
}

#define tolower(c) __tolower(c)
#define toupper(c) __toupper(c)

/*
 * Fast implementation of tolower() for internal usage. Do not use in your
 * code.
 */
static inline char _tolower(const char c)
{
    return c | 0x20;
}

//const char hex_asc[] = "0123456789abcdef";

/**
 * hex_to_bin - convert a hex digit to its real value
 * @ch: ascii character represents hex digit
 *
 * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
 * input.
 */
int hex_to_bin(char ch)
{
    if ((ch >= '0') && (ch <= '9'))
        return ch - '0';
    ch = tolower(ch);
    if ((ch >= 'a') && (ch <= 'f'))
        return ch - 'a' + 10;
    return -1;
}
EXPORT_SYMBOL(hex_to_bin);
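
/*
 * Illustrative values: hex_to_bin('7') == 7, hex_to_bin('B') == 11
 * (case-insensitive via tolower above), hex_to_bin('g') == -1.
 */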

/**
 * hex2bin - convert an ascii hexadecimal string to its binary representation
 * @dst: binary result
 * @src: ascii hexadecimal string
 * @count: result length
 *
 * Return 0 on success, -1 in case of bad input.
 */
int hex2bin(u8 *dst, const char *src, size_t count)
{
    while (count--) {
        int hi = hex_to_bin(*src++);
        int lo = hex_to_bin(*src++);

        if ((hi < 0) || (lo < 0))
            return -1;

        *dst++ = (hi << 4) | lo;
    }
    return 0;
}
EXPORT_SYMBOL(hex2bin);
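
/*
 * Illustrative call (not in the original): hex2bin(mac, "0013d4c2a6f0", 6)
 * fills mac[] with {0x00, 0x13, 0xd4, 0xc2, 0xa6, 0xf0}.  @count is the
 * number of *output* bytes, so @src must supply 2*count hex digits.
 */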

/**
 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @linebuf: where to put the converted data
 * @linebuflen: total size of @linebuf, including space for terminating NUL
 * @ascii: include ASCII after the hex output
 *
 * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 *
 * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
 * to a hex + ASCII dump at the supplied memory location.
 * The converted output is always NUL-terminated.
 *
 * E.g.:
 *   hex_dump_to_buffer(frame->data, frame->len, 16, 1,
 *                      linebuf, sizeof(linebuf), true);
 *
 * example output buffer:
 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 */
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
                        int groupsize, char *linebuf, size_t linebuflen,
                        bool ascii)
{
    const u8 *ptr = buf;
    u8 ch;
    int j, lx = 0;
    int ascii_column;

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    if (!len)
        goto nil;
    if (len > rowsize)             /* limit to one line at a time */
        len = rowsize;
    if ((len % groupsize) != 0)    /* no mixed size output */
        groupsize = 1;

    switch (groupsize) {
    case 8: {
        const u64 *ptr8 = buf;
        int ngroups = len / groupsize;

        for (j = 0; j < ngroups; j++)
            lx += scnprintf(linebuf + lx, linebuflen - lx,
                            "%s%16.16llx", j ? " " : "",
                            (unsigned long long)*(ptr8 + j));
        ascii_column = 17 * ngroups + 2;
        break;
    }

    case 4: {
        const u32 *ptr4 = buf;
        int ngroups = len / groupsize;

        for (j = 0; j < ngroups; j++)
            lx += scnprintf(linebuf + lx, linebuflen - lx,
                            "%s%8.8x", j ? " " : "", *(ptr4 + j));
        ascii_column = 9 * ngroups + 2;
        break;
    }

    case 2: {
        const u16 *ptr2 = buf;
        int ngroups = len / groupsize;

        for (j = 0; j < ngroups; j++)
            lx += scnprintf(linebuf + lx, linebuflen - lx,
                            "%s%4.4x", j ? " " : "", *(ptr2 + j));
        ascii_column = 5 * ngroups + 2;
        break;
    }

    default:
        for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
            ch = ptr[j];
            linebuf[lx++] = hex_asc_hi(ch);
            linebuf[lx++] = hex_asc_lo(ch);
            linebuf[lx++] = ' ';
        }
        if (j)
            lx--;

        ascii_column = 3 * rowsize + 2;
        break;
    }
    if (!ascii)
        goto nil;

    while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
        linebuf[lx++] = ' ';
    for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
        ch = ptr[j];
        linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
    }
nil:
    linebuf[lx++] = '\0';
}
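
/*
 * Sizing note (illustrative): the worst case is rowsize 32 in 1-byte mode
 * with ASCII enabled -- 32*3 hex characters, a 2-space gap, 32 ASCII
 * characters and the NUL: 32*3 + 2 + 32 + 1 = 131 bytes.  That is exactly
 * the linebuf[] dimension used by print_hex_dump() below.
 */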

/**
 * print_hex_dump - print a text hex dump to syslog for a binary blob of data
 * @level: kernel log level (e.g. KERN_DEBUG)
 * @prefix_str: string to prefix each line with;
 *  caller supplies trailing spaces for alignment if desired
 * @prefix_type: controls whether prefix of an offset, address, or none
 *  is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @ascii: include ASCII after the hex output
 *
 * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
 * to the kernel log at the specified kernel log level, with an optional
 * leading prefix.
 *
 * print_hex_dump() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 * print_hex_dump() iterates over the entire input @buf, breaking it into
 * "line size" chunks to format and print.
 *
 * E.g.:
 *   print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
 *                  16, 1, frame->data, frame->len, true);
 *
 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c  pqrstuvwxyz{|}~.
 */
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
                    int rowsize, int groupsize,
                    const void *buf, size_t len, bool ascii)
{
    const u8 *ptr = buf;
    int i, linelen, remaining = len;
    unsigned char linebuf[32 * 3 + 2 + 32 + 1];

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    for (i = 0; i < len; i += rowsize) {
        linelen = min(remaining, rowsize);
        remaining -= rowsize;

        hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
                           linebuf, sizeof(linebuf), ascii);

        switch (prefix_type) {
        case DUMP_PREFIX_ADDRESS:
            printk("%s%s%p: %s\n",
                   level, prefix_str, ptr + i, linebuf);
            break;
        case DUMP_PREFIX_OFFSET:
            printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
            break;
        default:
            printk("%s%s%s\n", level, prefix_str, linebuf);
            break;
        }
    }
}

void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
                          const void *buf, size_t len)
{
    print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
                   buf, len, true);
}
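
/*
 * Illustrative one-liner (not in the original; `batch` and `nbatch` are
 * hypothetical names): dump a command buffer in the default 16-byte,
 * 1-byte-group layout with offsets:
 *
 *    print_hex_dump_bytes("batch: ", DUMP_PREFIX_OFFSET, batch, nbatch * 4);
 */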