i915: enable hotplug && power savings

git-svn-id: svn://kolibrios.org@3482 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2013-04-26 11:01:23 +00:00
parent d67b35544c
commit c38691bc69
23 changed files with 365 additions and 256 deletions

View File

@@ -1,4 +1,5 @@
CC = gcc
AS = as
@@ -7,7 +8,7 @@ DRV_INCLUDES = $(DRV_TOPDIR)/include
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i586 -fomit-frame-pointer -fno-builtin-printf \
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf \
-mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2
NAME:= libddk
@@ -31,6 +32,7 @@ NAME_SRCS:= \
linux/ctype.c \
linux/string.c \
linux/time.c \
linux/workqueue.c \
malloc/malloc.c \
stdio/vsprintf.c \
string/_memmove.S \

View File

@@ -24,13 +24,14 @@
.global _FreeKernelSpace
.global _FreePage
.global _GetCpuFreq
.global _GetDisplay
.global _GetEvent
.global _GetPgAddr
.global _GetPid
.global _GetService
.global _GetTimerTicks
.global _GetStackBase
.global _GetTimerTicks
.global _GetWindowRect
.global _KernelAlloc
@@ -91,6 +92,8 @@
.def _FreeKernelSpace; .scl 2; .type 32; .endef
.def _FreePage; .scl 2; .type 32; .endef
.def _GetDisplay; .scl 2; .type 32; .endef
.def _GetEvent; .scl 2; .type 32; .endef
.def _GetPid; .scl 2; .type 32; .endef
@@ -158,6 +161,7 @@ _DiskMediaChanged:
_FreeKernelSpace:
_FreePage:
_GetCpuFreq:
_GetDisplay:
_GetEvent:
_GetPid:
@@ -229,6 +233,8 @@ _WaitEventTimeout:
.ascii " -export:FreeKernelSpace" # stdcall
.ascii " -export:FreePage" #
.ascii " -export:GetCpuFreq" #
.ascii " -export:GetDisplay" # stdcall
.ascii " -export:GetEvent" #
.ascii " -export:GetPid" #

View File

@@ -0,0 +1,105 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <ddk.h>
struct workqueue_struct *alloc_workqueue(const char *fmt,
unsigned int flags,
int max_active)
{
struct workqueue_struct *wq;
wq = kzalloc(sizeof(*wq),0);
if (!wq)
goto err;
INIT_LIST_HEAD(&wq->worklist);
INIT_LIST_HEAD(&wq->delayed_worklist);
return wq;
err:
return NULL;
}
void run_workqueue(struct workqueue_struct *cwq)
{
unsigned long irqflags;
// dbgprintf("wq: %x head %x, next %x\n",
// cwq, &cwq->worklist, cwq->worklist.next);
repeat:
spin_lock_irqsave(&cwq->lock, irqflags);
while (!list_empty(&cwq->worklist))
{
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
work_func_t f = work->func;
list_del_init(cwq->worklist.next);
// dbgprintf("head %x, next %x\n",
// &cwq->worklist, cwq->worklist.next);
spin_unlock_irqrestore(&cwq->lock, irqflags);
f(work);
spin_lock_irqsave(&cwq->lock, irqflags);
}
spin_unlock_irqrestore(&cwq->lock, irqflags);
delay(1);
goto repeat;
}
bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
unsigned long flags;
if(!list_empty(&work->entry))
return 0;
// dbgprintf("%s %p queue: %p\n", __FUNCTION__, work, wq);
spin_lock_irqsave(&wq->lock, flags);
list_add_tail(&work->entry, &wq->worklist);
spin_unlock_irqrestore(&wq->lock, flags);
return 1;
};
void __stdcall delayed_work_timer_fn(unsigned long __data)
{
struct delayed_work *dwork = (struct delayed_work *)__data;
struct workqueue_struct *wq = dwork->work.data;
queue_work(wq, &dwork->work);
}
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
struct work_struct *work = &dwork->work;
if (delay == 0)
return queue_work(wq, &dwork->work);
// dbgprintf("%s %p queue: %p\n", __FUNCTION__, &dwork->work, wq);
work->data = wq;
TimerHs(delay,0, delayed_work_timer_fn, dwork);
return 1;
}
bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
return queue_delayed_work(system_wq, dwork, delay);
}
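
Taken together, this new workqueue.c gives the DDK a minimal single-threaded workqueue: queue_work() links a work_struct onto the list under the queue spinlock, delayed work is armed through the TimerHs() kernel timer, and run_workqueue() drains the list forever from whichever thread calls it. A minimal usage sketch under assumed names (hotplug_work, hotplug_fn and example_init are illustrative, not part of the commit):

#include <linux/workqueue.h>

static void hotplug_fn(struct work_struct *work)
{
    /* executes in the context of the thread that runs run_workqueue() */
    dbgprintf("hotplug event handled\n");
}

static struct work_struct hotplug_work;

void example_init(struct workqueue_struct *wq)
{
    INIT_WORK(&hotplug_work, hotplug_fn);  /* empty list entry + handler */
    queue_work(wq, &hotplug_work);         /* no-op if already queued */
    run_workqueue(wq);                     /* never returns: drain, delay(1), repeat */
}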

View File

@@ -74,6 +74,9 @@
//#include <linux/poll.h>
//#include <asm/pgalloc.h>
#include <linux/workqueue.h>
#include "drm.h"
#include <linux/idr.h>
@@ -972,6 +975,7 @@ struct drm_driver {
void (*gem_free_object) (struct drm_gem_object *obj);
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
u32 driver_features;
};
@@ -1204,6 +1208,11 @@ struct drm_device {
#define DRM_SWITCH_POWER_OFF 1
#define DRM_SWITCH_POWER_CHANGING 2
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
return ((dev->driver->driver_features & feature) ? 1 : 0);
}
static inline int drm_dev_to_irq(struct drm_device *dev)
{

View File

@@ -62,4 +62,18 @@
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
#define printk_once(fmt, ...) \
({ \
static bool __print_once; \
\
if (!__print_once) { \
__print_once = true; \
printk(fmt, ##__VA_ARGS__); \
} \
})
#define pr_warn_once(fmt, ...) \
printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#endif

View File

@@ -302,6 +302,35 @@ extern u64 nsec_to_clock_t(u64 x);
extern u64 nsecs_to_jiffies64(u64 n);
extern unsigned long nsecs_to_jiffies(u64 n);
static unsigned long round_jiffies_common(unsigned long j, bool force_up)
{
int rem;
unsigned long original = j;
rem = j % HZ;
/*
* If the target jiffie is just after a whole second (which can happen
* due to delays of the timer irq, long irq off times etc etc) then
* we should round down to the whole second, not up. Use 1/4th second
* as cutoff for this rounding as an extreme upper bound for this.
* But never round down if @force_up is set.
*/
if (rem < HZ/4 && !force_up) /* round down */
j = j - rem;
else /* round up */
j = j - rem + HZ;
if (j <= GetTimerTicks()) /* rounding ate our timeout entirely; */
return original;
return j;
}
unsigned long round_jiffies_up_relative(unsigned long j);
#define TIMESTAMP_SIZE 30
#endif
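
A quick trace of round_jiffies_common(), assuming HZ == 100 and a current tick count that is a whole multiple of HZ (values illustrative):

/* j = now + 105: rem = 5,  rem < HZ/4, !force_up -> round down to now + 100 */
/* j = now + 130: rem = 30, rem >= HZ/4           -> round up   to now + 200 */
/* with force_up set (the round_jiffies_up_relative() path) j = now + 105
 * also rounds up to now + 200; a result at or before GetTimerTicks() falls
 * back to the original j, so a timeout is never rounded away entirely. */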

View File

@@ -411,5 +411,12 @@ struct pagelist {
(p) = (v); \
})
unsigned int hweight16(unsigned int w);
#define cpufreq_quick_get_max(x) GetCpuFreq()
extern unsigned int tsc_khz;
#endif

View File

@@ -27,7 +27,7 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
list_add(&new->task_list, &head->task_list);
}
/*
#define __wait_event(wq, condition) \
do { \
DEFINE_WAIT(__wait); \
@@ -41,7 +41,7 @@ do { \
finish_wait(&wq, &__wait); \
} while (0)
*/
#define wait_event_timeout(wq, condition, timeout) \
({ \
@@ -133,76 +133,6 @@ init_waitqueue_head(wait_queue_head_t *q)
};
/*
* Workqueue flags and constants. For details, please refer to
* Documentation/workqueue.txt.
*/
enum {
WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */
WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
struct work_struct;
struct workqueue_struct {
spinlock_t lock;
struct list_head worklist;
};
typedef void (*work_func_t)(struct work_struct *work);
struct work_struct {
struct list_head entry;
struct workqueue_struct *data;
work_func_t func;
};
struct delayed_work {
struct work_struct work;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
return container_of(work, struct delayed_work, work);
}
struct workqueue_struct *alloc_workqueue_key(const char *fmt,
unsigned int flags, int max_active);
#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
#define INIT_WORK(_work, _func) \
do { \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = _func; \
} while (0)
#define INIT_DELAYED_WORK(_work, _func) \
do { \
INIT_LIST_HEAD(&(_work)->work.entry); \
(_work)->work.func = _func; \
} while (0)
struct completion {
unsigned int done;
wait_queue_head_t wait;
@@ -221,7 +151,5 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
#endif

View File

@@ -0,0 +1,85 @@
#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H
#include <linux/list.h>
#include <syscall.h>
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
/*
* Workqueue flags and constants. For details, please refer to
* Documentation/workqueue.txt.
*/
enum {
WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */
WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
struct workqueue_struct {
spinlock_t lock;
struct list_head worklist;
struct list_head delayed_worklist;
};
struct work_struct {
struct list_head entry;
struct workqueue_struct *data;
work_func_t func;
};
struct delayed_work {
struct work_struct work;
unsigned int delay;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
return container_of(work, struct delayed_work, work);
}
extern struct workqueue_struct *system_wq;
void run_workqueue(struct workqueue_struct *cwq);
struct workqueue_struct *alloc_workqueue_key(const char *fmt,
unsigned int flags, int max_active);
#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
#define INIT_WORK(_work, _func) \
do { \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = _func; \
} while (0)
#define INIT_DELAYED_WORK(_work, _func) \
do { \
INIT_LIST_HEAD(&(_work)->work.entry); \
(_work)->work.func = _func; \
} while (0)
#endif /* _LINUX_WORKQUEUE_H */
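
The to_delayed_work() helper is what lets a work handler climb from the work_struct it receives back to the object embedding it, via container_of(). A sketch of the pattern under assumed names (struct my_device, poll_fn and my_device_start are illustrative):

#include <linux/workqueue.h>

struct my_device {
    int state;
    struct delayed_work poll_work;   /* embedded by value, not a pointer */
};

static void poll_fn(struct work_struct *work)
{
    /* work points into poll_work; container_of() recovers the device */
    struct my_device *dev =
        container_of(to_delayed_work(work), struct my_device, poll_work);
    dev->state++;
}

static void my_device_start(struct my_device *dev)
{
    INIT_DELAYED_WORK(&dev->poll_work, poll_fn);
    schedule_delayed_work(&dev->poll_work, HZ);   /* ~1 s later on system_wq */
}

The re-enabled ironlake_panel_vdd_work() in intel_dp.c below uses exactly this shape.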

View File

@@ -58,6 +58,8 @@ u32_t IMPORT GetPid(void)__asm__("GetPid");
u32 STDCALL TimerHs(u32 delay, u32 interval,
void *fn, void *data)asm("TimerHs");
u64 IMPORT GetCpuFreq()__asm__("GetCpuFreq");
///////////////////////////////////////////////////////////////////////////////
void STDCALL SetMouseData(int btn, int x, int y,

View File

@@ -1339,6 +1339,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
ret = -ENOMEM;
goto out_mtrrfree;
}
system_wq = dev_priv->wq;
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);

View File

@@ -64,7 +64,7 @@ MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
unsigned int i915_powersave __read_mostly = 0;
unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
@@ -74,7 +74,7 @@ module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
int i915_enable_rc6 __read_mostly = 0;
int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6. "
@@ -83,7 +83,7 @@ MODULE_PARM_DESC(i915_enable_rc6,
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = 0;
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
@@ -136,6 +136,10 @@ MODULE_PARM_DESC(preliminary_hw_support,
"Enable Haswell and ValleyView Support. "
"(default: false)");
int i915_disable_power_well __read_mostly = 0;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
"Disable the power well when possible (default: false)");
#define PCI_VENDOR_ID_INTEL 0x8086
@@ -502,9 +506,9 @@ static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
*/
// .driver_features =
// DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
// DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_MODESET,
// .load = i915_driver_load,
// .unload = i915_driver_unload,
.open = i915_driver_open,

View File

@@ -267,9 +267,6 @@ i915_gem_create(struct drm_file *file,
trace_i915_gem_object_create(obj);
*handle_p = handle;
// printf("%s obj %p handle %d\n", __FUNCTION__, obj, handle);
return 0;
}
@@ -694,8 +691,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
out_unpin:
i915_gem_object_unpin(obj);
out:
printf("% s ret = %d\n", __FUNCTION__, ret);
return ret;
}
@@ -1949,7 +1944,8 @@ i915_add_request(struct intel_ring_buffer *ring,
}
if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
}
@@ -2135,7 +2131,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (!mutex_trylock(&dev->struct_mutex)) {
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
return;
}
@@ -2153,7 +2150,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
}
if (!dev_priv->mm.suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
if (idle)
intel_mark_idle(dev);

View File

@@ -252,8 +252,6 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
u32 adpa;
bool ret;
ENTER();
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
bool turn_off_dac = HAS_PCH_SPLIT(dev);
@@ -288,8 +286,6 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
ret = false;
DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
LEAVE();
return ret;
}
@@ -302,8 +298,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
bool ret;
u32 save_adpa;
ENTER();
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
@@ -329,8 +323,6 @@
/* FIXME: debug force function and remove */
ret = true;
LEAVE();
return ret;
}
@@ -350,8 +342,6 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
bool ret = false;
int i, tries = 0;
ENTER();
if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
@@ -390,8 +380,6 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
/* and put the bits back */
I915_WRITE(PORT_HOTPLUG_EN, orig);
LEAVE();
return ret;
}
@@ -400,8 +388,6 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector,
{
struct edid *edid;
ENTER();
edid = drm_get_edid(connector, i2c);
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
@@ -411,8 +397,6 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector,
intel_gmbus_force_bit(i2c, false);
}
LEAVE();
return edid;
}

View File

@@ -8237,6 +8237,9 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -8760,9 +8763,9 @@ void intel_modeset_init_hw(struct drm_device *dev)
intel_init_clock_gating(dev);
// mutex_lock(&dev->struct_mutex);
// intel_enable_gt_powersave(dev);
// mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev->struct_mutex);
intel_enable_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
}
void intel_modeset_init(struct drm_device *dev)

View File

@@ -1147,13 +1147,13 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
// struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
// struct intel_dp, panel_vdd_work);
// struct drm_device *dev = intel_dp_to_dev(intel_dp);
//
// mutex_lock(&dev->mode_config.mutex);
// ironlake_panel_vdd_off_sync(intel_dp);
// mutex_unlock(&dev->mode_config.mutex);
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -1174,8 +1174,8 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
* time from now (relative to the power down delay)
* to keep the panel power up across a sequence of operations
*/
// schedule_delayed_work(&intel_dp->panel_vdd_work,
// msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
schedule_delayed_work(&intel_dp->panel_vdd_work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
}
}

View File

@@ -33,6 +33,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_helper.h>
#define KBUILD_MODNAME "i915.dll"
#define cpu_relax() asm volatile("rep; nop")
#define _wait_for(COND, MS, W) ({ \

View File

@@ -194,7 +194,7 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
/* XXX add code here to query mode clock or hardware clock
* and program max PWM appropriately.
*/
printk("fixme: max PWM is zero\n");
pr_warn_once("fixme: max PWM is zero\n");
return 1;
}

View File

@@ -282,7 +282,6 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->display.fbc_enabled(dev);
}
#if 0
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
@@ -323,9 +322,9 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
* dev_priv->fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
if (cancel_delayed_work(&dev_priv->fbc_work->work))
// if (cancel_delayed_work(&dev_priv->fbc_work->work))
/* tasklet was killed before being run, clean up */
kfree(dev_priv->fbc_work);
// kfree(dev_priv->fbc_work);
/* Mark the work as no longer wanted so that if it does
* wake-up (because the work was already running and waiting
@@ -334,7 +333,6 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
*/
dev_priv->fbc_work = NULL;
}
#endif
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
@@ -342,9 +340,9 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
// if (!dev_priv->display.enable_fbc)
if (!dev_priv->display.enable_fbc)
return;
#if 0
intel_cancel_fbc_work(dev_priv);
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -374,20 +372,18 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
* waiting synchronously upon the vblank.
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
#endif
}
void intel_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
// intel_cancel_fbc_work(dev_priv);
intel_cancel_fbc_work(dev_priv);
// if (!dev_priv->display.disable_fbc)
// return;
if (!dev_priv->display.disable_fbc)
return;
// dev_priv->display.disable_fbc(dev);
dev_priv->display.disable_fbc(dev);
dev_priv->cfb_plane = -1;
}
@@ -420,6 +416,8 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_i915_gem_object *obj;
int enable_fbc;
ENTER();
if (!i915_powersave)
return;
@@ -550,6 +548,8 @@ void intel_update_fbc(struct drm_device *dev)
}
intel_enable_fbc(crtc, 500);
LEAVE();
return;
out_disable:
@@ -559,6 +559,7 @@ out_disable:
intel_disable_fbc(dev);
}
i915_gem_stolen_cleanup_compression(dev);
LEAVE();
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -2411,7 +2412,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
I915_READ(0x112e0);
dev_priv->ips.last_time1 = jiffies_to_msecs(GetTimerTicks());
dev_priv->ips.last_count2 = I915_READ(0x112f4);
// getrawmonotonic(&dev_priv->ips.last_time2);
getrawmonotonic(&dev_priv->ips.last_time2);
spin_unlock_irq(&mchdev_lock);
}
@@ -2690,7 +2691,6 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_put(dev_priv);
}
#if 0
static void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2737,7 +2737,6 @@ static void gen6_update_ring_freq(struct drm_device *dev)
ia_freq | gpu_freq);
}
}
#endif
void ironlake_teardown_rc6(struct drm_device *dev)
{
@@ -3466,11 +3465,30 @@ void intel_disable_gt_powersave(struct drm_device *dev)
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
// cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
static void intel_gen6_powersave_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
rps.delayed_resume_work.work);
struct drm_device *dev = dev_priv->dev;
ENTER();
mutex_lock(&dev_priv->rps.hw_lock);
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
LEAVE();
}
void intel_enable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3485,8 +3503,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
* done at any specific time, so do this out of our fast path
* to make resume and init faster.
*/
// schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
// round_jiffies_up_relative(HZ));
schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
round_jiffies_up_relative(HZ));
}
}
@@ -4089,6 +4107,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
if (!IS_HASWELL(dev))
return;
if (!i915_disable_power_well && !enable)
return;
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE;
enable_requested = tmp & HSW_PWR_WELL_ENABLE;
@@ -4468,6 +4489,8 @@ void intel_gt_init(struct drm_device *dev)
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
}
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)

View File

@@ -37,15 +37,6 @@
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
unsigned int hweight16(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x5555);
res = (res & 0x3333) + ((res >> 2) & 0x3333);
res = (res + (res >> 4)) & 0x0F0F;
return (res + (res >> 8)) & 0x00FF;
}
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)

View File

@@ -594,6 +594,8 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct intel_plane *intel_plane;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -625,6 +627,8 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct intel_plane *intel_plane;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
drm_modeset_lock_all(dev);

View File

@@ -836,118 +836,14 @@ int i915_mask_update(struct drm_device *dev, void *data,
void __stdcall run_workqueue(struct workqueue_struct *cwq)
{
unsigned long irqflags;
// dbgprintf("wq: %x head %x, next %x\n",
// cwq, &cwq->worklist, cwq->worklist.next);
spin_lock_irqsave(&cwq->lock, irqflags);
while (!list_empty(&cwq->worklist))
{
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
work_func_t f = work->func;
list_del_init(cwq->worklist.next);
// dbgprintf("head %x, next %x\n",
// &cwq->worklist, cwq->worklist.next);
spin_unlock_irqrestore(&cwq->lock, irqflags);
f(work);
spin_lock_irqsave(&cwq->lock, irqflags);
}
spin_unlock_irqrestore(&cwq->lock, irqflags);
}
static inline
int __queue_work(struct workqueue_struct *wq,
struct work_struct *work)
{
unsigned long flags;
// dbgprintf("wq: %x, work: %x\n",
// wq, work );
if(!list_empty(&work->entry))
return 0;
spin_lock_irqsave(&wq->lock, flags);
if(list_empty(&wq->worklist))
TimerHs(0,0, run_workqueue, wq);
list_add_tail(&work->entry, &wq->worklist);
spin_unlock_irqrestore(&wq->lock, flags);
// dbgprintf("wq: %x head %x, next %x\n",
// wq, &wq->worklist, wq->worklist.next);
return 1;
};
bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
return __queue_work(wq, work);
}
void __stdcall delayed_work_timer_fn(unsigned long __data)
{
struct delayed_work *dwork = (struct delayed_work *)__data;
struct workqueue_struct *wq = dwork->work.data;
// dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work );
__queue_work(wq, &dwork->work);
}
int queue_delayed_work_on(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
struct work_struct *work = &dwork->work;
work->data = wq;
TimerHs(0,0, delayed_work_timer_fn, dwork);
return 1;
}
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
u32 flags;
// dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work );
if (delay == 0)
return __queue_work(wq, &dwork->work);
return queue_delayed_work_on(wq, dwork, delay);
}
struct workqueue_struct *alloc_workqueue(const char *fmt,
unsigned int flags,
int max_active)
{
struct workqueue_struct *wq;
wq = kzalloc(sizeof(*wq),0);
if (!wq)
goto err;
INIT_LIST_HEAD(&wq->worklist);
return wq;
err:
return NULL;
}
#define NSEC_PER_SEC 1000000000L
@@ -1028,7 +924,21 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
return 1;
}
unsigned int hweight16(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x5555);
res = (res & 0x3333) + ((res >> 2) & 0x3333);
res = (res + (res >> 4)) & 0x0F0F;
return (res + (res >> 8)) & 0x00FF;
}
unsigned long round_jiffies_up_relative(unsigned long j)
{
unsigned long j0 = GetTimerTicks();
/* Use j0 because jiffies might change while we run */
return round_jiffies_common(j + j0, true) - j0;
}
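
The relocated hweight16() is the classic parallel popcount: each step sums adjacent bit groups in place, doubling the group width from 1 bit up to 16. An illustrative trace for w = 0xFFFF (all bits set):

unsigned int w   = 0xFFFF;
unsigned int res = w - ((w >> 1) & 0x5555);    /* 0xAAAA: each 2-bit pair holds 2 */
res = (res & 0x3333) + ((res >> 2) & 0x3333);  /* 0x4444: each nibble holds 4    */
res = (res + (res >> 4)) & 0x0F0F;             /* 0x0808: each byte holds 8      */
res = (res + (res >> 8)) & 0x00FF;             /* 0x0010: 16 bits were set       */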

View File

@@ -53,7 +53,10 @@ int i915_mask_update(struct drm_device *dev, void *data,
static char log[256];
struct workqueue_struct *system_wq;
int x86_clflush_size;
unsigned int tsc_khz;
int i915_modeset = 1;
@@ -102,16 +105,13 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
if( err != 0)
dbgprintf("Set DISPLAY handler\n");
struct drm_i915_private *dev_priv = main_device->dev_private;
run_workqueue(dev_priv->wq);
return err;
};
//int __declspec(dllexport) DllMain(int, char*) __attribute__ ((weak, alias ("drvEntry")));
//int __declspec(dllexport) DllMain( int hinstDLL, int fdwReason, void *lpReserved )
//{
//
// return 1;
//}
#define CURRENT_API 0x0200 /* 2.00 */
#define COMPATIBLE_API 0x0100 /* 1.00 */
@@ -394,6 +394,8 @@ void cpu_detect()
{
x86_clflush_size = ((misc >> 8) & 0xff) * 8;
}
tsc_khz = (unsigned int)(GetCpuFreq()/1000);
}
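
For scale: assuming GetCpuFreq() reports the TSC rate in Hz, a 2.4 GHz part gives tsc_khz = 2400000000 / 1000 = 2400000, which fits comfortably in the 32-bit unsigned tsc_khz that the i915 power code reads.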