Linux workqueue and retire_work_handler
git-svn-id: svn://kolibrios.org@2360 a494cfbc-eb01-0410-851d-a64ba20cac60
parent a66a87554f
commit 80ab648f04
@@ -325,8 +325,6 @@ int get_driver_caps(hwcaps_t *caps)
    int ret = 0;
    ENTER();

    dbgprintf("caps ptr %x\n", caps);

    switch(caps->idx)
    {
    case 0:
@@ -508,15 +508,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     * so there is no point in running more than one instance of the
     * workqueue at any time: max_active = 1 and NON_REENTRANT.
     */

-//    dev_priv->wq = alloc_workqueue("i915",
-//                    WQ_UNBOUND | WQ_NON_REENTRANT,
-//                    1);
-//    if (dev_priv->wq == NULL) {
-//        DRM_ERROR("Failed to create our workqueue.\n");
-//        ret = -ENOMEM;
-//        goto out_mtrrfree;
-//    }
+    dev_priv->wq = alloc_workqueue("i915",
+                    WQ_UNBOUND | WQ_NON_REENTRANT,
+                    1);
+    if (dev_priv->wq == NULL) {
+        DRM_ERROR("Failed to create our workqueue.\n");
+        ret = -ENOMEM;
+        goto out_mtrrfree;
+    }

    /* enable GEM by default */
    dev_priv->has_gem = 1;
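The comment above explains the flags used here: max_active = 1 means at most one work item from this queue executes at a time, and WQ_NON_REENTRANT (a flag that existed in kernels of this vintage) additionally guarantees the same work item is never run concurrently on another CPU, so handlers queued here need no locking against themselves. A minimal sketch of the creation-and-failure pattern the hunk enables:

    struct workqueue_struct *wq;

    wq = alloc_workqueue("i915", WQ_UNBOUND | WQ_NON_REENTRANT, 1);
    if (wq == NULL)
        return -ENOMEM;    /* driver load cannot proceed without it */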
@@ -40,6 +40,7 @@

+#include <linux/spinlock.h>

/* General customization:
 */
@@ -329,7 +330,7 @@ typedef struct drm_i915_private {
    u32 pch_irq_mask;

    u32 hotplug_supported_mask;
-//    struct work_struct hotplug_work;
+    struct work_struct hotplug_work;

    int tex_lru_log_granularity;
    int allow_batchbuffer;
@@ -397,10 +398,10 @@ typedef struct drm_i915_private {
    unsigned int fsb_freq, mem_freq, is_ddr3;

    spinlock_t error_lock;
-//    struct drm_i915_error_state *first_error;
-//    struct work_struct error_work;
-//    struct completion error_completion;
-//    struct workqueue_struct *wq;
+//    struct drm_i915_error_state *first_error;
+    struct work_struct error_work;
+//    struct completion error_completion;
+    struct workqueue_struct *wq;

    /* Display functions */
    struct drm_i915_display_funcs display;
@@ -642,7 +643,7 @@ typedef struct drm_i915_private {
     * fire periodically while the ring is running. When it
     * fires, go retire requests.
     */
-//    struct delayed_work retire_work;
+    struct delayed_work retire_work;

    /**
     * Are we in a non-interruptible section of code like
@@ -699,7 +700,7 @@ typedef struct drm_i915_private {
    bool lvds_downclock_avail;
    /* indicates the reduced downclock for LVDS*/
    int lvds_downclock;
-//    struct work_struct idle_work;
+    struct work_struct idle_work;
    struct timer_list idle_timer;
    bool busy;
    u16 orig_clock;
@@ -710,7 +711,7 @@ typedef struct drm_i915_private {

    bool mchbar_need_disable;

-//    struct work_struct rps_work;
+    struct work_struct rps_work;
    spinlock_t rps_lock;
    u32 pm_iir;
@@ -1416,4 +1417,18 @@ typedef struct
    int freq;
}videomode_t;


+static inline int mutex_trylock(struct mutex *lock)
+{
+    if (likely(atomic_cmpxchg(&lock->count, 1, 0) == 1))
+        return 1;
+    return 0;
+}

#endif
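A note on the new inline above: atomic_cmpxchg(&lock->count, 1, 0) atomically changes the count from 1 (unlocked) to 0 (locked) and returns the previous value, so the function returns 1 exactly when the lock was free and is now held, matching the Linux mutex_trylock() contract. A minimal sketch of the intended call pattern, using the same names the retire handler below uses:

    if (!mutex_trylock(&dev->struct_mutex)) {
        /* device busy: re-queue the work and try again in ~1 second */
        queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        return;
    }
    /* ... critical section under struct_mutex ... */
    mutex_unlock(&dev->struct_mutex);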
@@ -960,16 +960,16 @@ i915_add_request(struct intel_ring_buffer *ring,

    ring->outstanding_lazy_request = false;

-//    if (!dev_priv->mm.suspended) {
-//    if (i915_enable_hangcheck) {
+    if (!dev_priv->mm.suspended) {
+        if (i915_enable_hangcheck) {
//            mod_timer(&dev_priv->hangcheck_timer,
//                  jiffies +
//                  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
-//    }
-//    if (was_empty)
-//        queue_delayed_work(dev_priv->wq,
-//               &dev_priv->mm.retire_work, HZ);
-//    }
+        }
+        if (was_empty)
+            queue_delayed_work(dev_priv->wq,
+                       &dev_priv->mm.retire_work, HZ);
+    }
    return 0;
}
@@ -982,6 +982,16 @@ i915_add_request(struct intel_ring_buffer *ring,
+
+
+
+
+
+
+
+
+
+

/**
@@ -1072,14 +1082,57 @@ i915_gem_retire_requests(struct drm_device *dev)
        i915_gem_retire_requests_ring(&dev_priv->ring[i]);
}

+static void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+    drm_i915_private_t *dev_priv;
+    struct drm_device *dev;
+    bool idle;
+    int i;
+
+//    ENTER();
+
+    dev_priv = container_of(work, drm_i915_private_t,
+                mm.retire_work.work);
+    dev = dev_priv->dev;
+
+    /* Come back later if the device is busy... */
+    if (!mutex_trylock(&dev->struct_mutex)) {
+        queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+//        LEAVE();
+        return;
+    }
+
+    i915_gem_retire_requests(dev);
+
+    /* Send a periodic flush down the ring so we don't hold onto GEM
+     * objects indefinitely.
+     */
+    idle = true;
+    for (i = 0; i < I915_NUM_RINGS; i++) {
+        struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+        if (!list_empty(&ring->gpu_write_list)) {
+            struct drm_i915_gem_request *request;
+            int ret;
+
+            ret = i915_gem_flush_ring(ring,
+                          0, I915_GEM_GPU_DOMAINS);
+            request = kzalloc(sizeof(*request), GFP_KERNEL);
+            if (ret || request == NULL ||
+                i915_add_request(ring, NULL, request))
+                kfree(request);
+        }
+
+        idle &= list_empty(&ring->request_list);
+    }
+
+    if (!dev_priv->mm.suspended && !idle)
+        queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+
+    mutex_unlock(&dev->struct_mutex);
+//    LEAVE();
+}
+
/**
 * Waits for a sequence number to be signaled, and cleans up the
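This is the retire_work_handler named in the commit title. Its shape is a self-rescheduling delayed work item: it backs off with queue_delayed_work(..., HZ) when struct_mutex is contended, retires finished requests, posts a flushing request on any ring with pending GPU writes so buffers are not held indefinitely, and re-arms itself while the device is busy. A condensed sketch of that skeleton; my_state_t, st->lock, st->done and do_retire() are hypothetical stand-ins for drm_i915_private_t and its fields:

    static void periodic_handler(struct work_struct *work)
    {
        my_state_t *st = container_of(work, my_state_t, retire_work.work);

        if (!mutex_trylock(&st->lock)) {
            /* busy: try again in about a second */
            queue_delayed_work(st->wq, &st->retire_work, HZ);
            return;
        }

        do_retire(st);                  /* the actual periodic work */

        if (!st->done)                  /* still busy: re-arm */
            queue_delayed_work(st->wq, &st->retire_work, HZ);

        mutex_unlock(&st->lock);
    }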
@@ -1326,7 +1379,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
        return ret;
    }

-    return 0; //i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+    return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}

int
@@ -1923,9 +1976,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
     * of uncaching, which would allow us to flush all the LLC-cached data
     * with that bit in the PTE to main memory with just one PIPE_CONTROL.
     */
-//    ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
-//    if (ret)
-//        return ret;
+    ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+    if (ret)
+        return ret;

    /* As the user may map the buffer once pinned in the display plane
     * (e.g. libkms for the bootup splash), we have to ensure that we
@@ -2123,6 +2176,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
    }
    obj->pin_mappable |= map_and_fenceable;

+    WARN_ON(i915_verify_lists(dev));
    return 0;
}
@@ -2132,6 +2186,7 @@ i915_gem_object_unpin(struct drm_i915_gem_object *obj)
    struct drm_device *dev = obj->base.dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

+    WARN_ON(i915_verify_lists(dev));
    BUG_ON(obj->pin_count == 0);
    BUG_ON(obj->gtt_space == NULL);
@@ -2141,6 +2196,7 @@ i915_gem_object_unpin(struct drm_i915_gem_object *obj)
                     &dev_priv->mm.inactive_list);
        obj->pin_mappable = false;
    }
+    WARN_ON(i915_verify_lists(dev));
}
@@ -2424,6 +2480,8 @@ i915_gem_load(struct drm_device *dev)
        init_ring_lists(&dev_priv->ring[i]);
    for (i = 0; i < I915_MAX_NUM_FENCES; i++)
        INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
+    INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+              i915_gem_retire_work_handler);

    /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
    if (IS_GEN3(dev)) {
@@ -2457,4 +2515,3 @@ i915_gem_load(struct drm_device *dev)
}

-
@@ -143,7 +143,6 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
                   agp_type);
}

-
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
    struct drm_device *dev = obj->base.dev;
@@ -656,7 +656,7 @@ intel_parse_bios(struct drm_device *dev)
    size_t size;
    int i;

-    bios = (void*)pci_map_rom(pdev, &size);
+    bios = pci_map_rom(pdev, &size);
    if (!bios)
        return -1;
@@ -1859,7 +1859,7 @@ static void intel_update_fbc(struct drm_device *dev)
    if (enable_fbc < 0) {
        DRM_DEBUG_KMS("fbc set to per-chip default\n");
        enable_fbc = 1;
-        if (INTEL_INFO(dev)->gen <= 5)
+        if (INTEL_INFO(dev)->gen <= 6)
            enable_fbc = 0;
    }
    if (!enable_fbc) {
@@ -2171,18 +2171,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

-    ENTER();
-
    ret = dev_priv->display.update_plane(crtc, fb, x, y);
    if (ret)
-    {
-        LEAVE();
        return ret;
-    };

    intel_update_fbc(dev);
    intel_increase_pllclock(crtc);
-    LEAVE();

    return 0;
}
@@ -2235,31 +2229,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
    LEAVE();
    return 0;

-#if 0
-    if (!dev->primary->master)
-    {
-        LEAVE();
-        return 0;
-    };
-
-    master_priv = dev->primary->master->driver_priv;
-    if (!master_priv->sarea_priv)
-    {
-        LEAVE();
-        return 0;
-    };
-
-    if (intel_crtc->pipe) {
-        master_priv->sarea_priv->pipeB_x = x;
-        master_priv->sarea_priv->pipeB_y = y;
-    } else {
-        master_priv->sarea_priv->pipeA_x = x;
-        master_priv->sarea_priv->pipeA_y = y;
-    }
-    LEAVE();
-
-    return 0;
-#endif
-
}
@@ -2835,8 +2804,8 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)

    obj = to_intel_framebuffer(crtc->fb)->obj;
    dev_priv = crtc->dev->dev_private;
-//    wait_event(dev_priv->pending_flip_queue,
-//           atomic_read(&obj->pending_flip) == 0);
+    wait_event(dev_priv->pending_flip_queue,
+           atomic_read(&obj->pending_flip) == 0);
}

static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@ -5292,6 +5261,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        }
    }

+    pipeconf &= ~PIPECONF_INTERLACE_MASK;
    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
        /* the chip adds 2 halflines automatically */
@@ -5302,7 +5272,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        adjusted_mode->crtc_vsync_end -= 1;
        adjusted_mode->crtc_vsync_start -= 1;
    } else
-        pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
+        pipeconf |= PIPECONF_PROGRESSIVE;

    I915_WRITE(HTOTAL(pipe),
           (adjusted_mode->crtc_hdisplay - 1) |
@@ -5889,6 +5859,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        }
    }

+    pipeconf &= ~PIPECONF_INTERLACE_MASK;
    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
        /* the chip adds 2 halflines automatically */
@@ -5899,7 +5870,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        adjusted_mode->crtc_vsync_end -= 1;
        adjusted_mode->crtc_vsync_start -= 1;
    } else
-        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+        pipeconf |= PIPECONF_PROGRESSIVE;

    I915_WRITE(HTOTAL(pipe),
           (adjusted_mode->crtc_hdisplay - 1) |
@@ -7043,10 +7014,6 @@ static void intel_setup_outputs(struct drm_device *dev)


-static const struct drm_mode_config_funcs intel_mode_funcs = {
-    .fb_create = NULL /*intel_user_framebuffer_create*/,
-    .output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
-};

@@ -7100,7 +7067,10 @@ int intel_framebuffer_init(struct drm_device *dev,
}


+static const struct drm_mode_config_funcs intel_mode_funcs = {
+    .fb_create = NULL /*intel_user_framebuffer_create*/,
+    .output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
+};

@@ -276,7 +276,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
}

struct intel_unpin_work {
-//    struct work_struct work;
+    struct work_struct work;
    struct drm_device *dev;
    struct drm_i915_gem_object *old_fb_obj;
    struct drm_i915_gem_object *pending_flip_obj;
@@ -286,7 +286,7 @@ struct intel_unpin_work {
};

struct intel_fbc_work {
-//    struct delayed_work work;
+    struct delayed_work work;
    struct drm_crtc *crtc;
    struct drm_framebuffer *fb;
    int interval;
@@ -1007,3 +1007,120 @@ int blit_textured(u32 hbitmap, int dst_x, int dst_y,

};

+
+void __stdcall run_workqueue(struct workqueue_struct *cwq)
+{
+    unsigned long irqflags;
+
+//    dbgprintf("wq: %x head %x, next %x\n",
+//               cwq, &cwq->worklist, cwq->worklist.next);
+
+    spin_lock_irqsave(&cwq->lock, irqflags);
+
+    while (!list_empty(&cwq->worklist))
+    {
+        struct work_struct *work = list_entry(cwq->worklist.next,
+                                        struct work_struct, entry);
+        work_func_t f = work->func;
+        list_del_init(cwq->worklist.next);
+//        dbgprintf("head %x, next %x\n",
+//                  &cwq->worklist, cwq->worklist.next);
+
+        spin_unlock_irqrestore(&cwq->lock, irqflags);
+        f(work);
+        spin_lock_irqsave(&cwq->lock, irqflags);
+    }
+
+    spin_unlock_irqrestore(&cwq->lock, irqflags);
+}
+
+
+static inline
+int __queue_work(struct workqueue_struct *wq,
+                 struct work_struct *work)
+{
+    unsigned long flags;
+//    ENTER();
+
+//    dbgprintf("wq: %x, work: %x\n",
+//               wq, work );
+
+    if(!list_empty(&work->entry))
+        return 0;
+
+    spin_lock_irqsave(&wq->lock, flags);
+
+    if(list_empty(&wq->worklist))
+        TimerHs(0,0, run_workqueue, wq);
+
+    list_add_tail(&work->entry, &wq->worklist);
+
+    spin_unlock_irqrestore(&wq->lock, flags);
+//    dbgprintf("wq: %x head %x, next %x\n",
+//               wq, &wq->worklist, wq->worklist.next);
+
+//    LEAVE();
+    return 1;
+};
+
+void __stdcall delayed_work_timer_fn(unsigned long __data)
+{
+//    ENTER();
+    struct delayed_work *dwork = (struct delayed_work *)__data;
+    struct workqueue_struct *wq = dwork->work.data;
+
+//    dbgprintf("wq: %x, work: %x\n",
+//               wq, &dwork->work );
+
+    __queue_work(wq, &dwork->work);
+//    LEAVE();
+}
+
+
+int queue_delayed_work_on(struct workqueue_struct *wq,
+                          struct delayed_work *dwork, unsigned long delay)
+{
+    struct work_struct *work = &dwork->work;
+
+    work->data = wq;
+    TimerHs(0,0, delayed_work_timer_fn, dwork);
+    return 1;
+}
+
+int queue_delayed_work(struct workqueue_struct *wq,
+                       struct delayed_work *dwork, unsigned long delay)
+{
+    u32 flags;
+//    ENTER();
+
+//    dbgprintf("wq: %x, work: %x\n",
+//               wq, &dwork->work );
+
+    if (delay == 0)
+        return __queue_work(wq, &dwork->work);
+
+    return queue_delayed_work_on(wq, dwork, delay);
+}
+
+
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+                                         unsigned int flags,
+                                         int max_active)
+{
+    struct workqueue_struct *wq;
+
+    wq = kzalloc(sizeof(*wq),0);
+    if (!wq)
+        goto err;
+
+    INIT_LIST_HEAD(&wq->worklist);
+
+    return wq;
+err:
+    return NULL;
+}
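Taken together, the functions above are a minimal KolibriOS substitute for the Linux workqueue API used by the i915 hunks earlier in this commit: alloc_workqueue() only allocates the structure and initialises its work list (fmt, flags and max_active are accepted but ignored), queue_delayed_work() arms a system timer via TimerHs() whose callback, delayed_work_timer_fn(), links the work item onto wq->worklist, and run_workqueue() drains that list, dropping the spinlock around each callback so a handler may safely re-queue itself. Note that queue_delayed_work_on() calls TimerHs() with fixed first arguments, so the delay parameter appears to be unused at this stage. A sketch of the round trip with the i915 names from the hunks above:

    struct workqueue_struct *wq;

    wq = alloc_workqueue("i915", WQ_UNBOUND | WQ_NON_REENTRANT, 1);
    INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                      i915_gem_retire_work_handler);

    /* arms the timer; its callback queues the work item, and
     * run_workqueue() then invokes the handler, which re-queues
     * itself while the GPU is busy */
    queue_delayed_work(wq, &dev_priv->mm.retire_work, HZ);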
drivers/video/drm/i915/render/exa_wm_mask_affine.g6b (new file, 4 lines)
@@ -0,0 +1,4 @@
+{ 0x0060005a, 0x210077be, 0x00000100, 0x008d0040 },
+{ 0x0060005a, 0x212077be, 0x00000100, 0x008d0080 },
+{ 0x0060005a, 0x214077be, 0x00000110, 0x008d0040 },
+{ 0x0060005a, 0x216077be, 0x00000110, 0x008d0080 },
drivers/video/drm/i915/render/exa_wm_mask_sample_a.g6b (new file, 3 lines)
@@ -0,0 +1,3 @@
+{ 0x00000201, 0x20080061, 0x00000000, 0x00007000 },
+{ 0x00600001, 0x20e00022, 0x008d0000, 0x00000000 },
+{ 0x02800031, 0x23801cc9, 0x000000e0, 0x0a2a0102 },
drivers/video/drm/i915/render/exa_wm_noca.g6b (new file, 4 lines)
@@ -0,0 +1,4 @@
+{ 0x00800041, 0x21c077bd, 0x008d01c0, 0x008d0380 },
+{ 0x00800041, 0x220077bd, 0x008d0200, 0x008d0380 },
+{ 0x00800041, 0x224077bd, 0x008d0240, 0x008d0380 },
+{ 0x00800041, 0x228077bd, 0x008d0280, 0x008d0380 },
@@ -72,9 +72,12 @@ static const uint32_t ps_kernel_nomask_affine[][4] = {
#include "exa_wm_write.g6b"
};

-static const uint32_t ps_kernel_nomask_projective[][4] = {
-#include "exa_wm_src_projective.g6b"
+static const uint32_t ps_kernel_masknoca_affine[][4] = {
#include "exa_wm_src_affine.g6b"
#include "exa_wm_src_sample_argb.g6b"
+#include "exa_wm_mask_affine.g6b"
+#include "exa_wm_mask_sample_a.g6b"
+#include "exa_wm_noca.g6b"
#include "exa_wm_write.g6b"
};
@@ -88,8 +91,7 @@ static const struct wm_kernel_info {
    Bool has_mask;
} wm_kernels[] = {
    KERNEL(NOMASK, ps_kernel_nomask_affine, FALSE),
-    KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
-
+    KERNEL(MASK, ps_kernel_masknoca_affine, TRUE),
};
#undef KERNEL
@@ -659,11 +661,6 @@ gen6_emit_drawing_rectangle(struct sna *sna,

    OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2));
    OUT_BATCH(GEN6_PIPE_CONTROL_WRITE_TIME);
-//    OUT_BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
-//                 sna->render_state.gen6.general_bo,
-//                 I915_GEM_DOMAIN_INSTRUCTION << 16 |
-//                 I915_GEM_DOMAIN_INSTRUCTION,
-//                 64));

    OUT_BATCH(sna->render_state.gen6.general_bo->gaddr+64);
@@ -1618,7 +1615,6 @@ gen6_emit_copy_state(struct sna *sna,
    bool dirty;

    gen6_get_batch(sna);
-//    dirty = kgem_bo_is_dirty(op->dst.bo);

    binding_table = gen6_composite_get_binding_table(sna, &offset);
@@ -1713,9 +1709,6 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
    op.src.width  = src->width;
    op.src.height = src->height;

-//    src_scale_x = ((float)src_w / frame->width) / (float)drw_w;
-//    src_scale_y = ((float)src_h / frame->height) / (float)drw_h;
-
    op.src.scale[0] = 1.f/w;            //src->width;
    op.src.scale[1] = 1.f/h;            //src->height;
    op.src.filter = SAMPLER_FILTER_BILINEAR;
@@ -243,7 +243,7 @@ struct sna_render {

enum {
    GEN6_WM_KERNEL_NOMASK = 0,
-    GEN6_WM_KERNEL_NOMASK_PROJECTIVE,
+    GEN6_WM_KERNEL_MASK,

    GEN6_KERNEL_COUNT
};
@@ -277,23 +277,6 @@ struct gen6_render_state {
    Bool needs_invariant;
};

-enum {
-    GEN7_WM_KERNEL_NOMASK = 0,
-    GEN7_WM_KERNEL_NOMASK_PROJECTIVE,
-
-    GEN7_WM_KERNEL_MASK,
-    GEN7_WM_KERNEL_MASK_PROJECTIVE,
-
-    GEN7_WM_KERNEL_MASKCA,
-    GEN7_WM_KERNEL_MASKCA_PROJECTIVE,
-
-    GEN7_WM_KERNEL_MASKCA_SRCALPHA,
-    GEN7_WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
-
-    GEN7_WM_KERNEL_VIDEO_PLANAR,
-    GEN7_WM_KERNEL_VIDEO_PACKED,
-    GEN7_KERNEL_COUNT
-};
-

struct sna_static_stream {