i915: irq handling && request retirement

Intel-2D: lock_bitmap() 

git-svn-id: svn://kolibrios.org@3266 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
Sergey Semyonov (Serge) 2013-02-24 06:22:22 +00:00
parent 5973aeaf8b
commit ddfbaa9696
8 changed files with 122 additions and 62 deletions

View File

@ -3776,8 +3776,17 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
assert((read_write_domain & 0x7fff) == 0 || bo != NULL);
// if( bo != NULL && bo->handle == -1)
// return 0;
if( bo != NULL && bo->handle == -2)
{
if (bo->exec == NULL)
kgem_add_bo(kgem, bo);
if (read_write_domain & 0x7fff && !bo->dirty) {
assert(!bo->snoop || kgem->can_blt_cpu);
__kgem_bo_mark_dirty(bo);
}
return 0;
};
index = kgem->nreloc++;
assert(index < ARRAY_SIZE(kgem->reloc));

View File

@ -32,6 +32,14 @@ static int call_service(ioctl_t *io)
return retval;
};
/* Fetch the current thread's information block via KolibriOS
 * system call 9 (eax=9, ebx=dest buffer, ecx=-1 for "current thread").
 * `info` must point to a buffer of at least 1024 bytes (callers here
 * pass char proc_info[1024]); the kernel fills it with window/thread
 * state, from which callers read the window origin at offsets 34/38.
 * NOTE(review): the asm has no "memory" clobber and does not mark the
 * buffer as an output, so the compiler is not told *info changed —
 * works in practice here, but worth confirming against callers. */
static inline void get_proc_info(char *info)
{
    __asm__ __volatile__(
    "int $0x40"
    :
    :"a"(9), "b"(info), "c"(-1));
}
const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);
@ -336,6 +344,14 @@ int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
struct _Pixmap src, dst;
struct kgem_bo *src_bo;
char proc_info[1024];
int winx, winy;
get_proc_info(proc_info);
winx = *(uint32_t*)(proc_info+34);
winy = *(uint32_t*)(proc_info+38);
memset(&src, 0, sizeof(src));
memset(&dst, 0, sizeof(dst));
@ -355,7 +371,7 @@ int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
&src, src_bo,
&dst, sna_fb.fb_bo, &copy) )
{
copy.blt(sna_device, &copy, src_x, src_y, w, h, dst_x, dst_y);
copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
copy.done(sna_device, &copy);
}
@ -390,7 +406,21 @@ err_2:
err_1:
return -1;
};
/* Prepare a bitmap for CPU access: the bitmap's handle field holds a
 * kgem buffer object pointer; synchronise that BO with the CPU domain
 * so subsequent direct pixel reads/writes see coherent data. */
void sna_lock_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *pixmap_bo = (struct kgem_bo *)bitmap->handle;

    kgem_bo_sync__cpu(&sna_device->kgem, pixmap_bo);
};
/*
int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,

View File

@ -33,6 +33,7 @@ SECTIONS
*(.debug$F)
*(.drectve)
*(.edata)
*(.eh_frame)
}
.idata ALIGN(__section_alignment__):

View File

@ -48,6 +48,8 @@ extern int x86_clflush_size;
#define rmb() asm volatile ("lfence")
#define wmb() asm volatile ("sfence")
struct drm_i915_gem_object *get_fb_obj();
unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset);
@ -1051,11 +1053,10 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
wait_forever = false;
}
// timeout_jiffies = timespec_to_jiffies(&wait_time);
timeout_jiffies = timespec_to_jiffies(&wait_time);
if (WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
#if 0
/* Record current time in case interrupted by signal, or wedged * */
getrawmonotonic(&before);
@ -1064,6 +1065,11 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
EXIT_COND,
timeout_jiffies);
else
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
@ -1089,24 +1095,13 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
case -ERESTARTSYS: /* Signal */
return (int)end;
case 0: /* Timeout */
// if (timeout)
// set_normalized_timespec(timeout, 0, 0);
if (timeout)
set_normalized_timespec(timeout, 0, 0);
return -ETIME;
default: /* Completed */
WARN_ON(end < 0); /* We're not aware of other errors */
return 0;
}
#endif
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
wait_event(ring->irq_queue, EXIT_COND);
#undef EXIT_COND
ring->irq_put(ring);
return 0;
}
/**
@ -1917,8 +1912,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
ENTER();
if (list_empty(&ring->request_list))
return;
@ -1972,7 +1965,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
}
WARN_ON(i915_verify_lists(ring->dev));
LEAVE();
}
void
@ -1995,8 +1987,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
bool idle;
int i;
ENTER();
dev_priv = container_of(work, drm_i915_private_t,
mm.retire_work.work);
dev = dev_priv->dev;
@ -2026,8 +2016,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
intel_mark_idle(dev);
mutex_unlock(&dev->struct_mutex);
LEAVE();
}
/**

View File

@ -267,7 +267,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int remain, ret;
@ -367,6 +367,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
bool need_fence, need_mappable;
int ret;
// ENTER();
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@ -375,7 +377,10 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
if (ret)
{
FAIL();
return ret;
};
entry->flags |= __EXEC_OBJECT_HAS_PIN;
@ -383,7 +388,10 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj);
if (ret)
{
FAIL();
return ret;
};
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@ -401,6 +409,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
}
entry->offset = obj->gtt_offset;
// LEAVE();
return 0;
}
@ -433,6 +443,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
// ENTER();
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
@ -514,7 +526,10 @@ err: /* Decrement pin count for bound objects */
i915_gem_execbuffer_unreserve_object(obj);
if (ret != -ENOSPC || retry++)
{
// LEAVE();
return ret;
};
// ret = i915_gem_evict_everything(ring->dev);
if (ret)
@ -554,8 +569,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
reloc_offset = malloc(count * sizeof(*reloc_offset));
reloc = malloc(total * sizeof(*reloc));
if (reloc == NULL || reloc_offset == NULL) {
free(reloc);
free(reloc_offset);
kfree(reloc);
kfree(reloc_offset);
mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
@ -609,7 +624,10 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
for (i = 0; i < count; i++) {
if(exec[i].handle == -2)
{
obj = get_fb_obj();
drm_gem_object_reference(&obj->base);
}
else
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
@ -645,8 +663,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
*/
err:
free(reloc);
free(reloc_offset);
kfree(reloc);
kfree(reloc_offset);
return ret;
}
@ -843,12 +861,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
FAIL();
return -EINVAL;
}
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
{
FAIL();
return ret;
};
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
@ -870,6 +892,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
FAIL();
return -EPERM;
}
break;
@ -978,10 +1001,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
if(exec[i].handle == -2)
{
obj = get_fb_obj();
drm_gem_object_reference(&obj->base);
}
else
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
// printf("%s object %p handle %d\n", __FUNCTION__, obj, exec[i].handle);
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
@ -1094,10 +1123,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
// i915_gem_execbuffer_move_to_active(&objects, ring);
// i915_gem_execbuffer_retire_commands(dev, file, ring);
ring->gpu_caches_dirty = true;
intel_ring_flush_all_caches(ring);
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
@ -1115,10 +1144,11 @@ err:
pre_mutex_err:
kfree(cliprects);
return ret;
}
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
@ -1127,18 +1157,24 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
// ENTER();
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
FAIL();
return -EINVAL;
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, 0);
if (exec2_list == NULL)
exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count);
// if (exec2_list == NULL)
// exec2_list = drm_malloc_ab(sizeof(*exec2_list),
// args->buffer_count);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
FAIL();
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
@ -1148,7 +1184,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
free(exec2_list);
kfree(exec2_list);
FAIL();
return -EFAULT;
}
@ -1166,6 +1203,9 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
}
}
free(exec2_list);
kfree(exec2_list);
// LEAVE();
return ret;
}

View File

@ -357,7 +357,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
{
printf("%s\n", __FUNCTION__);
// printf("%s\n", __FUNCTION__);
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
@ -413,8 +413,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
u32 pipe_stats[I915_MAX_PIPES];
bool blc_event;
printf("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_received);
while (true) {
@ -566,8 +564,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
int i;
printf("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */
@ -643,8 +639,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
printf("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */
@ -2488,7 +2482,7 @@ void intel_irq_init(struct drm_device *dev)
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
printf("i915 irq\n");
// printf("i915 irq\n");
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;

View File

@ -7004,8 +7004,6 @@ void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
ENTER();
if (!i915_powersave)
return;

View File

@ -1341,8 +1341,8 @@ void __stdcall run_workqueue(struct workqueue_struct *cwq)
{
unsigned long irqflags;
dbgprintf("wq: %x head %x, next %x\n",
cwq, &cwq->worklist, cwq->worklist.next);
// dbgprintf("wq: %x head %x, next %x\n",
// cwq, &cwq->worklist, cwq->worklist.next);
spin_lock_irqsave(&cwq->lock, irqflags);
@ -1352,8 +1352,8 @@ void __stdcall run_workqueue(struct workqueue_struct *cwq)
struct work_struct, entry);
work_func_t f = work->func;
list_del_init(cwq->worklist.next);
dbgprintf("head %x, next %x\n",
&cwq->worklist, cwq->worklist.next);
// dbgprintf("head %x, next %x\n",
// &cwq->worklist, cwq->worklist.next);
spin_unlock_irqrestore(&cwq->lock, irqflags);
f(work);
@ -1370,8 +1370,8 @@ int __queue_work(struct workqueue_struct *wq,
{
unsigned long flags;
dbgprintf("wq: %x, work: %x\n",
wq, work );
// dbgprintf("wq: %x, work: %x\n",
// wq, work );
if(!list_empty(&work->entry))
return 0;
@ -1384,8 +1384,8 @@ int __queue_work(struct workqueue_struct *wq,
list_add_tail(&work->entry, &wq->worklist);
spin_unlock_irqrestore(&wq->lock, flags);
dbgprintf("wq: %x head %x, next %x\n",
wq, &wq->worklist, wq->worklist.next);
// dbgprintf("wq: %x head %x, next %x\n",
// wq, &wq->worklist, wq->worklist.next);
return 1;
};
@ -1395,8 +1395,8 @@ void __stdcall delayed_work_timer_fn(unsigned long __data)
struct delayed_work *dwork = (struct delayed_work *)__data;
struct workqueue_struct *wq = dwork->work.data;
dbgprintf("wq: %x, work: %x\n",
wq, &dwork->work );
// dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work );
__queue_work(wq, &dwork->work);
}
@ -1417,8 +1417,8 @@ int queue_delayed_work(struct workqueue_struct *wq,
{
u32 flags;
dbgprintf("wq: %x, work: %x\n",
wq, &dwork->work );
// dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work );
if (delay == 0)
return __queue_work(wq, &dwork->work);