i915: irq handling && request retirement

Intel-2D: lock_bitmap() 

git-svn-id: svn://kolibrios.org@3266 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
Sergey Semyonov (Serge) 2013-02-24 06:22:22 +00:00
parent 5973aeaf8b
commit ddfbaa9696
8 changed files with 122 additions and 62 deletions

View File

@ -3776,8 +3776,17 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
assert((read_write_domain & 0x7fff) == 0 || bo != NULL); assert((read_write_domain & 0x7fff) == 0 || bo != NULL);
// if( bo != NULL && bo->handle == -1) if( bo != NULL && bo->handle == -2)
// return 0; {
if (bo->exec == NULL)
kgem_add_bo(kgem, bo);
if (read_write_domain & 0x7fff && !bo->dirty) {
assert(!bo->snoop || kgem->can_blt_cpu);
__kgem_bo_mark_dirty(bo);
}
return 0;
};
index = kgem->nreloc++; index = kgem->nreloc++;
assert(index < ARRAY_SIZE(kgem->reloc)); assert(index < ARRAY_SIZE(kgem->reloc));

View File

@ -32,6 +32,14 @@ static int call_service(ioctl_t *io)
return retval; return retval;
}; };
/* Fetch information about the current thread/process from the kernel.
 *
 * KolibriOS system call: int 0x40 with eax = 9 ("get thread info"),
 * ebx = destination buffer, ecx = -1 (meaning "the calling thread").
 * The kernel fills `info` with a process-information structure;
 * callers pass a buffer of at least 1024 bytes (see proc_info usage
 * in sna_blit_copy, which reads window-position fields at fixed
 * offsets 34 and 38).
 *
 * NOTE(review): the asm lists no clobbers/outputs beyond the inputs —
 * presumably the syscall preserves the registers GCC cares about here;
 * confirm against the KolibriOS syscall ABI.
 */
static inline void get_proc_info(char *info)
{
__asm__ __volatile__(
"int $0x40"
:
:"a"(9), "b"(info), "c"(-1));
}
const struct intel_device_info * const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci); intel_detect_chipset(struct pci_device *pci);
@ -336,6 +344,14 @@ int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
struct _Pixmap src, dst; struct _Pixmap src, dst;
struct kgem_bo *src_bo; struct kgem_bo *src_bo;
char proc_info[1024];
int winx, winy;
get_proc_info(proc_info);
winx = *(uint32_t*)(proc_info+34);
winy = *(uint32_t*)(proc_info+38);
memset(&src, 0, sizeof(src)); memset(&src, 0, sizeof(src));
memset(&dst, 0, sizeof(dst)); memset(&dst, 0, sizeof(dst));
@ -355,7 +371,7 @@ int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
&src, src_bo, &src, src_bo,
&dst, sna_fb.fb_bo, &copy) ) &dst, sna_fb.fb_bo, &copy) )
{ {
copy.blt(sna_device, &copy, src_x, src_y, w, h, dst_x, dst_y); copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
copy.done(sna_device, &copy); copy.done(sna_device, &copy);
} }
@ -390,7 +406,21 @@ err_2:
err_1: err_1:
return -1; return -1;
}; };
/* Prepare a bitmap for direct CPU access.
 *
 * The bitmap's `handle` field holds a pointer to its backing GEM
 * buffer object; synchronize that buffer for CPU reads/writes before
 * the caller touches the pixels.
 */
void sna_lock_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *buffer = (struct kgem_bo *)bitmap->handle;

    kgem_bo_sync__cpu(&sna_device->kgem, buffer);
};
/* /*
int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y, int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,

View File

@ -33,6 +33,7 @@ SECTIONS
*(.debug$F) *(.debug$F)
*(.drectve) *(.drectve)
*(.edata) *(.edata)
*(.eh_frame)
} }
.idata ALIGN(__section_alignment__): .idata ALIGN(__section_alignment__):

View File

@ -48,6 +48,8 @@ extern int x86_clflush_size;
#define rmb() asm volatile ("lfence") #define rmb() asm volatile ("lfence")
#define wmb() asm volatile ("sfence") #define wmb() asm volatile ("sfence")
struct drm_i915_gem_object *get_fb_obj();
unsigned long vm_mmap(struct file *file, unsigned long addr, unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset); unsigned long flag, unsigned long offset);
@ -1051,11 +1053,10 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
wait_forever = false; wait_forever = false;
} }
// timeout_jiffies = timespec_to_jiffies(&wait_time); timeout_jiffies = timespec_to_jiffies(&wait_time);
if (WARN_ON(!ring->irq_get(ring))) if (WARN_ON(!ring->irq_get(ring)))
return -ENODEV; return -ENODEV;
#if 0
/* Record current time in case interrupted by signal, or wedged * */ /* Record current time in case interrupted by signal, or wedged * */
getrawmonotonic(&before); getrawmonotonic(&before);
@ -1064,6 +1065,11 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged)) atomic_read(&dev_priv->mm.wedged))
do { do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
EXIT_COND,
timeout_jiffies);
else
end = wait_event_timeout(ring->irq_queue, EXIT_COND, end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies); timeout_jiffies);
@ -1089,24 +1095,13 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
case -ERESTARTSYS: /* Signal */ case -ERESTARTSYS: /* Signal */
return (int)end; return (int)end;
case 0: /* Timeout */ case 0: /* Timeout */
// if (timeout) if (timeout)
// set_normalized_timespec(timeout, 0, 0); set_normalized_timespec(timeout, 0, 0);
return -ETIME; return -ETIME;
default: /* Completed */ default: /* Completed */
WARN_ON(end < 0); /* We're not aware of other errors */ WARN_ON(end < 0); /* We're not aware of other errors */
return 0; return 0;
} }
#endif
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
wait_event(ring->irq_queue, EXIT_COND);
#undef EXIT_COND
ring->irq_put(ring);
return 0;
} }
/** /**
@ -1917,8 +1912,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{ {
uint32_t seqno; uint32_t seqno;
ENTER();
if (list_empty(&ring->request_list)) if (list_empty(&ring->request_list))
return; return;
@ -1972,7 +1965,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
} }
WARN_ON(i915_verify_lists(ring->dev)); WARN_ON(i915_verify_lists(ring->dev));
LEAVE();
} }
void void
@ -1995,8 +1987,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
bool idle; bool idle;
int i; int i;
ENTER();
dev_priv = container_of(work, drm_i915_private_t, dev_priv = container_of(work, drm_i915_private_t,
mm.retire_work.work); mm.retire_work.work);
dev = dev_priv->dev; dev = dev_priv->dev;
@ -2026,8 +2016,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
intel_mark_idle(dev); intel_mark_idle(dev);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
LEAVE();
} }
/** /**

View File

@ -267,7 +267,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb) struct eb_objects *eb)
{ {
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
struct drm_i915_gem_relocation_entry __user *user_relocs; struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int remain, ret; int remain, ret;
@ -367,6 +367,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
bool need_fence, need_mappable; bool need_fence, need_mappable;
int ret; int ret;
// ENTER();
need_fence = need_fence =
has_fenced_gpu_access && has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE && entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@ -375,7 +377,10 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
if (ret) if (ret)
{
FAIL();
return ret; return ret;
};
entry->flags |= __EXEC_OBJECT_HAS_PIN; entry->flags |= __EXEC_OBJECT_HAS_PIN;
@ -383,7 +388,10 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj); ret = i915_gem_object_get_fence(obj);
if (ret) if (ret)
{
FAIL();
return ret; return ret;
};
if (i915_gem_object_pin_fence(obj)) if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE; entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@ -401,6 +409,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
} }
entry->offset = obj->gtt_offset; entry->offset = obj->gtt_offset;
// LEAVE();
return 0; return 0;
} }
@ -433,6 +443,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry; int retry;
// ENTER();
INIT_LIST_HEAD(&ordered_objects); INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) { while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry; struct drm_i915_gem_exec_object2 *entry;
@ -514,7 +526,10 @@ err: /* Decrement pin count for bound objects */
i915_gem_execbuffer_unreserve_object(obj); i915_gem_execbuffer_unreserve_object(obj);
if (ret != -ENOSPC || retry++) if (ret != -ENOSPC || retry++)
{
// LEAVE();
return ret; return ret;
};
// ret = i915_gem_evict_everything(ring->dev); // ret = i915_gem_evict_everything(ring->dev);
if (ret) if (ret)
@ -554,8 +569,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
reloc_offset = malloc(count * sizeof(*reloc_offset)); reloc_offset = malloc(count * sizeof(*reloc_offset));
reloc = malloc(total * sizeof(*reloc)); reloc = malloc(total * sizeof(*reloc));
if (reloc == NULL || reloc_offset == NULL) { if (reloc == NULL || reloc_offset == NULL) {
free(reloc); kfree(reloc);
free(reloc_offset); kfree(reloc_offset);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
return -ENOMEM; return -ENOMEM;
} }
@ -609,7 +624,10 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
if(exec[i].handle == -2) if(exec[i].handle == -2)
{
obj = get_fb_obj(); obj = get_fb_obj();
drm_gem_object_reference(&obj->base);
}
else else
obj = to_intel_bo(drm_gem_object_lookup(dev, file, obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle)); exec[i].handle));
@ -645,8 +663,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
*/ */
err: err:
free(reloc); kfree(reloc);
free(reloc_offset); kfree(reloc_offset);
return ret; return ret;
} }
@ -843,12 +861,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (!i915_gem_check_execbuffer(args)) { if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n"); DRM_DEBUG("execbuf with invalid offset/length\n");
FAIL();
return -EINVAL; return -EINVAL;
} }
ret = validate_exec_list(exec, args->buffer_count); ret = validate_exec_list(exec, args->buffer_count);
if (ret) if (ret)
{
FAIL();
return ret; return ret;
};
flags = 0; flags = 0;
if (args->flags & I915_EXEC_SECURE) { if (args->flags & I915_EXEC_SECURE) {
@ -870,6 +892,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ctx_id != 0) { if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n", DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name); ring->name);
FAIL();
return -EPERM; return -EPERM;
} }
break; break;
@ -978,10 +1001,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
if(exec[i].handle == -2) if(exec[i].handle == -2)
{
obj = get_fb_obj(); obj = get_fb_obj();
drm_gem_object_reference(&obj->base);
}
else else
obj = to_intel_bo(drm_gem_object_lookup(dev, file, obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle)); exec[i].handle));
// printf("%s object %p handle %d\n", __FUNCTION__, obj, exec[i].handle);
if (&obj->base == NULL) { if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n", DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i); exec[i].handle, i);
@ -1094,10 +1123,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err; goto err;
} }
// i915_gem_execbuffer_move_to_active(&objects, ring); trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
// i915_gem_execbuffer_retire_commands(dev, file, ring);
ring->gpu_caches_dirty = true; i915_gem_execbuffer_move_to_active(&objects, ring);
intel_ring_flush_all_caches(ring); i915_gem_execbuffer_retire_commands(dev, file, ring);
err: err:
eb_destroy(eb); eb_destroy(eb);
@ -1115,10 +1144,11 @@ err:
pre_mutex_err: pre_mutex_err:
kfree(cliprects); kfree(cliprects);
return ret; return ret;
} }
int int
i915_gem_execbuffer2(struct drm_device *dev, void *data, i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file) struct drm_file *file)
@ -1127,18 +1157,24 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL; struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret; int ret;
// ENTER();
if (args->buffer_count < 1 || if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) { args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
FAIL();
return -EINVAL; return -EINVAL;
} }
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, 0); exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count);
if (exec2_list == NULL)
exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count); // if (exec2_list == NULL)
// exec2_list = drm_malloc_ab(sizeof(*exec2_list),
// args->buffer_count);
if (exec2_list == NULL) { if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n", DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count); args->buffer_count);
FAIL();
return -ENOMEM; return -ENOMEM;
} }
ret = copy_from_user(exec2_list, ret = copy_from_user(exec2_list,
@ -1148,7 +1184,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
if (ret != 0) { if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n", DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret); args->buffer_count, ret);
free(exec2_list); kfree(exec2_list);
FAIL();
return -EFAULT; return -EFAULT;
} }
@ -1166,6 +1203,9 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
} }
} }
free(exec2_list); kfree(exec2_list);
// LEAVE();
return ret; return ret;
} }

View File

@ -357,7 +357,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv, struct drm_i915_private *dev_priv,
u32 gt_iir) u32 gt_iir)
{ {
printf("%s\n", __FUNCTION__); // printf("%s\n", __FUNCTION__);
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
@ -413,8 +413,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
u32 pipe_stats[I915_MAX_PIPES]; u32 pipe_stats[I915_MAX_PIPES];
bool blc_event; bool blc_event;
printf("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
while (true) { while (true) {
@ -566,8 +564,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE; irqreturn_t ret = IRQ_NONE;
int i; int i;
printf("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */ /* disable master interrupt before clearing iir */
@ -643,8 +639,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
int ret = IRQ_NONE; int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
printf("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */ /* disable master interrupt before clearing iir */
@ -2488,7 +2482,7 @@ void intel_irq_init(struct drm_device *dev)
irqreturn_t intel_irq_handler(struct drm_device *dev) irqreturn_t intel_irq_handler(struct drm_device *dev)
{ {
printf("i915 irq\n"); // printf("i915 irq\n");
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ; // printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;

View File

@ -7004,8 +7004,6 @@ void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc; struct drm_crtc *crtc;
ENTER();
if (!i915_powersave) if (!i915_powersave)
return; return;

View File

@ -1341,8 +1341,8 @@ void __stdcall run_workqueue(struct workqueue_struct *cwq)
{ {
unsigned long irqflags; unsigned long irqflags;
dbgprintf("wq: %x head %x, next %x\n", // dbgprintf("wq: %x head %x, next %x\n",
cwq, &cwq->worklist, cwq->worklist.next); // cwq, &cwq->worklist, cwq->worklist.next);
spin_lock_irqsave(&cwq->lock, irqflags); spin_lock_irqsave(&cwq->lock, irqflags);
@ -1352,8 +1352,8 @@ void __stdcall run_workqueue(struct workqueue_struct *cwq)
struct work_struct, entry); struct work_struct, entry);
work_func_t f = work->func; work_func_t f = work->func;
list_del_init(cwq->worklist.next); list_del_init(cwq->worklist.next);
dbgprintf("head %x, next %x\n", // dbgprintf("head %x, next %x\n",
&cwq->worklist, cwq->worklist.next); // &cwq->worklist, cwq->worklist.next);
spin_unlock_irqrestore(&cwq->lock, irqflags); spin_unlock_irqrestore(&cwq->lock, irqflags);
f(work); f(work);
@ -1370,8 +1370,8 @@ int __queue_work(struct workqueue_struct *wq,
{ {
unsigned long flags; unsigned long flags;
dbgprintf("wq: %x, work: %x\n", // dbgprintf("wq: %x, work: %x\n",
wq, work ); // wq, work );
if(!list_empty(&work->entry)) if(!list_empty(&work->entry))
return 0; return 0;
@ -1384,8 +1384,8 @@ int __queue_work(struct workqueue_struct *wq,
list_add_tail(&work->entry, &wq->worklist); list_add_tail(&work->entry, &wq->worklist);
spin_unlock_irqrestore(&wq->lock, flags); spin_unlock_irqrestore(&wq->lock, flags);
dbgprintf("wq: %x head %x, next %x\n", // dbgprintf("wq: %x head %x, next %x\n",
wq, &wq->worklist, wq->worklist.next); // wq, &wq->worklist, wq->worklist.next);
return 1; return 1;
}; };
@ -1395,8 +1395,8 @@ void __stdcall delayed_work_timer_fn(unsigned long __data)
struct delayed_work *dwork = (struct delayed_work *)__data; struct delayed_work *dwork = (struct delayed_work *)__data;
struct workqueue_struct *wq = dwork->work.data; struct workqueue_struct *wq = dwork->work.data;
dbgprintf("wq: %x, work: %x\n", // dbgprintf("wq: %x, work: %x\n",
wq, &dwork->work ); // wq, &dwork->work );
__queue_work(wq, &dwork->work); __queue_work(wq, &dwork->work);
} }
@ -1417,8 +1417,8 @@ int queue_delayed_work(struct workqueue_struct *wq,
{ {
u32 flags; u32 flags;
dbgprintf("wq: %x, work: %x\n", // dbgprintf("wq: %x, work: %x\n",
wq, &dwork->work ); // wq, &dwork->work );
if (delay == 0) if (delay == 0)
return __queue_work(wq, &dwork->work); return __queue_work(wq, &dwork->work);