commit f55e40ca74
parent cfd2f3ef88

drm: update

git-svn-id: svn://kolibrios.org@5346 a494cfbc-eb01-0410-851d-a64ba20cac60
--- a/drivers/video/drm/radeon/radeon.h
+++ b/drivers/video/drm/radeon/radeon.h
@@ -73,6 +73,7 @@
 #include <ttm/ttm_placement.h>
 //#include <ttm/ttm_module.h>
 #include <ttm/ttm_execbuf_util.h>
 #include <linux/rwsem.h>
 
+#include <drm/drm_gem.h>
 
--- a/drivers/video/drm/radeon/radeon_cs.c
+++ b/drivers/video/drm/radeon/radeon_cs.c
@@ -183,14 +183,9 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	if (p->cs_flags & RADEON_CS_USE_VM)
 		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
 					      &p->validated);
-//	if (need_mmap_lock)
-//		down_read(&current->mm->mmap_sem);
 
 	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
 
-//	if (need_mmap_lock)
-//		up_read(&current->mm->mmap_sem);
-
 	return r;
 }
@@ -649,9 +644,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	struct radeon_cs_parser parser;
 	int r;
 
-//	down_read(&rdev->exclusive_lock);
+	down_read(&rdev->exclusive_lock);
 	if (!rdev->accel_working) {
-//		up_read(&rdev->exclusive_lock);
+		up_read(&rdev->exclusive_lock);
 		return -EBUSY;
 	}
 	/* initialize parser */
@@ -664,7 +659,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
 		radeon_cs_parser_fini(&parser, r, false);
-//		up_read(&rdev->exclusive_lock);
+		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
 	}
@@ -678,7 +673,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	if (r) {
 		radeon_cs_parser_fini(&parser, r, false);
-//		up_read(&rdev->exclusive_lock);
+		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
 	}
@@ -695,7 +690,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	}
 out:
 	radeon_cs_parser_fini(&parser, r, true);
-//	up_read(&rdev->exclusive_lock);
+	up_read(&rdev->exclusive_lock);
 	r = radeon_cs_handle_lockup(rdev, r);
 	return r;
 }
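Taken together, the radeon_cs.c hunks replace the stubbed-out locking in the command-submission ioctl with a real reader lock on rdev->exclusive_lock, so a submission can never overlap a GPU reset (which takes the same semaphore for writing). A minimal sketch of the pattern, with a hypothetical submit_work() standing in for the parser init / relocation / IB steps:

static int cs_ioctl_pattern(struct radeon_device *rdev)
{
	int r;

	down_read(&rdev->exclusive_lock);	/* exclude a concurrent GPU reset */
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	r = submit_work(rdev);			/* hypothetical stand-in */
	up_read(&rdev->exclusive_lock);		/* every exit path drops the lock */
	return radeon_cs_handle_lockup(rdev, r);
}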
--- a/drivers/video/drm/radeon/radeon_device.c
+++ b/drivers/video/drm/radeon/radeon_device.c
@@ -1282,9 +1282,8 @@ int radeon_device_init(struct radeon_device *rdev,
 	mutex_init(&rdev->gpu_clock_mutex);
 	mutex_init(&rdev->srbm_mutex);
 	mutex_init(&rdev->grbm_idx_mutex);
-
-//	init_rwsem(&rdev->pm.mclk_lock);
-//	init_rwsem(&rdev->exclusive_lock);
+	init_rwsem(&rdev->pm.mclk_lock);
+	init_rwsem(&rdev->exclusive_lock);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
 	mutex_init(&rdev->mn_lock);
 //	hash_init(rdev->mn_hash);
@@ -1456,8 +1455,12 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	int i, r;
 	int resched;
 
-//	down_write(&rdev->exclusive_lock);
-	rdev->needs_reset = false;
+	down_write(&rdev->exclusive_lock);
+
+	if (!rdev->needs_reset) {
+		up_write(&rdev->exclusive_lock);
+		return 0;
+	}
 
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
@@ -1498,7 +1501,10 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		dev_info(rdev->dev, "GPU reset failed\n");
 	}
 
-//	up_write(&rdev->exclusive_lock);
+	rdev->needs_reset = r == -EAGAIN;
+	rdev->in_reset = false;
+
+	up_read(&rdev->exclusive_lock);
 	return r;
 }
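The reset side takes the same semaphore for writing, and the new early-out lets a caller that lost the race simply return once another thread has completed the reset. Note that the function releases with up_read() even though it acquired with down_write(): in the upstream 3.19 code the lock is downgraded (downgrade_write()) in unchanged code between these two hunks, so blocked submitters can resume while display state is still being restored. A condensed sketch of that shape, with a hypothetical do_asic_reset() for the elided reset steps:

static int gpu_reset_pattern(struct radeon_device *rdev)
{
	int r;

	down_write(&rdev->exclusive_lock);
	if (!rdev->needs_reset) {
		/* another thread already performed the reset */
		up_write(&rdev->exclusive_lock);
		return 0;
	}
	r = do_asic_reset(rdev);		/* hypothetical stand-in */
	downgrade_write(&rdev->exclusive_lock);	/* writer -> reader */
	/* ... resume work runs under the read lock ... */
	rdev->needs_reset = r == -EAGAIN;
	up_read(&rdev->exclusive_lock);
	return r;
}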
--- a/drivers/video/drm/radeon/radeon_gem.c
+++ b/drivers/video/drm/radeon/radeon_gem.c
@@ -229,10 +229,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	return r;
 }
 
-static int radeon_mode_mmap(struct drm_file *filp,
+int radeon_mode_dumb_mmap(struct drm_file *filp,
 			  struct drm_device *dev,
-			    uint32_t handle, bool dumb,
-			    uint64_t *offset_p)
+			  uint32_t handle, uint64_t *offset_p)
 {
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -241,14 +240,6 @@ static int radeon_mode_mmap(struct drm_file *filp,
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
-
-	/*
-	 * We don't allow dumb mmaps on objects created using another
-	 * interface.
-	 */
-	WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
-		  "Illegal dumb map of GPU buffer.\n");
-
 	robj = gem_to_radeon_bo(gobj);
 	*offset_p = radeon_bo_mmap_offset(robj);
 	drm_gem_object_unreference_unlocked(gobj);
@@ -260,8 +251,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_radeon_gem_mmap *args = data;
 
-	return radeon_mode_mmap(filp, dev, args->handle, false,
-				&args->addr_ptr);
+	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
 }
 
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
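These radeon_gem.c hunks track the upstream 3.19 revert of the dumb-buffer mmap restrictions: radeon_mode_mmap() and its dumb flag go away, and radeon_mode_dumb_mmap() once again just translates a GEM handle into the fake mmap offset. For context, a hedged sketch of how a stock Linux userspace client consumes the ioctl (KolibriOS consumes it differently; error handling mostly omitted):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/radeon_drm.h>

/* Map a radeon GEM object: ask the kernel for the buffer's fake offset,
 * then mmap() the DRM fd at that offset. */
static void *map_gem_bo(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_radeon_gem_mmap args = { .handle = handle, .size = size };

	if (ioctl(drm_fd, DRM_IOCTL_RADEON_GEM_MMAP, &args))
		return NULL;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, args.addr_ptr);
}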
--- a/drivers/video/drm/radeon/radeon_object.c
+++ b/drivers/video/drm/radeon/radeon_object.c
@@ -241,11 +241,11 @@ int radeon_bo_create(struct radeon_device *rdev,
 
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
-//	down_read(&rdev->pm.mclk_lock);
+	down_read(&rdev->pm.mclk_lock);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
 			acc_size, sg, resv, &radeon_ttm_bo_destroy);
-//	up_read(&rdev->pm.mclk_lock);
+	up_read(&rdev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
 		return r;
 	}
@@ -488,9 +488,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 		u32 current_domain =
 			radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
-		WARN_ONCE(bo->gem_base.dumb,
-			  "GPU use of dumb buffer is illegal.\n");
-
 		/* Check if this buffer will be moved and don't move it
 		 * if we have moved too many buffers for this IB already.
 		 *
--- a/drivers/video/drm/radeon/radeon_pm.c
+++ b/drivers/video/drm/radeon/radeon_pm.c
@@ -251,7 +251,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 		return;
 
 	mutex_lock(&rdev->ddev->struct_mutex);
-//	down_write(&rdev->pm.mclk_lock);
+	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
 
 	/* wait for the rings to drain */
@@ -264,7 +264,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 		if (r) {
 			/* needs a GPU reset dont reset here */
 			mutex_unlock(&rdev->ring_lock);
-//			up_write(&rdev->pm.mclk_lock);
+			up_write(&rdev->pm.mclk_lock);
 			mutex_unlock(&rdev->ddev->struct_mutex);
 			return;
 		}
@@ -300,7 +300,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 
 	mutex_unlock(&rdev->ring_lock);
-//	up_write(&rdev->pm.mclk_lock);
+	up_write(&rdev->pm.mclk_lock);
 	mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
@@ -872,7 +872,7 @@ force:
 	}
 
 	mutex_lock(&rdev->ddev->struct_mutex);
-//	down_write(&rdev->pm.mclk_lock);
+	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
 
 	/* update whether vce is active */
@@ -920,7 +920,7 @@ force:
 
 done:
 	mutex_unlock(&rdev->ring_lock);
-//	up_write(&rdev->pm.mclk_lock);
+	up_write(&rdev->pm.mclk_lock);
 	mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
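The radeon_pm.c hunks enable the writer side of pm.mclk_lock: reclocking takes the semaphore for writing, while radeon_bo_create() above takes it for reading, so TTM buffer placement can proceed in parallel with itself but never across a memory-clock change. A condensed sketch of the reclock critical section, with hypothetical helpers standing in for the elided body:

static void reclock_pattern(struct radeon_device *rdev)
{
	mutex_lock(&rdev->ddev->struct_mutex);
	down_write(&rdev->pm.mclk_lock);	/* blocks new BO placement */
	mutex_lock(&rdev->ring_lock);

	if (wait_for_rings_idle(rdev))		/* hypothetical stand-in */
		goto out;
	program_clocks(rdev);			/* hypothetical stand-in */
out:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}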
--- a/drivers/video/drm/ttm/ttm_page_alloc.c
+++ b/drivers/video/drm/ttm/ttm_page_alloc.c
@@ -43,7 +43,7 @@
 #include <linux/slab.h>
 //#include <linux/dma-mapping.h>
 
-//#include <linux/atomic.h>
+#include <linux/atomic.h>
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>