forked from KolibriOS/kolibrios

vmwgfx: 3.14-rc1

git-svn-id: svn://kolibrios.org@4569 a494cfbc-eb01-0410-851d-a64ba20cac60
parent 76c50442a0
commit 4130072f5a
@@ -426,8 +426,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		spin_unlock(&bdev->fence_lock);
 
-		if (!ret)
+		if (!ret) {
+
+			/*
+			 * Make NO_EVICT bos immediately available to
+			 * shrinkers, now that they are queued for
+			 * destruction.
+			 */
+			if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+				bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+				ttm_bo_add_to_lru(bo);
+			}
+
 			ww_mutex_unlock(&bo->resv->lock);
+		}
 
 		kref_get(&bo->list_kref);
 		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -944,7 +956,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			bool interruptible,
 			bool no_wait_gpu)
@@ -986,24 +998,32 @@ out_unlock:
 }
 #endif
 
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
-			     struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+			      struct ttm_mem_reg *mem,
+			      uint32_t *new_flags)
 {
 	int i;
 
 	if (mem->mm_node && placement->lpfn != 0 &&
 	    (mem->start < placement->fpfn ||
 	     mem->start + mem->num_pages > placement->lpfn))
-		return -1;
+		return false;
 
 	for (i = 0; i < placement->num_placement; i++) {
-		if ((placement->placement[i] & mem->placement &
-			TTM_PL_MASK_CACHING) &&
-			(placement->placement[i] & mem->placement &
-			TTM_PL_MASK_MEM))
-			return i;
+		*new_flags = placement->placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
 	}
-	return -1;
+
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		*new_flags = placement->busy_placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
+	}
+
+	return false;
 }
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1012,6 +1032,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 		    bool no_wait_gpu)
 {
 	int ret;
+	uint32_t new_flags;
 
 //	BUG_ON(!ttm_bo_is_reserved(bo));
 	/* Check that range is valid */
@@ -1022,8 +1043,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Check whether we need to move buffer.
 	 */
-	ret = ttm_bo_mem_compat(placement, &bo->mem);
-	if (ret < 0) {
+	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
 //		ret = ttm_bo_move_buffer(bo, placement, interruptible,
 //					 no_wait_gpu);
 		if (ret)
@@ -1033,7 +1053,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 * Use the access and other non-mapping-related flag bits from
 	 * the compatible memory placement flags to the active flags
 	 */
-	ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+	ttm_flag_masked(&bo->mem.placement, new_flags,
 			~TTM_PL_MASK_MEMTYPE);
 	}
 	/*
@@ -1103,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
+	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1368,3 +1389,36 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }
 
+int ttm_bo_wait(struct ttm_buffer_object *bo,
+		bool lazy, bool interruptible, bool no_wait)
+{
+	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
+	void *sync_obj;
+	int ret = 0;
+
+	if (likely(bo->sync_obj == NULL))
+		return 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_wait);
+
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret = 0;
+
+	/*
+	 * Using ttm_bo_reserve makes sure the lru lists are updated.
+	 */
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
+
+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+{
+	atomic_dec(&bo->cpu_writers);
+}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
@@ -187,7 +187,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 	}
 }
 
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -219,7 +219,7 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 	return 0;
 }
 
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			 void *virtual)
 {
 	struct ttm_mem_type_manager *man;
@@ -343,20 +343,26 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	if (ret)
 		goto out;
 
+	/*
+	 * Single TTM move. NOP.
+	 */
 	if (old_iomap == NULL && new_iomap == NULL)
 		goto out2;
+
+	/*
+	 * Don't move nonexistent data. Clear destination instead.
+	 */
 	if (old_iomap == NULL && ttm == NULL)
 		goto out2;
 
-	if (ttm->state == tt_unpopulated) {
+	/*
+	 * TTM might be null for moves within the same region.
+	 */
+	if (ttm && ttm->state == tt_unpopulated) {
 		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
-		if (ret) {
-			/* if we fail here don't nuke the mm node
-			 * as the bo still owns it */
-			old_copy.mm_node = NULL;
+		if (ret)
 			goto out1;
-		}
 	}
 
 	add = 0;
 	dir = 1;
@@ -381,12 +387,9 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 						   prot);
 		} else
 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
-		if (ret) {
-			/* failing here, means keep old copy as-is */
-			old_copy.mm_node = NULL;
+		if (ret)
 			goto out1;
-		}
 	}
 	mb();
 out2:
 	old_copy = *old_mem;
@@ -403,6 +406,11 @@ out1:
 	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+
+	/*
+	 * On error, keep the mm node!
+	 */
+	if (!ret)
 		ttm_bo_mem_put(bo, &old_copy);
 	return ret;
 }
@@ -582,7 +590,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (start_page > bo->num_pages)
 		return -EINVAL;
 #if 0
-	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 #endif
 	(void) ttm_mem_io_lock(man, false);
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,12 @@
  **************************************************************************/
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * While no substantial code is shared, the prime code is inspired by
+ * drm_prime.c, with
+ * Authors:
+ *      Dave Airlie <airlied@redhat.com>
+ *      Rob Clark <rob.clark@linaro.org>
  */
 /** @file ttm_ref_object.c
  *
@@ -34,6 +40,7 @@
  * and release on file close.
  */
 
+
 /**
  * struct ttm_object_file
  *
@@ -51,6 +58,8 @@
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
+#include <linux/mutex.h>
+
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
 #include <linux/list.h>
@@ -69,7 +78,7 @@ static inline int __must_check kref_get_unless_zero(struct kref *kref)
 
 struct ttm_object_file {
 	struct ttm_object_device *tdev;
-	rwlock_t lock;
+	spinlock_t lock;
 	struct list_head ref_list;
 	struct drm_open_hash ref_hash[TTM_REF_NUM];
 	struct kref refcount;
@@ -124,6 +133,8 @@ struct ttm_ref_object {
 	struct ttm_object_file *tfile;
 };
 
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
+
 static inline struct ttm_object_file *
 ttm_object_file_ref(struct ttm_object_file *tfile)
 {
@@ -206,10 +217,9 @@ static void ttm_release_base(struct kref *kref)
 	 * call_rcu() or ttm_base_object_kfree().
 	 */
 
-	if (base->refcount_release) {
-		ttm_object_file_unref(&base->tfile);
+	ttm_object_file_unref(&base->tfile);
+	if (base->refcount_release)
 		base->refcount_release(&base);
-	}
 }
 
 void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -225,33 +235,45 @@ EXPORT_SYMBOL(ttm_base_object_unref);
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 					       uint32_t key)
 {
-	struct ttm_object_device *tdev = tfile->tdev;
-	struct ttm_base_object *base;
+	struct ttm_base_object *base = NULL;
 	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
 	int ret;
 
 //	rcu_read_lock();
-	ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
 
 	if (likely(ret == 0)) {
-		base = drm_hash_entry(hash, struct ttm_base_object, hash);
-		ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+		if (!kref_get_unless_zero(&base->refcount))
+			base = NULL;
 	}
 //	rcu_read_unlock();
 
-	if (unlikely(ret != 0))
-		return NULL;
-
-	if (tfile != base->tfile && !base->shareable) {
-		pr_err("Attempted access of non-shareable object\n");
-		ttm_base_object_unref(&base);
-		return NULL;
-	}
-
 	return base;
 }
 EXPORT_SYMBOL(ttm_base_object_lookup);
 
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+	struct ttm_base_object *base = NULL;
+	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tdev->object_hash;
+	int ret;
+
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+	if (likely(ret == 0)) {
+		base = drm_hash_entry(hash, struct ttm_base_object, hash);
+		if (!kref_get_unless_zero(&base->refcount))
+			base = NULL;
+	}
+
+	return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
+
 int ttm_ref_object_add(struct ttm_object_file *tfile,
 		       struct ttm_base_object *base,
 		       enum ttm_ref_type ref_type, bool *existed)
@@ -266,17 +288,15 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 		*existed = true;
 
 	while (ret == -EINVAL) {
-		read_lock(&tfile->lock);
-		ret = drm_ht_find_item(ht, base->hash.key, &hash);
+		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-			kref_get(&ref->kref);
-			read_unlock(&tfile->lock);
+			if (!kref_get_unless_zero(&ref->kref)) {
 				break;
 			}
+		}
 
-		read_unlock(&tfile->lock);
 		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
 					   false, false);
 		if (unlikely(ret != 0))
@@ -293,19 +313,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 		ref->ref_type = ref_type;
 		kref_init(&ref->kref);
 
-		write_lock(&tfile->lock);
-		ret = drm_ht_insert_item(ht, &ref->hash);
+		spin_lock(&tfile->lock);
+		ret = drm_ht_insert_item_rcu(ht, &ref->hash);
 
 		if (likely(ret == 0)) {
 			list_add_tail(&ref->head, &tfile->ref_list);
 			kref_get(&base->refcount);
-			write_unlock(&tfile->lock);
+			spin_unlock(&tfile->lock);
 			if (existed != NULL)
 				*existed = false;
 			break;
 		}
 
-		write_unlock(&tfile->lock);
+		spin_unlock(&tfile->lock);
 		BUG_ON(ret != -EINVAL);
 
 		ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -326,9 +346,9 @@ static void ttm_ref_object_release(struct kref *kref)
 	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
 	ht = &tfile->ref_hash[ref->ref_type];
-	(void)drm_ht_remove_item(ht, &ref->hash);
+	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
 	list_del(&ref->head);
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 
 	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
 		base->ref_obj_release(base, ref->ref_type);
@@ -336,7 +356,7 @@ static void ttm_ref_object_release(struct kref *kref)
 	ttm_base_object_unref(&ref->obj);
 	ttm_mem_global_free(mem_glob, sizeof(*ref));
 	kfree(ref);
-	write_lock(&tfile->lock);
+	spin_lock(&tfile->lock);
 }
 
 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -347,15 +367,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
 	struct drm_hash_item *hash;
 	int ret;
 
-	write_lock(&tfile->lock);
+	spin_lock(&tfile->lock);
 	ret = drm_ht_find_item(ht, key, &hash);
 	if (unlikely(ret != 0)) {
-		write_unlock(&tfile->lock);
+		spin_unlock(&tfile->lock);
 		return -EINVAL;
 	}
 	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
 	kref_put(&ref->kref, ttm_ref_object_release);
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -368,7 +388,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 	struct ttm_object_file *tfile = *p_tfile;
 
 	*p_tfile = NULL;
-	write_lock(&tfile->lock);
+	spin_lock(&tfile->lock);
 
 	/*
 	 * Since we release the lock within the loop, we have to
@@ -384,7 +404,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 	for (i = 0; i < TTM_REF_NUM; ++i)
 		drm_ht_remove(&tfile->ref_hash[i]);
 
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 	ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -400,7 +420,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
 	if (unlikely(tfile == NULL))
 		return NULL;
 
-	rwlock_init(&tfile->lock);
+	spin_lock_init(&tfile->lock);
 	tfile->tdev = tdev;
 	kref_init(&tfile->refcount);
 	INIT_LIST_HEAD(&tfile->ref_list);
@@ -424,9 +444,10 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_object_file_init);
 
-struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
-						 *mem_glob,
-						 unsigned int hash_order)
+struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+		       unsigned int hash_order,
+		       const struct dma_buf_ops *ops)
 {
 	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
 	int ret;
@@ -438,10 +459,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
 	spin_lock_init(&tdev->object_lock);
 	atomic_set(&tdev->object_count, 0);
 	ret = drm_ht_create(&tdev->object_hash, hash_order);
-	if (likely(ret == 0))
+	if (ret != 0)
+		goto out_no_object_hash;
+
+//	tdev->ops = *ops;
+//	tdev->dmabuf_release = tdev->ops.release;
+//	tdev->ops.release = ttm_prime_dmabuf_release;
+//	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
+//			     ttm_round_pot(sizeof(struct file));
 	return tdev;
 
+out_no_object_hash:
 	kfree(tdev);
 	return NULL;
 }
@@ -41,7 +41,7 @@
 #include <linux/mm.h>
 #include <linux/seq_file.h> /* for seq_printf */
 #include <linux/slab.h>
-//#include <linux/dma-mapping.h>
+#include <linux/dma-mapping.h>
 
 //#include <linux/atomic.h>
 
@@ -172,9 +172,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 		ttm_tt_unbind(ttm);
 	}
 
-	if (likely(ttm->pages != NULL)) {
-		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
-	}
+//	if (ttm->state == tt_unbound)
+//		ttm_tt_unpopulate(ttm);
 
 //	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
 //	    ttm->swap_storage)
@@ -368,7 +367,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		page_cache_release(to_page);
 	}
 
-	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+	ttm_tt_unpopulate(ttm);
 	ttm->swap_storage = swap_storage;
 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
 	if (persistent_swap_storage)
@@ -7,20 +7,20 @@ FASM = fasm.exe
 DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
 DRV_TOPDIR = $(CURDIR)/../../..
+DRV_INCLUDES = $(DRV_TOPDIR)/include
 DRM_TOPDIR = $(CURDIR)/..
 
-DRV_INCLUDES = $(DRV_TOPDIR)/include
-
 INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-	-I$(DRV_INCLUDES)/linux
+	-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/uapi
 
 CFLAGS = -c -O2 $(INCLUDES) -march=i686 -fomit-frame-pointer -fno-builtin-printf
+CFLAGS+= -mno-ms-bitfields
 
 LIBPATH:= $(DRV_TOPDIR)/ddk
 
 LIBS:= -lddk -lcore -lgcc
 
-LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0\
+LDFLAGS = -nostdlib -shared -s --image-base 0\
 	--file-alignment 512 --section-alignment 4096
 
 
@@ -51,8 +51,10 @@ NAME_SRC= \
 	vmwgfx_irq.c \
 	vmwgfx_kms.c \
 	vmwgfx_marker.c \
+	vmwgfx_mob.c \
 	vmwgfx_resource.c \
 	vmwgfx_scrn.c \
+	vmwgfx_shader.c \
 	vmwgfx_surface.c \
 	vmwgfx_ttm_glue.c \
 	../hdmi.c \
@@ -34,6 +34,8 @@
 
 #include "svga_reg.h"
 
+typedef uint32 PPN;
+typedef __le64 PPN64;
+
 /*
  * 3D Hardware Version
@@ -71,6 +73,9 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
 #define SVGA3D_MAX_CONTEXT_IDS                256
 #define SVGA3D_MAX_SURFACE_IDS                (32 * 1024)
 
+#define SVGA3D_NUM_TEXTURE_UNITS              32
+#define SVGA3D_NUM_LIGHTS                     8
+
 /*
  * Surface formats.
  *
@@ -81,6 +86,7 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
  */
 
 typedef enum SVGA3dSurfaceFormat {
+   SVGA3D_FORMAT_MIN                   = 0,
    SVGA3D_FORMAT_INVALID               = 0,
 
    SVGA3D_X8R8G8B8                     = 1,
@@ -134,12 +140,6 @@ typedef enum SVGA3dSurfaceFormat {
    SVGA3D_RG_S10E5                     = 35,
    SVGA3D_RG_S23E8                     = 36,
 
-   /*
-    * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
-    * the most efficient format to use when creating new surfaces
-    * expressly for index or vertex data.
-    */
-
    SVGA3D_BUFFER                       = 37,
 
    SVGA3D_Z_D24X8                      = 38,
@ -159,15 +159,114 @@ typedef enum SVGA3dSurfaceFormat {
|
|||||||
/* Video format with alpha */
|
/* Video format with alpha */
|
||||||
SVGA3D_AYUV = 45,
|
SVGA3D_AYUV = 45,
|
||||||
|
|
||||||
|
SVGA3D_R32G32B32A32_TYPELESS = 46,
|
||||||
|
SVGA3D_R32G32B32A32_FLOAT = 25,
|
||||||
|
SVGA3D_R32G32B32A32_UINT = 47,
|
||||||
|
SVGA3D_R32G32B32A32_SINT = 48,
|
||||||
|
SVGA3D_R32G32B32_TYPELESS = 49,
|
||||||
|
SVGA3D_R32G32B32_FLOAT = 50,
|
||||||
|
SVGA3D_R32G32B32_UINT = 51,
|
||||||
|
SVGA3D_R32G32B32_SINT = 52,
|
||||||
|
SVGA3D_R16G16B16A16_TYPELESS = 53,
|
||||||
|
SVGA3D_R16G16B16A16_FLOAT = 24,
|
||||||
|
SVGA3D_R16G16B16A16_UNORM = 41,
|
||||||
|
SVGA3D_R16G16B16A16_UINT = 54,
|
||||||
|
SVGA3D_R16G16B16A16_SNORM = 55,
|
||||||
|
SVGA3D_R16G16B16A16_SINT = 56,
|
||||||
|
SVGA3D_R32G32_TYPELESS = 57,
|
||||||
|
SVGA3D_R32G32_FLOAT = 36,
|
||||||
|
SVGA3D_R32G32_UINT = 58,
|
||||||
|
SVGA3D_R32G32_SINT = 59,
|
||||||
|
SVGA3D_R32G8X24_TYPELESS = 60,
|
||||||
|
SVGA3D_D32_FLOAT_S8X24_UINT = 61,
|
||||||
|
SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
|
||||||
|
SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
|
||||||
|
SVGA3D_R10G10B10A2_TYPELESS = 64,
|
||||||
|
SVGA3D_R10G10B10A2_UNORM = 26,
|
||||||
|
SVGA3D_R10G10B10A2_UINT = 65,
|
||||||
|
SVGA3D_R11G11B10_FLOAT = 66,
|
||||||
|
SVGA3D_R8G8B8A8_TYPELESS = 67,
|
||||||
|
SVGA3D_R8G8B8A8_UNORM = 68,
|
||||||
|
SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
|
||||||
|
SVGA3D_R8G8B8A8_UINT = 70,
|
||||||
|
SVGA3D_R8G8B8A8_SNORM = 28,
|
||||||
|
SVGA3D_R8G8B8A8_SINT = 71,
|
||||||
|
SVGA3D_R16G16_TYPELESS = 72,
|
||||||
|
SVGA3D_R16G16_FLOAT = 35,
|
||||||
|
SVGA3D_R16G16_UNORM = 40,
|
||||||
|
SVGA3D_R16G16_UINT = 73,
|
||||||
|
SVGA3D_R16G16_SNORM = 39,
|
||||||
|
SVGA3D_R16G16_SINT = 74,
|
||||||
|
SVGA3D_R32_TYPELESS = 75,
|
||||||
|
SVGA3D_D32_FLOAT = 76,
|
||||||
|
SVGA3D_R32_FLOAT = 34,
|
||||||
|
SVGA3D_R32_UINT = 77,
|
||||||
|
SVGA3D_R32_SINT = 78,
|
||||||
|
SVGA3D_R24G8_TYPELESS = 79,
|
||||||
|
SVGA3D_D24_UNORM_S8_UINT = 80,
|
||||||
|
SVGA3D_R24_UNORM_X8_TYPELESS = 81,
|
||||||
|
SVGA3D_X24_TYPELESS_G8_UINT = 82,
|
||||||
|
SVGA3D_R8G8_TYPELESS = 83,
|
||||||
|
SVGA3D_R8G8_UNORM = 84,
|
||||||
|
SVGA3D_R8G8_UINT = 85,
|
||||||
|
SVGA3D_R8G8_SNORM = 27,
|
||||||
|
SVGA3D_R8G8_SINT = 86,
|
||||||
|
SVGA3D_R16_TYPELESS = 87,
|
||||||
|
SVGA3D_R16_FLOAT = 33,
|
||||||
|
SVGA3D_D16_UNORM = 8,
|
||||||
|
SVGA3D_R16_UNORM = 88,
|
||||||
|
SVGA3D_R16_UINT = 89,
|
||||||
|
SVGA3D_R16_SNORM = 90,
|
||||||
|
SVGA3D_R16_SINT = 91,
|
||||||
|
SVGA3D_R8_TYPELESS = 92,
|
||||||
|
SVGA3D_R8_UNORM = 93,
|
||||||
|
SVGA3D_R8_UINT = 94,
|
||||||
|
SVGA3D_R8_SNORM = 95,
|
||||||
|
SVGA3D_R8_SINT = 96,
|
||||||
|
SVGA3D_A8_UNORM = 32,
|
||||||
|
SVGA3D_R1_UNORM = 97,
|
||||||
|
SVGA3D_R9G9B9E5_SHAREDEXP = 98,
|
||||||
|
SVGA3D_R8G8_B8G8_UNORM = 99,
|
||||||
|
SVGA3D_G8R8_G8B8_UNORM = 100,
|
||||||
|
SVGA3D_BC1_TYPELESS = 101,
|
||||||
|
SVGA3D_BC1_UNORM = 15,
|
||||||
|
SVGA3D_BC1_UNORM_SRGB = 102,
|
||||||
|
SVGA3D_BC2_TYPELESS = 103,
|
||||||
|
SVGA3D_BC2_UNORM = 17,
|
||||||
|
SVGA3D_BC2_UNORM_SRGB = 104,
|
||||||
|
SVGA3D_BC3_TYPELESS = 105,
|
||||||
|
SVGA3D_BC3_UNORM = 19,
|
||||||
|
SVGA3D_BC3_UNORM_SRGB = 106,
|
||||||
|
SVGA3D_BC4_TYPELESS = 107,
|
||||||
SVGA3D_BC4_UNORM = 108,
|
SVGA3D_BC4_UNORM = 108,
|
||||||
|
SVGA3D_BC4_SNORM = 109,
|
||||||
|
SVGA3D_BC5_TYPELESS = 110,
|
||||||
SVGA3D_BC5_UNORM = 111,
|
SVGA3D_BC5_UNORM = 111,
|
||||||
|
SVGA3D_BC5_SNORM = 112,
|
||||||
|
SVGA3D_B5G6R5_UNORM = 3,
|
||||||
|
SVGA3D_B5G5R5A1_UNORM = 5,
|
||||||
|
SVGA3D_B8G8R8A8_UNORM = 2,
|
||||||
|
SVGA3D_B8G8R8X8_UNORM = 1,
|
||||||
|
SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
|
||||||
|
SVGA3D_B8G8R8A8_TYPELESS = 114,
|
||||||
|
SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
|
||||||
|
SVGA3D_B8G8R8X8_TYPELESS = 116,
|
||||||
|
SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
|
||||||
|
|
||||||
/* Advanced D3D9 depth formats. */
|
/* Advanced D3D9 depth formats. */
|
||||||
SVGA3D_Z_DF16 = 118,
|
SVGA3D_Z_DF16 = 118,
|
||||||
SVGA3D_Z_DF24 = 119,
|
SVGA3D_Z_DF24 = 119,
|
||||||
SVGA3D_Z_D24S8_INT = 120,
|
SVGA3D_Z_D24S8_INT = 120,
|
||||||
|
|
||||||
SVGA3D_FORMAT_MAX
|
/* Planar video formats. */
|
||||||
|
SVGA3D_YV12 = 121,
|
||||||
|
|
||||||
|
/* Shader constant formats. */
|
||||||
|
SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
|
||||||
|
SVGA3D_SURFACE_SHADERCONST_INT = 123,
|
||||||
|
SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
|
||||||
|
|
||||||
|
SVGA3D_FORMAT_MAX = 125,
|
||||||
} SVGA3dSurfaceFormat;
|
} SVGA3dSurfaceFormat;
|
||||||
|
|
||||||
typedef uint32 SVGA3dColor; /* a, r, g, b */
|
typedef uint32 SVGA3dColor; /* a, r, g, b */
|
||||||
@@ -957,15 +1056,21 @@ typedef enum {
 } SVGA3dCubeFace;
 
 typedef enum {
+   SVGA3D_SHADERTYPE_INVALID           = 0,
+   SVGA3D_SHADERTYPE_MIN               = 1,
    SVGA3D_SHADERTYPE_VS                = 1,
    SVGA3D_SHADERTYPE_PS                = 2,
-   SVGA3D_SHADERTYPE_MAX
+   SVGA3D_SHADERTYPE_MAX               = 3,
+   SVGA3D_SHADERTYPE_GS                = 3,
 } SVGA3dShaderType;
 
+#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
 typedef enum {
    SVGA3D_CONST_TYPE_FLOAT             = 0,
    SVGA3D_CONST_TYPE_INT               = 1,
    SVGA3D_CONST_TYPE_BOOL              = 2,
+   SVGA3D_CONST_TYPE_MAX
 } SVGA3dShaderConstType;
 
 #define SVGA3D_MAX_SURFACE_FACES        6
@ -1056,9 +1161,74 @@ typedef enum {
|
|||||||
#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
|
#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
|
||||||
#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
|
#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
|
||||||
#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
|
#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
|
||||||
#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42
|
#define SVGA_3D_CMD_SCREEN_DMA 1082
|
||||||
|
#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
|
||||||
|
#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084
|
||||||
|
|
||||||
#define SVGA_3D_CMD_FUTURE_MAX 2000
|
#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085
|
||||||
|
#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086
|
||||||
|
#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087
|
||||||
|
#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088
|
||||||
|
#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089
|
||||||
|
#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_SET_OTABLE_BASE 1091
|
||||||
|
#define SVGA_3D_CMD_READBACK_OTABLE 1092
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_DEFINE_GB_MOB 1093
|
||||||
|
#define SVGA_3D_CMD_DESTROY_GB_MOB 1094
|
||||||
|
#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095
|
||||||
|
#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097
|
||||||
|
#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098
|
||||||
|
#define SVGA_3D_CMD_BIND_GB_SURFACE 1099
|
||||||
|
#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100
|
||||||
|
#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101
|
||||||
|
#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102
|
||||||
|
#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103
|
||||||
|
#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104
|
||||||
|
#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105
|
||||||
|
#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107
|
||||||
|
#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108
|
||||||
|
#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109
|
||||||
|
#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110
|
||||||
|
#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112
|
||||||
|
#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113
|
||||||
|
#define SVGA_3D_CMD_BIND_GB_SHADER 1114
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116
|
||||||
|
#define SVGA_3D_CMD_END_GB_QUERY 1117
|
||||||
|
#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_NOP 1119
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_ENABLE_GART 1120
|
||||||
|
#define SVGA_3D_CMD_DISABLE_GART 1121
|
||||||
|
#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122
|
||||||
|
#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124
|
||||||
|
#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125
|
||||||
|
#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126
|
||||||
|
#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128
|
||||||
|
#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
|
||||||
|
#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
|
||||||
|
|
||||||
|
#define SVGA_3D_CMD_MAX 1142
|
||||||
|
#define SVGA_3D_CMD_FUTURE_MAX 3000
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Common substructures used in multiple FIFO commands:
|
* Common substructures used in multiple FIFO commands:
|
||||||
@ -1749,6 +1919,495 @@ struct {
|
|||||||
} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
|
} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Guest-backed surface definitions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef uint32 SVGAMobId;
|
||||||
|
|
||||||
|
typedef enum SVGAMobFormat {
|
||||||
|
SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
|
||||||
|
SVGA3D_MOBFMT_PTDEPTH_0 = 0,
|
||||||
|
SVGA3D_MOBFMT_PTDEPTH_1 = 1,
|
||||||
|
SVGA3D_MOBFMT_PTDEPTH_2 = 2,
|
||||||
|
SVGA3D_MOBFMT_RANGE = 3,
|
||||||
|
SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
|
||||||
|
SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
|
||||||
|
SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
|
||||||
|
SVGA3D_MOBFMT_MAX,
|
||||||
|
} SVGAMobFormat;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Sizes of opaque types.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
|
||||||
|
#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
|
||||||
|
#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
|
||||||
|
#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
|
||||||
|
#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
|
||||||
|
#define SVGA3D_CONTEXT_DATA_SIZE 16384
|
||||||
|
|
||||||
|
/*
|
||||||
|
* SVGA3dCmdSetOTableBase --
|
||||||
|
*
|
||||||
|
* This command allows the guest to specify the base PPN of the
|
||||||
|
* specified object table.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
SVGA_OTABLE_MOB = 0,
|
||||||
|
SVGA_OTABLE_MIN = 0,
|
||||||
|
SVGA_OTABLE_SURFACE = 1,
|
||||||
|
SVGA_OTABLE_CONTEXT = 2,
|
||||||
|
SVGA_OTABLE_SHADER = 3,
|
||||||
|
SVGA_OTABLE_SCREEN_TARGET = 4,
|
||||||
|
SVGA_OTABLE_DX9_MAX = 5,
|
||||||
|
SVGA_OTABLE_MAX = 8
|
||||||
|
} SVGAOTableType;
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
SVGAOTableType type;
|
||||||
|
PPN baseAddress;
|
||||||
|
uint32 sizeInBytes;
|
||||||
|
uint32 validSizeInBytes;
|
||||||
|
SVGAMobFormat ptDepth;
|
||||||
|
}
|
||||||
|
__attribute__((__packed__))
|
||||||
|
SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
SVGAOTableType type;
|
||||||
|
PPN64 baseAddress;
|
||||||
|
uint32 sizeInBytes;
|
||||||
|
uint32 validSizeInBytes;
|
||||||
|
SVGAMobFormat ptDepth;
|
||||||
|
}
|
||||||
|
__attribute__((__packed__))
|
||||||
|
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
SVGAOTableType type;
|
||||||
|
}
|
||||||
|
__attribute__((__packed__))
|
||||||
|
SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Define a memory object (Mob) in the OTable.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdDefineGBMob {
|
||||||
|
SVGAMobId mobid;
|
||||||
|
SVGAMobFormat ptDepth;
|
||||||
|
PPN base;
|
||||||
|
uint32 sizeInBytes;
|
||||||
|
}
|
||||||
|
__attribute__((__packed__))
|
||||||
|
SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Destroys an object in the OTable.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdDestroyGBMob {
|
||||||
|
SVGAMobId mobid;
|
||||||
|
}
|
||||||
|
__attribute__((__packed__))
|
||||||
|
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Redefine an object in the OTable.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdRedefineGBMob {
|
||||||
|
SVGAMobId mobid;
|
||||||
|
SVGAMobFormat ptDepth;
|
||||||
|
PPN base;
|
||||||
|
uint32 sizeInBytes;
|
||||||
|
}
|
||||||
|
__attribute__((__packed__))
|
||||||
|
SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Define a memory object (Mob) in the OTable with a PPN64 base.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdDefineGBMob64 {
|
||||||
|
SVGAMobId mobid;
|
||||||
|
SVGAMobFormat ptDepth;
|
||||||
|
PPN64 base;
|
||||||
|
uint32 sizeInBytes;
|
||||||
|
}
|
||||||
|
__attribute__((__packed__))
|
||||||
|
SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Redefine an object in the OTable with PPN64 base.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdRedefineGBMob64 {
|
||||||
|
SVGAMobId mobid;
|
||||||
|
SVGAMobFormat ptDepth;
|
||||||
|
PPN64 base;
|
||||||
|
uint32 sizeInBytes;
|
||||||
|
}
|
||||||
|
__attribute__((__packed__))
|
||||||
|
SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Notification that the page tables have been modified.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdUpdateGBMobMapping {
|
||||||
|
SVGAMobId mobid;
|
||||||
|
}
|
||||||
|
__attribute__((__packed__))
|
||||||
|
SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Define a guest-backed surface.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdDefineGBSurface {
|
||||||
|
uint32 sid;
|
||||||
|
SVGA3dSurfaceFlags surfaceFlags;
|
||||||
|
SVGA3dSurfaceFormat format;
|
||||||
|
uint32 numMipLevels;
|
||||||
|
uint32 multisampleCount;
|
||||||
|
SVGA3dTextureFilter autogenFilter;
|
||||||
|
SVGA3dSize size;
|
||||||
|
} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Destroy a guest-backed surface.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdDestroyGBSurface {
|
||||||
|
uint32 sid;
|
||||||
|
} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Bind a guest-backed surface to an object.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdBindGBSurface {
|
||||||
|
uint32 sid;
|
||||||
|
SVGAMobId mobid;
|
||||||
|
} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Conditionally bind a mob to a guest backed surface if testMobid
|
||||||
|
* matches the currently bound mob. Optionally issue a readback on
|
||||||
|
* the surface while it is still bound to the old mobid if the mobid
|
||||||
|
* is changed by this command.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct{
|
||||||
|
uint32 sid;
|
||||||
|
SVGAMobId testMobid;
|
||||||
|
SVGAMobId mobid;
|
||||||
|
uint32 flags;
|
||||||
|
}
|
||||||
|
SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Update an image in a guest-backed surface.
|
||||||
|
* (Inform the device that the guest-contents have been updated.)
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdUpdateGBImage {
|
||||||
|
SVGA3dSurfaceImageId image;
|
||||||
|
SVGA3dBox box;
|
||||||
|
} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Update an entire guest-backed surface.
|
||||||
|
* (Inform the device that the guest-contents have been updated.)
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdUpdateGBSurface {
|
||||||
|
uint32 sid;
|
||||||
|
} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Readback an image in a guest-backed surface.
|
||||||
|
* (Request the device to flush the dirty contents into the guest.)
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdReadbackGBImage {
|
||||||
|
SVGA3dSurfaceImageId image;
|
||||||
|
} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Readback an entire guest-backed surface.
|
||||||
|
* (Request the device to flush the dirty contents into the guest.)
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdReadbackGBSurface {
|
||||||
|
uint32 sid;
|
||||||
|
} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Readback a sub rect of an image in a guest-backed surface. After
|
||||||
|
* issuing this command the driver is required to issue an update call
|
||||||
|
* of the same region before issuing any other commands that reference
|
||||||
|
* this surface or rendering is not guaranteed.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdReadbackGBImagePartial {
|
||||||
|
SVGA3dSurfaceImageId image;
|
||||||
|
SVGA3dBox box;
|
||||||
|
uint32 invertBox;
|
||||||
|
}
|
||||||
|
SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Invalidate an image in a guest-backed surface.
|
||||||
|
* (Notify the device that the contents can be lost.)
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdInvalidateGBImage {
|
||||||
|
SVGA3dSurfaceImageId image;
|
||||||
|
} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Invalidate an entire guest-backed surface.
|
||||||
|
* (Notify the device that the contents if all images can be lost.)
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdInvalidateGBSurface {
|
||||||
|
uint32 sid;
|
||||||
|
} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Invalidate a sub rect of an image in a guest-backed surface. After
|
||||||
|
* issuing this command the driver is required to issue an update call
|
||||||
|
* of the same region before issuing any other commands that reference
|
||||||
|
* this surface or rendering is not guaranteed.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdInvalidateGBImagePartial {
|
||||||
|
SVGA3dSurfaceImageId image;
|
||||||
|
SVGA3dBox box;
|
||||||
|
uint32 invertBox;
|
||||||
|
}
|
||||||
|
SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Define a guest-backed context.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdDefineGBContext {
|
||||||
|
uint32 cid;
|
||||||
|
} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Destroy a guest-backed context.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdDestroyGBContext {
|
||||||
|
uint32 cid;
|
||||||
|
} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Bind a guest-backed context.
|
||||||
|
*
|
||||||
|
* validContents should be set to 0 for new contexts,
|
||||||
|
* and 1 if this is an old context which is getting paged
|
||||||
|
* back on to the device.
|
||||||
|
*
|
||||||
|
* For new contexts, it is recommended that the driver
|
||||||
|
* issue commands to initialize all interesting state
|
||||||
|
* prior to rendering.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdBindGBContext {
|
||||||
|
uint32 cid;
|
||||||
|
SVGAMobId mobid;
|
||||||
|
uint32 validContents;
|
||||||
|
} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Readback a guest-backed context.
|
||||||
|
* (Request that the device flush the contents back into guest memory.)
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdReadbackGBContext {
|
||||||
|
uint32 cid;
|
||||||
|
} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Invalidate a guest-backed context.
|
||||||
|
*/
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdInvalidateGBContext {
|
||||||
|
uint32 cid;
|
||||||
|
} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Define a guest-backed shader.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct SVGA3dCmdDefineGBShader {
|
||||||
|
uint32 shid;
|
||||||
|
SVGA3dShaderType type;
|
||||||
|
uint32 sizeInBytes;
|
||||||
|
} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Bind a guest-backed shader.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef struct SVGA3dCmdBindGBShader {
|
||||||
|
uint32 shid;
|
||||||
|
SVGAMobId mobid;
|
||||||
|
uint32 offsetInBytes;
|
||||||
|
} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Destroy a guest-backed shader.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef struct SVGA3dCmdDestroyGBShader {
|
||||||
|
uint32 shid;
|
||||||
|
} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
uint32 cid;
|
||||||
|
uint32 regStart;
|
||||||
|
SVGA3dShaderType shaderType;
|
||||||
|
SVGA3dShaderConstType constType;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Followed by a variable number of shader constants.
|
||||||
|
*
|
||||||
|
* Note that FLOAT and INT constants are 4-dwords in length, while
|
||||||
|
* BOOL constants are 1-dword in length.
|
||||||
|
*/
|
||||||
|
} SVGA3dCmdSetGBShaderConstInline;
|
||||||
|
/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
uint32 cid;
|
||||||
|
SVGA3dQueryType type;
|
||||||
|
} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
uint32 cid;
|
||||||
|
SVGA3dQueryType type;
|
||||||
|
SVGAMobId mobid;
|
||||||
|
uint32 offset;
|
||||||
|
} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
|
||||||
|
*
|
||||||
|
* The semantics of this command are identical to the
|
||||||
|
* SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
|
||||||
|
* to a Mob instead of a GMR.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
uint32 cid;
|
||||||
|
SVGA3dQueryType type;
|
||||||
|
SVGAMobId mobid;
|
||||||
|
uint32 offset;
|
||||||
|
} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
SVGAMobId mobid;
|
||||||
|
uint32 fbOffset;
|
||||||
|
uint32 initalized;
|
||||||
|
}
|
||||||
|
SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
SVGAMobId mobid;
|
||||||
|
uint32 gartOffset;
|
||||||
|
}
|
||||||
|
SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
|
||||||
|
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
uint32 gartOffset;
|
||||||
|
uint32 numPages;
|
||||||
|
}
|
||||||
|
SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Screen Targets
|
||||||
|
*/
|
||||||
|
#define SVGA_STFLAG_PRIMARY (1 << 0)
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
uint32 stid;
|
||||||
|
uint32 width;
|
||||||
|
uint32 height;
|
||||||
|
int32 xRoot;
|
||||||
|
int32 yRoot;
|
||||||
|
uint32 flags;
|
||||||
|
}
|
||||||
|
SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
uint32 stid;
|
||||||
|
}
|
||||||
|
SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
uint32 stid;
|
||||||
|
SVGA3dSurfaceImageId image;
|
||||||
|
}
|
||||||
|
SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
|
||||||
|
|
||||||
|
typedef
|
||||||
|
struct {
|
||||||
|
uint32 stid;
|
||||||
|
SVGA3dBox box;
|
||||||
|
}
|
||||||
|
SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Capability query index.
|
* Capability query index.
|
||||||
*
|
*
|
||||||
@ -1879,10 +2538,41 @@ typedef enum {
|
|||||||
SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
|
SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Don't add new caps into the previous section; the values in this
|
* Deprecated.
|
||||||
* enumeration must not change. You can put new values right before
|
|
||||||
* SVGA3D_DEVCAP_MAX.
|
|
||||||
*/
|
*/
|
||||||
|
SVGA3D_DEVCAP_VGPU10 = 84,
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
|
||||||
|
* ored together, one for every type of video decoding supported.
|
||||||
|
*/
|
||||||
|
SVGA3D_DEVCAP_VIDEO_DECODE = 85,
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
|
||||||
|
* ored together, one for every type of video processing supported.
|
||||||
|
*/
|
||||||
|
SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
|
||||||
|
|
||||||
|
SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
|
||||||
|
SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
|
||||||
|
SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
|
||||||
|
SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
|
||||||
|
|
||||||
|
SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Does the host support the SVGA logic ops commands?
|
||||||
|
*/
|
||||||
|
SVGA3D_DEVCAP_LOGICOPS = 92,
|
||||||
|
|
||||||
|
/*
|
||||||
|
* What support does the host have for screen targets?
|
||||||
|
*
|
||||||
|
* See the SVGA3D_SCREENTARGET_CAP bits below.
|
||||||
|
*/
|
||||||
|
SVGA3D_DEVCAP_SCREENTARGETS = 93,
|
||||||
|
|
||||||
SVGA3D_DEVCAP_MAX /* This must be the last index. */
|
SVGA3D_DEVCAP_MAX /* This must be the last index. */
|
||||||
} SVGA3dDevCapIndex;
|
} SVGA3dDevCapIndex;
|
||||||
|
|
||||||
|
@@ -169,7 +169,10 @@ enum {
    SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
    SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
    SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
-   SVGA_REG_TOP = 48,               /* Must be 1 more than the last register */
+   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+   SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
+   SVGA_REG_TOP = 53,               /* Must be 1 more than the last register */
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
@@ -431,7 +434,10 @@ struct SVGASignedPoint {
 #define SVGA_CAP_TRACES             0x00200000
 #define SVGA_CAP_GMR2               0x00400000
 #define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
+#define SVGA_CAP_COMMAND_BUFFERS    0x01000000
+#define SVGA_CAP_DEAD1              0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2      0x04000000
+#define SVGA_CAP_GBOBJECTS          0x08000000
 
 /*
  * FIFO register indices.
@@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 	TTM_PL_FLAG_CACHED;
 
+static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
+	TTM_PL_FLAG_CACHED |
+	TTM_PL_FLAG_NO_EVICT;
+
 static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
 	TTM_PL_FLAG_CACHED;
 
@ -47,6 +51,9 @@ static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
|
|||||||
TTM_PL_FLAG_CACHED |
|
TTM_PL_FLAG_CACHED |
|
||||||
TTM_PL_FLAG_NO_EVICT;
|
TTM_PL_FLAG_NO_EVICT;
|
||||||
|
|
||||||
|
static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
|
||||||
|
TTM_PL_FLAG_CACHED;
|
||||||
|
|
||||||
struct ttm_placement vmw_vram_placement = {
|
struct ttm_placement vmw_vram_placement = {
|
||||||
.fpfn = 0,
|
.fpfn = 0,
|
||||||
.lpfn = 0,
|
.lpfn = 0,
|
||||||
@ -116,16 +123,26 @@ struct ttm_placement vmw_sys_placement = {
    .busy_placement = &sys_placement_flags
};

+struct ttm_placement vmw_sys_ne_placement = {
+    .fpfn = 0,
+    .lpfn = 0,
+    .num_placement = 1,
+    .placement = &sys_ne_placement_flags,
+    .num_busy_placement = 1,
+    .busy_placement = &sys_ne_placement_flags
+};
+
static uint32_t evictable_placement_flags[] = {
    TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
    TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
-    VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+    VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+    VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
    .fpfn = 0,
    .lpfn = 0,
-    .num_placement = 3,
+    .num_placement = 4,
    .placement = evictable_placement_flags,
    .num_busy_placement = 1,
    .busy_placement = &sys_placement_flags
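A hedged sketch of how these placement lists are consumed, using the TTM API already present in this tree: because vmw_evictable_placement now also lists MOB memory, a buffer already resident in a MOB is treated as compatible and left in place. The wrapper function is hypothetical.

/* Minimal sketch, assuming the 3.14-era ttm_bo_validate() signature. */
static int vmw_validate_evictable(struct ttm_buffer_object *bo, bool interruptible)
{
    /* Falls back to sys_placement_flags (busy_placement) under memory pressure. */
    return ttm_bo_validate(bo, &vmw_evictable_placement, interruptible, false);
}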
@ -140,71 +157,527 @@ struct ttm_placement vmw_srf_placement = {
    .busy_placement = gmr_vram_placement_flags
};

+struct ttm_placement vmw_mob_placement = {
+    .fpfn = 0,
+    .lpfn = 0,
+    .num_placement = 1,
+    .num_busy_placement = 1,
+    .placement = &mob_placement_flags,
+    .busy_placement = &mob_placement_flags
+};
+
struct vmw_ttm_tt {
-    struct ttm_tt ttm;
+    struct ttm_dma_tt dma_ttm;
    struct vmw_private *dev_priv;
    int gmr_id;
+    struct vmw_mob *mob;
+    int mem_type;
+    struct sg_table sgt;
+    struct vmw_sg_table vsgt;
+    uint64_t sg_alloc_size;
+    bool mapped;
};

+const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+
+/**
+ * Helper functions to advance a struct vmw_piter iterator.
+ *
+ * @viter: Pointer to the iterator.
+ *
+ * These functions return false if past the end of the list,
+ * true otherwise. Functions are selected depending on the current
+ * DMA mapping mode.
+ */
+static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
+{
+    return ++(viter->i) < viter->num_pages;
+}
+
+static bool __vmw_piter_sg_next(struct vmw_piter *viter)
+{
+    return __sg_page_iter_next(&viter->iter);
+}
+
+/**
+ * Helper functions to return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return a pointer to the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
+{
+    return viter->pages[viter->i];
+}
+
+static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
+{
+    return sg_page_iter_page(&viter->iter);
+}
+
+/**
+ * Helper functions to return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return the DMA address of the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
+{
+    return page_to_phys(viter->pages[viter->i]);
+}
+
+static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+    return viter->addrs[viter->i];
+}
+
+static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
+{
+    return sg_page_iter_dma_address(&viter->iter);
+}
+
+/**
+ * vmw_piter_start - Initialize a struct vmw_piter.
+ *
+ * @viter: Pointer to the iterator to initialize
+ * @vsgt: Pointer to a struct vmw_sg_table to initialize from
+ *
+ * Note that we're following the convention of __sg_page_iter_start, so that
+ * the iterator doesn't point to a valid page after initialization; it has
+ * to be advanced one step first.
+ */
+void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
+                     unsigned long p_offset)
+{
+    viter->i = p_offset - 1;
+    viter->num_pages = vsgt->num_pages;
+    switch (vsgt->mode) {
+    case vmw_dma_phys:
+        viter->next = &__vmw_piter_non_sg_next;
+        viter->dma_address = &__vmw_piter_phys_addr;
+        viter->page = &__vmw_piter_non_sg_page;
+        viter->pages = vsgt->pages;
+        break;
+    case vmw_dma_alloc_coherent:
+        viter->next = &__vmw_piter_non_sg_next;
+        viter->dma_address = &__vmw_piter_dma_addr;
+        viter->page = &__vmw_piter_non_sg_page;
+        viter->addrs = vsgt->addrs;
+        viter->pages = vsgt->pages;
+        break;
+    case vmw_dma_map_populate:
+    case vmw_dma_map_bind:
+        viter->next = &__vmw_piter_sg_next;
+        viter->dma_address = &__vmw_piter_sg_addr;
+        viter->page = &__vmw_piter_sg_page;
+        __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+                             vsgt->sgt->orig_nents, p_offset);
+        break;
+    default:
+        BUG();
+    }
+}
+
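The iterator hides the DMA mapping mode behind function pointers, so callers can walk pages without caring how they were mapped. A hedged sketch of the intended calling pattern, using the vmw_piter_next()/vmw_piter_dma_addr() inline wrappers that simply call through the pointers installed by vmw_piter_start(); the dump helper itself is hypothetical.

/* Hypothetical helper showing the iteration pattern used by vmw_ttm_map_dma(). */
static void vmw_piter_dump(const struct vmw_sg_table *vsgt)
{
    struct vmw_piter iter;

    for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
        DRM_INFO("page at dma addr 0x%llx\n",
                 (unsigned long long) vmw_piter_dma_addr(&iter));
}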
+/**
+ * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
+ * TTM pages
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_backend
+ *
+ * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
+ */
+static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
+{
+    struct device *dev = vmw_tt->dev_priv->dev->dev;
+
+    dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
+                 DMA_BIDIRECTIONAL);
+    vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
+}
+
+/**
+ * vmw_ttm_map_for_dma - map TTM pages to get device addresses
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_backend
+ *
+ * This function is used to get device addresses from the kernel DMA layer.
+ * However, it's violating the DMA API in that when this operation has been
+ * performed, it's illegal for the CPU to write to the pages without first
+ * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
+ * therefore only legal to call this function if we know that the function
+ * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
+ * a CPU write buffer flush.
+ */
+static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
+{
+    struct device *dev = vmw_tt->dev_priv->dev->dev;
+    int ret;
+
+    ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
+                     DMA_BIDIRECTIONAL);
+    if (unlikely(ret == 0))
+        return -ENOMEM;
+
+    vmw_tt->sgt.nents = ret;
+
+    return 0;
+}
+
+/**
+ * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Select the correct function for and make sure the TTM pages are
+ * visible to the device. Allocate storage for the device mappings.
+ * If a mapping has already been performed, indicated by the storage
+ * pointer being non NULL, the function returns success.
+ */
+static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+{
+    struct vmw_private *dev_priv = vmw_tt->dev_priv;
+    struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+    struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+    struct vmw_piter iter;
+    dma_addr_t old;
+    int ret = 0;
+    static size_t sgl_size;
+    static size_t sgt_size;
+
+    if (vmw_tt->mapped)
+        return 0;
+
+    vsgt->mode = dev_priv->map_mode;
+    vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
+    vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+    vsgt->addrs = vmw_tt->dma_ttm.dma_address;
+    vsgt->sgt = &vmw_tt->sgt;
+
+    switch (dev_priv->map_mode) {
+    case vmw_dma_map_bind:
+    case vmw_dma_map_populate:
+        if (unlikely(!sgl_size)) {
+            sgl_size = ttm_round_pot(sizeof(struct scatterlist));
+            sgt_size = ttm_round_pot(sizeof(struct sg_table));
+        }
+        vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
+        ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
+                                   true);
+        if (unlikely(ret != 0))
+            return ret;
+
+        ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
+                                        vsgt->num_pages, 0,
+                                        (unsigned long)
+                                        vsgt->num_pages << PAGE_SHIFT,
+                                        GFP_KERNEL);
+        if (unlikely(ret != 0))
+            goto out_sg_alloc_fail;
+
+        if (vsgt->num_pages > vmw_tt->sgt.nents) {
+            uint64_t over_alloc =
+                sgl_size * (vsgt->num_pages -
+                            vmw_tt->sgt.nents);
+
+            ttm_mem_global_free(glob, over_alloc);
+            vmw_tt->sg_alloc_size -= over_alloc;
+        }
+
+        ret = vmw_ttm_map_for_dma(vmw_tt);
+        if (unlikely(ret != 0))
+            goto out_map_fail;
+
+        break;
+    default:
+        break;
+    }
+
+    old = ~((dma_addr_t) 0);
+    vmw_tt->vsgt.num_regions = 0;
+    for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
+        dma_addr_t cur = vmw_piter_dma_addr(&iter);
+
+        if (cur != old + PAGE_SIZE)
+            vmw_tt->vsgt.num_regions++;
+        old = cur;
+    }
+
+    vmw_tt->mapped = true;
+    return 0;
+
+out_map_fail:
+    sg_free_table(vmw_tt->vsgt.sgt);
+    vmw_tt->vsgt.sgt = NULL;
+out_sg_alloc_fail:
+    ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
+    return ret;
+}
+
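The loop at the end of vmw_ttm_map_dma() collapses runs of physically contiguous pages into "regions". A small standalone illustration of that run counting on a plain array of DMA addresses; this helper is hypothetical and exists only to show the idea.

/* Hypothetical sketch: contiguous DMA addresses collapse into one region. */
static unsigned long count_regions(const dma_addr_t *addrs, unsigned long n)
{
    unsigned long i, regions = 0;
    dma_addr_t old = ~((dma_addr_t) 0);

    for (i = 0; i < n; ++i) {
        if (addrs[i] != old + PAGE_SIZE)   /* start of a new run */
            ++regions;
        old = addrs[i];
    }
    return regions;   /* e.g. {0x1000, 0x2000, 0x5000} -> 2 regions */
}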
+/**
+ * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Tear down any previously set up device DMA mappings and free
+ * any storage space allocated for them. If there are no mappings set up,
+ * this function is a NOP.
+ */
+static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
+{
+    struct vmw_private *dev_priv = vmw_tt->dev_priv;
+
+    if (!vmw_tt->vsgt.sgt)
+        return;
+
+    switch (dev_priv->map_mode) {
+    case vmw_dma_map_bind:
+    case vmw_dma_map_populate:
+        vmw_ttm_unmap_from_dma(vmw_tt);
+        sg_free_table(vmw_tt->vsgt.sgt);
+        vmw_tt->vsgt.sgt = NULL;
+        ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                            vmw_tt->sg_alloc_size);
+        break;
+    default:
+        break;
+    }
+    vmw_tt->mapped = false;
+}
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+    struct vmw_ttm_tt *vmw_tt =
+        container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+    return vmw_ttm_map_dma(vmw_tt);
+}
+
+/**
+ * vmw_bo_unmap_dma - Tear down any device mappings of buffer object pages
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+    struct vmw_ttm_tt *vmw_tt =
+        container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+    vmw_ttm_unmap_dma(vmw_tt);
+}
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+    struct vmw_ttm_tt *vmw_tt =
+        container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+    return &vmw_tt->vsgt;
+}
+
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
-    struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+    struct vmw_ttm_tt *vmw_be =
+        container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+    int ret;
+
+    ret = vmw_ttm_map_dma(vmw_be);
+    if (unlikely(ret != 0))
+        return ret;

    vmw_be->gmr_id = bo_mem->start;
+    vmw_be->mem_type = bo_mem->mem_type;

-    return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
-                        ttm->num_pages, vmw_be->gmr_id);
+    switch (bo_mem->mem_type) {
+    case VMW_PL_GMR:
+        return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+                            ttm->num_pages, vmw_be->gmr_id);
+    case VMW_PL_MOB:
+        if (unlikely(vmw_be->mob == NULL)) {
+            vmw_be->mob =
+                vmw_mob_create(ttm->num_pages);
+            if (unlikely(vmw_be->mob == NULL))
+                return -ENOMEM;
+        }
+
+        return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+                            &vmw_be->vsgt, ttm->num_pages,
+                            vmw_be->gmr_id);
+    default:
+        BUG();
+    }
+    return 0;
}
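For orientation, a hedged sketch of how TTM reaches this function: during a move into a GMR or MOB placement, the core calls through the backend function table registered below (vmw_ttm_func), which is why vmw_ttm_bind() must first be able to map the pages for DMA. The wrapper is hypothetical and assumes the 3.14-era struct ttm_tt layout.

/* Hypothetical illustration of the dispatch, not part of the patch. */
static int vmw_bind_example(struct ttm_buffer_object *bo)
{
    return bo->ttm->func->bind(bo->ttm, &bo->mem);  /* -> vmw_ttm_bind() */
}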
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
-    struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+    struct vmw_ttm_tt *vmw_be =
+        container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

+    switch (vmw_be->mem_type) {
+    case VMW_PL_GMR:
        vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+        break;
+    case VMW_PL_MOB:
+        vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+        break;
+    default:
+        BUG();
+    }
+
+    if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
+        vmw_ttm_unmap_dma(vmw_be);
+
    return 0;
}
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
-    struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+    struct vmw_ttm_tt *vmw_be =
+        container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

+    vmw_ttm_unmap_dma(vmw_be);
+    if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+        ttm_dma_tt_fini(&vmw_be->dma_ttm);
+    else
        ttm_tt_fini(ttm);
+
+    if (vmw_be->mob)
+        vmw_mob_destroy(vmw_be->mob);
+
    kfree(vmw_be);
}
+static int vmw_ttm_populate(struct ttm_tt *ttm)
+{
+    struct vmw_ttm_tt *vmw_tt =
+        container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+    struct vmw_private *dev_priv = vmw_tt->dev_priv;
+    struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+    int ret;
+
+    if (ttm->state != tt_unpopulated)
+        return 0;
+
+    if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+        size_t size =
+            ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+        ret = ttm_mem_global_alloc(glob, size, false, true);
+        if (unlikely(ret != 0))
+            return ret;
+
+        ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+        if (unlikely(ret != 0))
+            ttm_mem_global_free(glob, size);
+    } else
+        ret = ttm_pool_populate(ttm);
+
+    return ret;
+}
+
+static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+{
+    struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+                                             dma_ttm.ttm);
+    struct vmw_private *dev_priv = vmw_tt->dev_priv;
+    struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+    if (vmw_tt->mob) {
+        vmw_mob_destroy(vmw_tt->mob);
+        vmw_tt->mob = NULL;
+    }
+
+    vmw_ttm_unmap_dma(vmw_tt);
+    if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+        size_t size =
+            ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+
+        ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+        ttm_mem_global_free(glob, size);
+    } else
+        ttm_pool_unpopulate(ttm);
+}
+
static struct ttm_backend_func vmw_ttm_func = {
    .bind = vmw_ttm_bind,
    .unbind = vmw_ttm_unbind,
    .destroy = vmw_ttm_destroy,
};

-struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
                                 unsigned long size, uint32_t page_flags,
                                 struct page *dummy_read_page)
{
    struct vmw_ttm_tt *vmw_be;
+    int ret;

-    vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+    vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
    if (!vmw_be)
        return NULL;

-    vmw_be->ttm.func = &vmw_ttm_func;
+    vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
    vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+    vmw_be->mob = NULL;

-    if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
-        kfree(vmw_be);
-        return NULL;
-    }
-
-    return &vmw_be->ttm;
+    if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+        ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
+                              dummy_read_page);
+    else
+        ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
+                          dummy_read_page);
+    if (unlikely(ret != 0))
+        goto out_no_init;
+
+    return &vmw_be->dma_ttm.ttm;
+
+out_no_init:
+    kfree(vmw_be);
+    return NULL;
}
-int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
    return 0;
}

-int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
    switch (type) {
@ -224,6 +697,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
        man->default_caching = TTM_PL_FLAG_CACHED;
        break;
    case VMW_PL_GMR:
+    case VMW_PL_MOB:
        /*
         * "Guest Memory Regions" is an aperture like feature with
         * one slot per bo. There is an upper limit of the number of
@ -242,7 +716,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
    return 0;
}

-void vmw_evict_flags(struct ttm_buffer_object *bo,
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement)
{
    *placement = vmw_sys_placement;
@ -271,6 +745,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
    switch (mem->mem_type) {
    case TTM_PL_SYSTEM:
    case VMW_PL_GMR:
+    case VMW_PL_MOB:
        return 0;
    case TTM_PL_VRAM:
        mem->bus.offset = mem->start << PAGE_SHIFT;
@ -330,10 +805,42 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
                            VMW_FENCE_WAIT_TIMEOUT);
}

+/**
+ * vmw_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it.
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+                            struct ttm_mem_reg *mem)
+{
+    vmw_resource_move_notify(bo, mem);
+}
+
+/**
+ * vmw_swap_notify - TTM swap_notify callback
+ *
+ * @bo: The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+    struct ttm_bo_device *bdev = bo->bdev;
+
+//    spin_lock(&bdev->fence_lock);
+//    ttm_bo_wait(bo, false, false, false);
+//    spin_unlock(&bdev->fence_lock);
+}
+
struct ttm_bo_driver vmw_bo_driver = {
    .ttm_tt_create = &vmw_ttm_tt_create,
-    .ttm_tt_populate = &ttm_pool_populate,
+    .ttm_tt_populate = &vmw_ttm_populate,
-    .ttm_tt_unpopulate = &ttm_pool_unpopulate,
+    .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
    .invalidate_caches = vmw_invalidate_caches,
    .init_mem_type = vmw_init_mem_type,
    .evict_flags = vmw_evict_flags,
@ -344,9 +851,243 @@ struct ttm_bo_driver vmw_bo_driver = {
    .sync_obj_flush = vmw_sync_obj_flush,
    .sync_obj_unref = vmw_sync_obj_unref,
    .sync_obj_ref = vmw_sync_obj_ref,
-    .move_notify = NULL,
+    .move_notify = vmw_move_notify,
-    .swap_notify = NULL,
+    .swap_notify = vmw_swap_notify,
    .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
    .io_mem_reserve = &vmw_ttm_io_mem_reserve,
    .io_mem_free = &vmw_ttm_io_mem_free,
};

+struct scatterlist *sg_next(struct scatterlist *sg)
+{
+    if (sg_is_last(sg))
+        return NULL;
+
+    sg++;
+    if (unlikely(sg_is_chain(sg)))
+        sg = sg_chain_ptr(sg);
+
+    return sg;
+}
+
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+                     sg_free_fn *free_fn)
+{
+    struct scatterlist *sgl, *next;
+
+    if (unlikely(!table->sgl))
+        return;
+
+    sgl = table->sgl;
+    while (table->orig_nents) {
+        unsigned int alloc_size = table->orig_nents;
+        unsigned int sg_size;
+
+        /*
+         * If we have more than max_ents segments left,
+         * then assign 'next' to the sg table after the current one.
+         * sg_size is then one less than alloc size, since the last
+         * element is the chain pointer.
+         */
+        if (alloc_size > max_ents) {
+            next = sg_chain_ptr(&sgl[max_ents - 1]);
+            alloc_size = max_ents;
+            sg_size = alloc_size - 1;
+        } else {
+            sg_size = alloc_size;
+            next = NULL;
+        }
+
+        table->orig_nents -= sg_size;
+        kfree(sgl);
+        sgl = next;
+    }
+
+    table->sgl = NULL;
+}
+
+void sg_free_table(struct sg_table *table)
+{
+    __sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
+}
+
+int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
+{
+    struct scatterlist *sg, *prv;
+    unsigned int left;
+    unsigned int max_ents = SG_MAX_SINGLE_ALLOC;
+
+#ifndef ARCH_HAS_SG_CHAIN
+    BUG_ON(nents > max_ents);
+#endif
+
+    memset(table, 0, sizeof(*table));
+
+    left = nents;
+    prv = NULL;
+    do {
+        unsigned int sg_size, alloc_size = left;
+
+        if (alloc_size > max_ents) {
+            alloc_size = max_ents;
+            sg_size = alloc_size - 1;
+        } else
+            sg_size = alloc_size;
+
+        left -= sg_size;
+
+        sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
+        if (unlikely(!sg)) {
+            /*
+             * Adjust entry count to reflect that the last
+             * entry of the previous table won't be used for
+             * linkage. Without this, sg_kfree() may get
+             * confused.
+             */
+            if (prv)
+                table->nents = ++table->orig_nents;
+
+            goto err;
+        }
+
+        sg_init_table(sg, alloc_size);
+        table->nents = table->orig_nents += sg_size;
+
+        /*
+         * If this is the first mapping, assign the sg table header.
+         * If this is not the first mapping, chain previous part.
+         */
+        if (prv)
+            sg_chain(prv, max_ents, sg);
+        else
+            table->sgl = sg;
+
+        /*
+         * If no more entries after this one, mark the end
+         */
+        if (!left)
+            sg_mark_end(&sg[sg_size - 1]);
+
+        prv = sg;
+    } while (left);
+
+    return 0;
+
+err:
+    __sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
+
+    return -ENOMEM;
+}
+
+void sg_init_table(struct scatterlist *sgl, unsigned int nents)
+{
+    memset(sgl, 0, sizeof(*sgl) * nents);
+#ifdef CONFIG_DEBUG_SG
+    {
+        unsigned int i;
+        for (i = 0; i < nents; i++)
+            sgl[i].sg_magic = SG_MAGIC;
+    }
+#endif
+    sg_mark_end(&sgl[nents - 1]);
+}
+
+void __sg_page_iter_start(struct sg_page_iter *piter,
+                          struct scatterlist *sglist, unsigned int nents,
+                          unsigned long pgoffset)
+{
+    piter->__pg_advance = 0;
+    piter->__nents = nents;
+
+    piter->sg = sglist;
+    piter->sg_pgoffset = pgoffset;
+}
+
+static int sg_page_count(struct scatterlist *sg)
+{
+    return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_next(struct sg_page_iter *piter)
+{
+    if (!piter->__nents || !piter->sg)
+        return false;
+
+    piter->sg_pgoffset += piter->__pg_advance;
+    piter->__pg_advance = 1;
+
+    while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
+        piter->sg_pgoffset -= sg_page_count(piter->sg);
+        piter->sg = sg_next(piter->sg);
+        if (!--piter->__nents || !piter->sg)
+            return false;
+    }
+
+    return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_next);
+
+int sg_alloc_table_from_pages(struct sg_table *sgt,
+                              struct page **pages, unsigned int n_pages,
+                              unsigned long offset, unsigned long size,
+                              gfp_t gfp_mask)
+{
+    unsigned int chunks;
+    unsigned int i;
+    unsigned int cur_page;
+    int ret;
+    struct scatterlist *s;
+
+    /* compute number of contiguous chunks */
+    chunks = 1;
+    for (i = 1; i < n_pages; ++i)
+        if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
+            ++chunks;
+
+    ret = sg_alloc_table(sgt, chunks, gfp_mask);
+    if (unlikely(ret))
+        return ret;
+
+    /* merging chunks and putting them into the scatterlist */
+    cur_page = 0;
+    for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+        unsigned long chunk_size;
+        unsigned int j;
+
+        /* look for the end of the current chunk */
+        for (j = cur_page + 1; j < n_pages; ++j)
+            if (page_to_pfn(pages[j]) !=
+                page_to_pfn(pages[j - 1]) + 1)
+                break;
+
+        chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
+        sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+        size -= chunk_size;
+        offset = 0;
+        cur_page = j;
+    }
+
+    return 0;
+}
+
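A hedged usage sketch for the helper above, using only functions defined in this file: physically contiguous pages collapse into fewer scatterlist entries than n_pages. The caller itself is hypothetical.

/* Hypothetical caller showing the intended use of sg_alloc_table_from_pages(). */
static int example_build_sgt(struct page **pages, unsigned int n_pages)
{
    struct sg_table sgt;
    int ret;

    ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
                                    (unsigned long)n_pages << PAGE_SHIFT,
                                    GFP_KERNEL);
    if (ret)
        return ret;

    /* ... hand sgt.sgl / sgt.nents to dma_map_sg() as done below ... */

    sg_free_table(&sgt);
    return 0;
}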
+int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nelems, int dir)
+{
+    struct scatterlist *s;
+    int i;
+
+    for_each_sg(sglist, s, nelems, i) {
+        s->dma_address = (dma_addr_t)sg_phys(s);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+        s->dma_length = s->length;
+#endif
+    }
+
+    return nelems;
+}
+
@ -32,12 +32,28 @@
struct vmw_user_context {
    struct ttm_base_object base;
    struct vmw_resource res;
+    struct vmw_ctx_binding_state cbs;
};

+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

+static int vmw_gb_context_create(struct vmw_resource *res);
+static int vmw_gb_context_bind(struct vmw_resource *res,
+                               struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+                                 bool readback,
+                                 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
@ -62,6 +78,23 @@ static const struct vmw_res_func vmw_legacy_context_func = {
    .unbind = NULL
};

+static const struct vmw_res_func vmw_gb_context_func = {
+    .res_type = vmw_res_context,
+    .needs_backup = true,
+    .may_evict = true,
+    .type_name = "guest backed contexts",
+    .backup_placement = &vmw_mob_placement,
+    .create = vmw_gb_context_create,
+    .destroy = vmw_gb_context_destroy,
+    .bind = vmw_gb_context_bind,
+    .unbind = vmw_gb_context_unbind
+};
+
+static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
+    [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
+    [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
+    [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+
/**
 * Context management:
 */
@ -76,6 +109,16 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
    } *cmd;

+    if (res->func->destroy == vmw_gb_context_destroy) {
+        mutex_lock(&dev_priv->cmdbuf_mutex);
+        (void) vmw_gb_context_destroy(res);
+        if (dev_priv->pinned_bo != NULL &&
+            !dev_priv->query_cid_valid)
+            __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+        mutex_unlock(&dev_priv->cmdbuf_mutex);
+        return;
+    }
+
    vmw_execbuf_release_pinned_bo(dev_priv);
    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL)) {
@ -92,6 +135,33 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
    vmw_3d_resource_dec(dev_priv, false);
}

+static int vmw_gb_context_init(struct vmw_private *dev_priv,
+                               struct vmw_resource *res,
+                               void (*res_free) (struct vmw_resource *res))
+{
+    int ret;
+    struct vmw_user_context *uctx =
+        container_of(res, struct vmw_user_context, res);
+
+    ret = vmw_resource_init(dev_priv, res, true,
+                            res_free, &vmw_gb_context_func);
+    res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+
+    if (unlikely(ret != 0)) {
+        if (res_free)
+            res_free(res);
+        else
+            kfree(res);
+        return ret;
+    }
+
+    memset(&uctx->cbs, 0, sizeof(uctx->cbs));
+    INIT_LIST_HEAD(&uctx->cbs.list);
+
+    vmw_resource_activate(res, vmw_hw_context_destroy);
+    return 0;
+}
+
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
@ -103,6 +173,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
        SVGA3dCmdDefineContext body;
    } *cmd;

+    if (dev_priv->has_mob)
+        return vmw_gb_context_init(dev_priv, res, res_free);
+
    ret = vmw_resource_init(dev_priv, res, false,
                            res_free, &vmw_legacy_context_func);

@ -154,6 +227,184 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
    return (ret == 0) ? res : NULL;
}

+static int vmw_gb_context_create(struct vmw_resource *res)
+{
+    struct vmw_private *dev_priv = res->dev_priv;
+    int ret;
+    struct {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdDefineGBContext body;
+    } *cmd;
+
+    if (likely(res->id != -1))
+        return 0;
+
+    ret = vmw_resource_alloc_id(res);
+    if (unlikely(ret != 0)) {
+        DRM_ERROR("Failed to allocate a context id.\n");
+        goto out_no_id;
+    }
+
+    if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
+        ret = -EBUSY;
+        goto out_no_fifo;
+    }
+
+    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+    if (unlikely(cmd == NULL)) {
+        DRM_ERROR("Failed reserving FIFO space for context "
+                  "creation.\n");
+        ret = -ENOMEM;
+        goto out_no_fifo;
+    }
+
+    cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
+    cmd->header.size = sizeof(cmd->body);
+    cmd->body.cid = res->id;
+    vmw_fifo_commit(dev_priv, sizeof(*cmd));
+    (void) vmw_3d_resource_inc(dev_priv, false);
+
+    return 0;
+
+out_no_fifo:
+    vmw_resource_release_id(res);
+out_no_id:
+    return ret;
+}
+
+static int vmw_gb_context_bind(struct vmw_resource *res,
+                               struct ttm_validate_buffer *val_buf)
+{
+    struct vmw_private *dev_priv = res->dev_priv;
+    struct {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdBindGBContext body;
+    } *cmd;
+    struct ttm_buffer_object *bo = val_buf->bo;
+
+    BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+    if (unlikely(cmd == NULL)) {
+        DRM_ERROR("Failed reserving FIFO space for context "
+                  "binding.\n");
+        return -ENOMEM;
+    }
+
+    cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+    cmd->header.size = sizeof(cmd->body);
+    cmd->body.cid = res->id;
+    cmd->body.mobid = bo->mem.start;
+    cmd->body.validContents = res->backup_dirty;
+    res->backup_dirty = false;
+    vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+    return 0;
+}
+
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+                                 bool readback,
+                                 struct ttm_validate_buffer *val_buf)
+{
+    struct vmw_private *dev_priv = res->dev_priv;
+    struct ttm_buffer_object *bo = val_buf->bo;
+    struct vmw_fence_obj *fence;
+    struct vmw_user_context *uctx =
+        container_of(res, struct vmw_user_context, res);
+
+    struct {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdReadbackGBContext body;
+    } *cmd1;
+    struct {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdBindGBContext body;
+    } *cmd2;
+    uint32_t submit_size;
+    uint8_t *cmd;
+
+
+    BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+    mutex_lock(&dev_priv->binding_mutex);
+    vmw_context_binding_state_kill(&uctx->cbs);
+
+    submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+    cmd = vmw_fifo_reserve(dev_priv, submit_size);
+    if (unlikely(cmd == NULL)) {
+        DRM_ERROR("Failed reserving FIFO space for context "
+                  "unbinding.\n");
+        mutex_unlock(&dev_priv->binding_mutex);
+        return -ENOMEM;
+    }
+
+    cmd2 = (void *) cmd;
+    if (readback) {
+        cmd1 = (void *) cmd;
+        cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
+        cmd1->header.size = sizeof(cmd1->body);
+        cmd1->body.cid = res->id;
+        cmd2 = (void *) (&cmd1[1]);
+    }
+    cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+    cmd2->header.size = sizeof(cmd2->body);
+    cmd2->body.cid = res->id;
+    cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+    vmw_fifo_commit(dev_priv, submit_size);
+    mutex_unlock(&dev_priv->binding_mutex);
+
+    /*
+     * Create a fence object and fence the backup buffer.
+     */
+
+    (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                      &fence, NULL);
+
+    vmw_fence_single_bo(bo, fence);
+
+    if (likely(fence != NULL))
+        vmw_fence_obj_unreference(&fence);
+
+    return 0;
+}
+
+static int vmw_gb_context_destroy(struct vmw_resource *res)
+{
+    struct vmw_private *dev_priv = res->dev_priv;
+    struct {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdDestroyGBContext body;
+    } *cmd;
+    struct vmw_user_context *uctx =
+        container_of(res, struct vmw_user_context, res);
+
+    BUG_ON(!list_empty(&uctx->cbs.list));
+
+    if (likely(res->id == -1))
+        return 0;
+
+    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+    if (unlikely(cmd == NULL)) {
+        DRM_ERROR("Failed reserving FIFO space for context "
+                  "destruction.\n");
+        return -ENOMEM;
+    }
+
+    cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
+    cmd->header.size = sizeof(cmd->body);
+    cmd->body.cid = res->id;
+    vmw_fifo_commit(dev_priv, sizeof(*cmd));
+    if (dev_priv->query_cid == res->id)
+        dev_priv->query_cid_valid = false;
+    vmw_resource_release_id(res);
+    vmw_3d_resource_dec(dev_priv, false);
+
+    return 0;
+}
+
/**
 * User-space context management:
 */
@ -274,3 +525,283 @@ out_unlock:

}
#endif

+/**
+ * vmw_context_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+{
+    struct vmw_private *dev_priv = bi->ctx->dev_priv;
+    struct {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdSetShader body;
+    } *cmd;
+
+    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+    if (unlikely(cmd == NULL)) {
+        DRM_ERROR("Failed reserving FIFO space for shader "
+                  "unbinding.\n");
+        return -ENOMEM;
+    }
+
+    cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+    cmd->header.size = sizeof(cmd->body);
+    cmd->body.cid = bi->ctx->id;
+    cmd->body.type = bi->i1.shader_type;
+    cmd->body.shid = SVGA3D_INVALID_ID;
+    vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+    return 0;
+}
+
+/**
+ * vmw_context_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+{
+    struct vmw_private *dev_priv = bi->ctx->dev_priv;
+    struct {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdSetRenderTarget body;
+    } *cmd;
+
+    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+    if (unlikely(cmd == NULL)) {
+        DRM_ERROR("Failed reserving FIFO space for render target "
+                  "unbinding.\n");
+        return -ENOMEM;
+    }
+
+    cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+    cmd->header.size = sizeof(cmd->body);
+    cmd->body.cid = bi->ctx->id;
+    cmd->body.type = bi->i1.rt_type;
+    cmd->body.target.sid = SVGA3D_INVALID_ID;
+    cmd->body.target.face = 0;
+    cmd->body.target.mipmap = 0;
+    vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+    return 0;
+}
+
+/**
+ * vmw_context_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+{
+    struct vmw_private *dev_priv = bi->ctx->dev_priv;
+    struct {
+        SVGA3dCmdHeader header;
+        struct {
+            SVGA3dCmdSetTextureState c;
+            SVGA3dTextureState s1;
+        } body;
+    } *cmd;
+
+    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+    if (unlikely(cmd == NULL)) {
+        DRM_ERROR("Failed reserving FIFO space for texture "
+                  "unbinding.\n");
+        return -ENOMEM;
+    }
+
+
+    cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+    cmd->header.size = sizeof(cmd->body);
+    cmd->body.c.cid = bi->ctx->id;
+    cmd->body.s1.stage = bi->i1.texture_stage;
+    cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+    cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+    vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+    return 0;
+}
+
+/**
+ * vmw_context_binding_drop: Stop tracking a context binding
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+{
+    list_del(&cb->ctx_list);
+    if (!list_empty(&cb->res_list))
+        list_del(&cb->res_list);
+    cb->bi.ctx = NULL;
+}
+
+/**
+ * vmw_context_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Performs basic checks on the binding to make sure arguments are within
+ * bounds and then starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+                            const struct vmw_ctx_bindinfo *bi)
+{
+    struct vmw_ctx_binding *loc;
+
+    switch (bi->bt) {
+    case vmw_ctx_binding_rt:
+        if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
+            DRM_ERROR("Illegal render target type %u.\n",
+                      (unsigned) bi->i1.rt_type);
+            return -EINVAL;
+        }
+        loc = &cbs->render_targets[bi->i1.rt_type];
+        break;
+    case vmw_ctx_binding_tex:
+        if (unlikely((unsigned)bi->i1.texture_stage >=
+                     SVGA3D_NUM_TEXTURE_UNITS)) {
+            DRM_ERROR("Illegal texture/sampler unit %u.\n",
+                      (unsigned) bi->i1.texture_stage);
+            return -EINVAL;
+        }
+        loc = &cbs->texture_units[bi->i1.texture_stage];
+        break;
+    case vmw_ctx_binding_shader:
+        if (unlikely((unsigned)bi->i1.shader_type >=
+                     SVGA3D_SHADERTYPE_MAX)) {
+            DRM_ERROR("Illegal shader type %u.\n",
+                      (unsigned) bi->i1.shader_type);
+            return -EINVAL;
+        }
+        loc = &cbs->shaders[bi->i1.shader_type];
+        break;
+    default:
+        BUG();
+    }
+
+    if (loc->bi.ctx != NULL)
+        vmw_context_binding_drop(loc);
+
+    loc->bi = *bi;
+    list_add_tail(&loc->ctx_list, &cbs->list);
+    INIT_LIST_HEAD(&loc->res_list);
+
+    return 0;
+}
+
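A hedged caller sketch, not from the patch, showing how a command-submission path might record that render target 0 of a context now points at a surface resource, so the binding can be scrubbed later. The helper, the chosen rt_type value, and the zero-initialization style are assumptions.

/* Hypothetical sketch: registering a render-target binding with the tracker. */
static int example_track_rt(struct vmw_ctx_binding_state *cbs,
                            struct vmw_resource *ctx,
                            struct vmw_resource *surf)
{
    struct vmw_ctx_bindinfo bi;

    memset(&bi, 0, sizeof(bi));
    bi.ctx = ctx;
    bi.res = surf;
    bi.bt = vmw_ctx_binding_rt;
    bi.i1.rt_type = SVGA3D_RT_COLOR0;    /* assumed render-target slot */

    return vmw_context_binding_add(cbs, &bi);
}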
+/**
+ * vmw_context_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
+                                         const struct vmw_ctx_bindinfo *bi)
+{
+    struct vmw_ctx_binding *loc;
+
+    switch (bi->bt) {
+    case vmw_ctx_binding_rt:
+        loc = &cbs->render_targets[bi->i1.rt_type];
+        break;
+    case vmw_ctx_binding_tex:
+        loc = &cbs->texture_units[bi->i1.texture_stage];
+        break;
+    case vmw_ctx_binding_shader:
+        loc = &cbs->shaders[bi->i1.shader_type];
+        break;
+    default:
+        BUG();
+    }
+
+    if (loc->bi.ctx != NULL)
+        vmw_context_binding_drop(loc);
+
+    loc->bi = *bi;
+    list_add_tail(&loc->ctx_list, &cbs->list);
+    if (bi->res != NULL)
+        list_add_tail(&loc->res_list, &bi->res->binding_head);
+    else
+        INIT_LIST_HEAD(&loc->res_list);
+}
+
+/**
+ * vmw_context_binding_kill - Kill a binding on the device
+ * and stop tracking it.
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Emits FIFO commands to scrub a binding represented by @cb.
+ * Then stops tracking the binding and re-initializes its storage.
+ */
+static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
+{
+    (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+    vmw_context_binding_drop(cb);
+}
+
+/**
+ * vmw_context_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+    struct vmw_ctx_binding *entry, *next;
+
+    list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+        vmw_context_binding_kill(entry);
+}
+
|
||||||
|
* vmw_context_binding_res_list_kill - Kill all bindings on a
|
||||||
|
* resource binding list
|
||||||
|
*
|
||||||
|
* @head: list head of resource binding list
|
||||||
|
*
|
||||||
|
* Kills all bindings associated with a specific resource. Typically
|
||||||
|
* called before the resource is destroyed.
|
||||||
|
*/
|
||||||
|
void vmw_context_binding_res_list_kill(struct list_head *head)
|
||||||
|
{
|
||||||
|
struct vmw_ctx_binding *entry, *next;
|
||||||
|
|
||||||
|
list_for_each_entry_safe(entry, next, head, res_list)
|
||||||
|
vmw_context_binding_kill(entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_context_binding_state_transfer - Commit staged binding info
|
||||||
|
*
|
||||||
|
* @ctx: Pointer to context to commit the staged binding info to.
|
||||||
|
* @from: Staged binding info built during execbuf.
|
||||||
|
*
|
||||||
|
* Transfers binding info from a temporary structure to the persistent
|
||||||
|
* structure in the context. This can be done once commands
|
||||||
|
*/
|
||||||
|
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
|
||||||
|
struct vmw_ctx_binding_state *from)
|
||||||
|
{
|
||||||
|
struct vmw_user_context *uctx =
|
||||||
|
container_of(ctx, struct vmw_user_context, res);
|
||||||
|
struct vmw_ctx_binding *entry, *next;
|
||||||
|
|
||||||
|
list_for_each_entry_safe(entry, next, &from->list, ctx_list)
|
||||||
|
vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
|
||||||
|
}
|
||||||
|
@ -290,8 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
/**
 * vmw_bo_pin - Pin or unpin a buffer object without moving it.
 *
- * @bo: The buffer object. Must be reserved, and present either in VRAM
- * or GMR memory.
+ * @bo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */

@ -303,10 +302,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
    int ret;

    lockdep_assert_held(&bo->resv->lock.base);
-    BUG_ON(old_mem_type != TTM_PL_VRAM &&
-           old_mem_type != VMW_PL_GMR);

-    pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+    pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+        | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
    if (pin)
        pl_flags |= TTM_PL_FLAG_NO_EVICT;

@ -32,6 +32,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
//#include <drm/ttm/ttm_module.h>
+#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@ -111,6 +112,21 @@
|
|||||||
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
|
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
|
||||||
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
|
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
|
||||||
struct drm_vmw_update_layout_arg)
|
struct drm_vmw_update_layout_arg)
|
||||||
|
#define DRM_IOCTL_VMW_CREATE_SHADER \
|
||||||
|
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
|
||||||
|
struct drm_vmw_shader_create_arg)
|
||||||
|
#define DRM_IOCTL_VMW_UNREF_SHADER \
|
||||||
|
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
|
||||||
|
struct drm_vmw_shader_arg)
|
||||||
|
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
|
||||||
|
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
|
||||||
|
union drm_vmw_gb_surface_create_arg)
|
||||||
|
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
|
||||||
|
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
|
||||||
|
union drm_vmw_gb_surface_reference_arg)
|
||||||
|
#define DRM_IOCTL_VMW_SYNCCPU \
|
||||||
|
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
|
||||||
|
struct drm_vmw_synccpu_arg)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The core DRM version of this macro doesn't account for
|
* The core DRM version of this macro doesn't account for
|
||||||
@ -176,6 +192,21 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
|
|||||||
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
|
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
|
||||||
vmw_kms_update_layout_ioctl,
|
vmw_kms_update_layout_ioctl,
|
||||||
DRM_MASTER | DRM_UNLOCKED),
|
DRM_MASTER | DRM_UNLOCKED),
|
||||||
|
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
|
||||||
|
vmw_shader_define_ioctl,
|
||||||
|
DRM_AUTH | DRM_UNLOCKED),
|
||||||
|
VMW_IOCTL_DEF(VMW_UNREF_SHADER,
|
||||||
|
vmw_shader_destroy_ioctl,
|
||||||
|
DRM_AUTH | DRM_UNLOCKED),
|
||||||
|
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
|
||||||
|
vmw_gb_surface_define_ioctl,
|
||||||
|
DRM_AUTH | DRM_UNLOCKED),
|
||||||
|
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
|
||||||
|
vmw_gb_surface_reference_ioctl,
|
||||||
|
DRM_AUTH | DRM_UNLOCKED),
|
||||||
|
VMW_IOCTL_DEF(VMW_SYNCCPU,
|
||||||
|
vmw_user_dmabuf_synccpu_ioctl,
|
||||||
|
DRM_AUTH | DRM_UNLOCKED),
|
||||||
};
|
};
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -185,12 +216,25 @@ static struct pci_device_id vmw_pci_id_list[] = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static int enable_fbdev = 1;
|
static int enable_fbdev = 1;
|
||||||
|
static int vmw_force_iommu;
|
||||||
|
static int vmw_restrict_iommu;
|
||||||
|
static int vmw_force_coherent;
|
||||||
|
static int vmw_restrict_dma_mask;
|
||||||
|
|
||||||
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
|
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
|
||||||
static void vmw_master_init(struct vmw_master *);
|
static void vmw_master_init(struct vmw_master *);
|
||||||
|
|
||||||
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
|
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
|
||||||
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
|
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
|
||||||
|
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
|
||||||
|
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
|
||||||
|
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
|
||||||
|
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
|
||||||
|
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
|
||||||
|
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
|
||||||
|
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
|
||||||
|
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
|
||||||
|
|
||||||
|
|
||||||
static void vmw_print_capabilities(uint32_t capabilities)
|
static void vmw_print_capabilities(uint32_t capabilities)
|
||||||
{
|
{
|
||||||
@ -227,53 +271,14 @@ static void vmw_print_capabilities(uint32_t capabilities)
|
|||||||
DRM_INFO(" GMR2.\n");
|
DRM_INFO(" GMR2.\n");
|
||||||
if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
|
if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
|
||||||
DRM_INFO(" Screen Object 2.\n");
|
DRM_INFO(" Screen Object 2.\n");
|
||||||
|
if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
|
||||||
|
DRM_INFO(" Command Buffers.\n");
|
||||||
|
if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
|
||||||
|
DRM_INFO(" Command Buffers 2.\n");
|
||||||
|
if (capabilities & SVGA_CAP_GBOBJECTS)
|
||||||
|
DRM_INFO(" Guest Backed Resources.\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
* vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
|
|
||||||
* the start of a buffer object.
|
|
||||||
*
|
|
||||||
* @dev_priv: The device private structure.
|
|
||||||
*
|
|
||||||
* This function will idle the buffer using an uninterruptible wait, then
|
|
||||||
* map the first page and initialize a pending occlusion query result structure,
|
|
||||||
* Finally it will unmap the buffer.
|
|
||||||
*
|
|
||||||
* TODO: Since we're only mapping a single page, we should optimize the map
|
|
||||||
* to use kmap_atomic / iomap_atomic.
|
|
||||||
*/
|
|
||||||
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
|
|
||||||
{
|
|
||||||
struct ttm_bo_kmap_obj map;
|
|
||||||
volatile SVGA3dQueryResult *result;
|
|
||||||
bool dummy;
|
|
||||||
int ret;
|
|
||||||
struct ttm_bo_device *bdev = &dev_priv->bdev;
|
|
||||||
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
|
|
||||||
|
|
||||||
ttm_bo_reserve(bo, false, false, false, 0);
|
|
||||||
spin_lock(&bdev->fence_lock);
|
|
||||||
ret = 0; //ttm_bo_wait(bo, false, false, false);
|
|
||||||
spin_unlock(&bdev->fence_lock);
|
|
||||||
if (unlikely(ret != 0))
|
|
||||||
(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
|
|
||||||
10*HZ);
|
|
||||||
/*
|
|
||||||
ret = ttm_bo_kmap(bo, 0, 1, &map);
|
|
||||||
if (likely(ret == 0)) {
|
|
||||||
result = ttm_kmap_obj_virtual(&map, &dummy);
|
|
||||||
result->totalSize = sizeof(*result);
|
|
||||||
result->state = SVGA3D_QUERYSTATE_PENDING;
|
|
||||||
result->result32 = 0xff;
|
|
||||||
ttm_bo_kunmap(&map);
|
|
||||||
} else
|
|
||||||
DRM_ERROR("Dummy query buffer map failed.\n");
|
|
||||||
*/
|
|
||||||
ttm_bo_unreserve(bo);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* vmw_dummy_query_bo_create - create a bo to hold a dummy query result
|
* vmw_dummy_query_bo_create - create a bo to hold a dummy query result
|
||||||
*
|
*
|
||||||
@ -281,20 +286,57 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
|
|||||||
*
|
*
|
||||||
* This function creates a small buffer object that holds the query
|
* This function creates a small buffer object that holds the query
|
||||||
* result for dummy queries emitted as query barriers.
|
* result for dummy queries emitted as query barriers.
|
||||||
|
* The function will then map the first page and initialize a pending
|
||||||
|
* occlusion query result structure, Finally it will unmap the buffer.
|
||||||
* No interruptible waits are done within this function.
|
* No interruptible waits are done within this function.
|
||||||
*
|
*
|
||||||
* Returns an error if bo creation fails.
|
* Returns an error if bo creation or initialization fails.
|
||||||
*/
|
*/
|
||||||
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
|
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
|
||||||
{
|
{
|
||||||
return ttm_bo_create(&dev_priv->bdev,
|
int ret;
|
||||||
|
struct ttm_buffer_object *bo;
|
||||||
|
struct ttm_bo_kmap_obj map;
|
||||||
|
volatile SVGA3dQueryResult *result;
|
||||||
|
bool dummy;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Create the bo as pinned, so that a tryreserve will
|
||||||
|
* immediately succeed. This is because we're the only
|
||||||
|
* user of the bo currently.
|
||||||
|
*/
|
||||||
|
ret = ttm_bo_create(&dev_priv->bdev,
|
||||||
PAGE_SIZE,
|
PAGE_SIZE,
|
||||||
ttm_bo_type_device,
|
ttm_bo_type_device,
|
||||||
&vmw_vram_sys_placement,
|
&vmw_sys_ne_placement,
|
||||||
0, false, NULL,
|
0, false, NULL,
|
||||||
&dev_priv->dummy_query_bo);
|
&bo);
|
||||||
}
|
|
||||||
|
|
||||||
|
if (unlikely(ret != 0))
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
ret = ttm_bo_reserve(bo, false, true, false, 0);
|
||||||
|
BUG_ON(ret != 0);
|
||||||
|
|
||||||
|
ret = ttm_bo_kmap(bo, 0, 1, &map);
|
||||||
|
if (likely(ret == 0)) {
|
||||||
|
result = ttm_kmap_obj_virtual(&map, &dummy);
|
||||||
|
result->totalSize = sizeof(*result);
|
||||||
|
result->state = SVGA3D_QUERYSTATE_PENDING;
|
||||||
|
result->result32 = 0xff;
|
||||||
|
ttm_bo_kunmap(&map);
|
||||||
|
}
|
||||||
|
vmw_bo_pin(bo, false);
|
||||||
|
ttm_bo_unreserve(bo);
|
||||||
|
|
||||||
|
if (unlikely(ret != 0)) {
|
||||||
|
DRM_ERROR("Dummy query buffer map failed.\n");
|
||||||
|
ttm_bo_unref(&bo);
|
||||||
|
} else
|
||||||
|
dev_priv->dummy_query_bo = bo;
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
static int vmw_request_device(struct vmw_private *dev_priv)
|
static int vmw_request_device(struct vmw_private *dev_priv)
|
||||||
{
|
{
|
||||||
@ -336,6 +378,7 @@ static void vmw_release_device(struct vmw_private *dev_priv)
|
|||||||
vmw_fifo_release(dev_priv, &dev_priv->fifo);
|
vmw_fifo_release(dev_priv, &dev_priv->fifo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Increase the 3d resource refcount.
|
* Increase the 3d resource refcount.
|
||||||
* If the count was prevously zero, initialize the fifo, switching to svga
|
* If the count was prevously zero, initialize the fifo, switching to svga
|
||||||
@ -432,12 +475,41 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
|
|||||||
dev_priv->initial_height = height;
|
dev_priv->initial_height = height;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_dma_masks - set required page- and dma masks
|
||||||
|
*
|
||||||
|
* @dev: Pointer to struct drm-device
|
||||||
|
*
|
||||||
|
* With 32-bit we can only handle 32 bit PFNs. Optionally set that
|
||||||
|
* restriction also for 64-bit systems.
|
||||||
|
*/
|
||||||
|
#ifdef CONFIG_INTEL_IOMMU
|
||||||
|
static int vmw_dma_masks(struct vmw_private *dev_priv)
|
||||||
|
{
|
||||||
|
struct drm_device *dev = dev_priv->dev;
|
||||||
|
|
||||||
|
if (intel_iommu_enabled &&
|
||||||
|
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
|
||||||
|
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
|
||||||
|
return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static int vmw_dma_masks(struct vmw_private *dev_priv)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||||
{
|
{
|
||||||
struct vmw_private *dev_priv;
|
struct vmw_private *dev_priv;
|
||||||
int ret;
|
int ret;
|
||||||
uint32_t svga_id;
|
uint32_t svga_id;
|
||||||
enum vmw_res_type i;
|
enum vmw_res_type i;
|
||||||
|
bool refuse_dma = false;
|
||||||
|
|
||||||
|
|
||||||
ENTER();
|
ENTER();
|
||||||
|
|
||||||
@ -455,6 +527,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|||||||
mutex_init(&dev_priv->hw_mutex);
|
mutex_init(&dev_priv->hw_mutex);
|
||||||
mutex_init(&dev_priv->cmdbuf_mutex);
|
mutex_init(&dev_priv->cmdbuf_mutex);
|
||||||
mutex_init(&dev_priv->release_mutex);
|
mutex_init(&dev_priv->release_mutex);
|
||||||
|
mutex_init(&dev_priv->binding_mutex);
|
||||||
rwlock_init(&dev_priv->resource_lock);
|
rwlock_init(&dev_priv->resource_lock);
|
||||||
|
|
||||||
for (i = vmw_res_context; i < vmw_res_max; ++i) {
|
for (i = vmw_res_context; i < vmw_res_max; ++i) {
|
||||||
@ -491,6 +564,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|||||||
}
|
}
|
||||||
|
|
||||||
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
|
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
|
||||||
|
// ret = vmw_dma_select_mode(dev_priv);
|
||||||
|
// if (unlikely(ret != 0)) {
|
||||||
|
// DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
|
||||||
|
// refuse_dma = true;
|
||||||
|
// }
|
||||||
|
|
||||||
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
|
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
|
||||||
dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
|
dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
|
||||||
@ -499,14 +577,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|||||||
|
|
||||||
vmw_get_initial_size(dev_priv);
|
vmw_get_initial_size(dev_priv);
|
||||||
|
|
||||||
if (dev_priv->capabilities & SVGA_CAP_GMR) {
|
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
|
||||||
dev_priv->max_gmr_descriptors =
|
|
||||||
vmw_read(dev_priv,
|
|
||||||
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
|
|
||||||
dev_priv->max_gmr_ids =
|
dev_priv->max_gmr_ids =
|
||||||
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
|
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
|
||||||
}
|
|
||||||
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
|
|
||||||
dev_priv->max_gmr_pages =
|
dev_priv->max_gmr_pages =
|
||||||
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
|
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
|
||||||
dev_priv->memory_size =
|
dev_priv->memory_size =
|
||||||
@ -519,23 +592,42 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|||||||
*/
|
*/
|
||||||
dev_priv->memory_size = 512*1024*1024;
|
dev_priv->memory_size = 512*1024*1024;
|
||||||
}
|
}
|
||||||
|
dev_priv->max_mob_pages = 0;
|
||||||
|
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
|
||||||
|
uint64_t mem_size =
|
||||||
|
vmw_read(dev_priv,
|
||||||
|
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
|
||||||
|
|
||||||
|
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
|
||||||
|
dev_priv->prim_bb_mem =
|
||||||
|
vmw_read(dev_priv,
|
||||||
|
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
|
||||||
|
} else
|
||||||
|
dev_priv->prim_bb_mem = dev_priv->vram_size;
|
||||||
|
|
||||||
|
ret = vmw_dma_masks(dev_priv);
|
||||||
|
if (unlikely(ret != 0)) {
|
||||||
|
mutex_unlock(&dev_priv->hw_mutex);
|
||||||
|
goto out_err0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
|
||||||
|
dev_priv->prim_bb_mem = dev_priv->vram_size;
|
||||||
|
|
||||||
mutex_unlock(&dev_priv->hw_mutex);
|
mutex_unlock(&dev_priv->hw_mutex);
|
||||||
|
|
||||||
vmw_print_capabilities(dev_priv->capabilities);
|
vmw_print_capabilities(dev_priv->capabilities);
|
||||||
|
|
||||||
if (dev_priv->capabilities & SVGA_CAP_GMR) {
|
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
|
||||||
DRM_INFO("Max GMR ids is %u\n",
|
DRM_INFO("Max GMR ids is %u\n",
|
||||||
(unsigned)dev_priv->max_gmr_ids);
|
(unsigned)dev_priv->max_gmr_ids);
|
||||||
DRM_INFO("Max GMR descriptors is %u\n",
|
|
||||||
(unsigned)dev_priv->max_gmr_descriptors);
|
|
||||||
}
|
|
||||||
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
|
|
||||||
DRM_INFO("Max number of GMR pages is %u\n",
|
DRM_INFO("Max number of GMR pages is %u\n",
|
||||||
(unsigned)dev_priv->max_gmr_pages);
|
(unsigned)dev_priv->max_gmr_pages);
|
||||||
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
|
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
|
||||||
(unsigned)dev_priv->memory_size / 1024);
|
(unsigned)dev_priv->memory_size / 1024);
|
||||||
}
|
}
|
||||||
|
DRM_INFO("Maximum display memory size is %u kiB\n",
|
||||||
|
dev_priv->prim_bb_mem / 1024);
|
||||||
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
|
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
|
||||||
dev_priv->vram_start, dev_priv->vram_size / 1024);
|
dev_priv->vram_start, dev_priv->vram_size / 1024);
|
||||||
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
|
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
|
||||||
@ -546,6 +638,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|||||||
goto out_err0;
|
goto out_err0;
|
||||||
|
|
||||||
|
|
||||||
|
vmw_master_init(&dev_priv->fbdev_master);
|
||||||
|
dev_priv->active_master = &dev_priv->fbdev_master;
|
||||||
|
|
||||||
|
|
||||||
ret = ttm_bo_device_init(&dev_priv->bdev,
|
ret = ttm_bo_device_init(&dev_priv->bdev,
|
||||||
@ -565,13 +659,23 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|||||||
}
|
}
|
||||||
|
|
||||||
dev_priv->has_gmr = true;
|
dev_priv->has_gmr = true;
|
||||||
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
|
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
|
||||||
dev_priv->max_gmr_ids) != 0) {
|
refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
|
||||||
|
VMW_PL_GMR) != 0) {
|
||||||
DRM_INFO("No GMR memory available. "
|
DRM_INFO("No GMR memory available. "
|
||||||
"Graphics memory resources are very limited.\n");
|
"Graphics memory resources are very limited.\n");
|
||||||
dev_priv->has_gmr = false;
|
dev_priv->has_gmr = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
|
||||||
|
dev_priv->has_mob = true;
|
||||||
|
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
|
||||||
|
VMW_PL_MOB) != 0) {
|
||||||
|
DRM_INFO("No MOB memory available. "
|
||||||
|
"3D will be disabled.\n");
|
||||||
|
dev_priv->has_mob = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
|
dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
|
||||||
dev_priv->mmio_size);
|
dev_priv->mmio_size);
|
||||||
|
|
||||||
@ -590,14 +694,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|||||||
goto out_err4;
|
goto out_err4;
|
||||||
}
|
}
|
||||||
|
|
||||||
dev_priv->tdev = ttm_object_device_init
|
// dev_priv->tdev = ttm_object_device_init
|
||||||
(dev_priv->mem_global_ref.object, 12);
|
// (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
|
||||||
|
|
||||||
if (unlikely(dev_priv->tdev == NULL)) {
|
// if (unlikely(dev_priv->tdev == NULL)) {
|
||||||
DRM_ERROR("Unable to initialize TTM object management.\n");
|
// DRM_ERROR("Unable to initialize TTM object management.\n");
|
||||||
ret = -ENOMEM;
|
// ret = -ENOMEM;
|
||||||
goto out_err4;
|
// goto out_err4;
|
||||||
}
|
// }
|
||||||
|
|
||||||
dev->dev_private = dev_priv;
|
dev->dev_private = dev_priv;
|
||||||
|
|
||||||
@ -702,6 +806,8 @@ static int vmw_driver_unload(struct drm_device *dev)
|
|||||||
ttm_object_device_release(&dev_priv->tdev);
|
ttm_object_device_release(&dev_priv->tdev);
|
||||||
iounmap(dev_priv->mmio_virt);
|
iounmap(dev_priv->mmio_virt);
|
||||||
arch_phys_wc_del(dev_priv->mmio_mtrr);
|
arch_phys_wc_del(dev_priv->mmio_mtrr);
|
||||||
|
if (dev_priv->has_mob)
|
||||||
|
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
|
||||||
if (dev_priv->has_gmr)
|
if (dev_priv->has_gmr)
|
||||||
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
|
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
|
||||||
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
||||||
@ -731,9 +837,16 @@ static void vmw_postclose(struct drm_device *dev,
|
|||||||
struct vmw_fpriv *vmw_fp;
|
struct vmw_fpriv *vmw_fp;
|
||||||
|
|
||||||
vmw_fp = vmw_fpriv(file_priv);
|
vmw_fp = vmw_fpriv(file_priv);
|
||||||
ttm_object_file_release(&vmw_fp->tfile);
|
|
||||||
if (vmw_fp->locked_master)
|
if (vmw_fp->locked_master) {
|
||||||
|
struct vmw_master *vmaster =
|
||||||
|
vmw_master(vmw_fp->locked_master);
|
||||||
|
|
||||||
|
ttm_vt_unlock(&vmaster->lock);
|
||||||
drm_master_put(&vmw_fp->locked_master);
|
drm_master_put(&vmw_fp->locked_master);
|
||||||
|
}
|
||||||
|
|
||||||
|
ttm_object_file_release(&vmw_fp->tfile);
|
||||||
kfree(vmw_fp);
|
kfree(vmw_fp);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@ -810,10 +923,11 @@ static void vmw_lastclose(struct drm_device *dev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
static void vmw_master_init(struct vmw_master *vmaster)
|
static void vmw_master_init(struct vmw_master *vmaster)
|
||||||
{
|
{
|
||||||
ttm_lock_init(&vmaster->lock);
|
// ttm_lock_init(&vmaster->lock);
|
||||||
INIT_LIST_HEAD(&vmaster->fb_surf);
|
INIT_LIST_HEAD(&vmaster->fb_surf);
|
||||||
mutex_init(&vmaster->fb_surf_mutex);
|
mutex_init(&vmaster->fb_surf_mutex);
|
||||||
}
|
}
|
||||||
@ -828,7 +942,7 @@ static int vmw_master_create(struct drm_device *dev,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
vmw_master_init(vmaster);
|
vmw_master_init(vmaster);
|
||||||
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
|
// ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
|
||||||
master->driver_priv = vmaster;
|
master->driver_priv = vmaster;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -843,7 +957,7 @@ static void vmw_master_destroy(struct drm_device *dev,
|
|||||||
kfree(vmaster);
|
kfree(vmaster);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if 0
|
||||||
static int vmw_master_set(struct drm_device *dev,
|
static int vmw_master_set(struct drm_device *dev,
|
||||||
struct drm_file *file_priv,
|
struct drm_file *file_priv,
|
||||||
bool from_open)
|
bool from_open)
|
||||||
@ -918,14 +1032,12 @@ static void vmw_master_drop(struct drm_device *dev,
|
|||||||
|
|
||||||
vmw_fp->locked_master = drm_master_get(file_priv->master);
|
vmw_fp->locked_master = drm_master_get(file_priv->master);
|
||||||
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
|
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
|
||||||
vmw_execbuf_release_pinned_bo(dev_priv);
|
|
||||||
|
|
||||||
if (unlikely((ret != 0))) {
|
if (unlikely((ret != 0))) {
|
||||||
DRM_ERROR("Unable to lock TTM at VT switch.\n");
|
DRM_ERROR("Unable to lock TTM at VT switch.\n");
|
||||||
drm_master_put(&vmw_fp->locked_master);
|
drm_master_put(&vmw_fp->locked_master);
|
||||||
}
|
}
|
||||||
|
|
||||||
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
|
vmw_execbuf_release_pinned_bo(dev_priv);
|
||||||
|
|
||||||
if (!dev_priv->enable_fb) {
|
if (!dev_priv->enable_fb) {
|
||||||
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
||||||
@ -1150,3 +1262,15 @@ int vmw_init(void)
|
|||||||
MODULE_AUTHOR("VMware Inc. and others");
|
MODULE_AUTHOR("VMware Inc. and others");
|
||||||
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
|
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
|
||||||
MODULE_LICENSE("GPL and additional rights");
|
MODULE_LICENSE("GPL and additional rights");
|
||||||
|
|
||||||
|
|
||||||
|
void *kmemdup(const void *src, size_t len, gfp_t gfp)
|
||||||
|
{
|
||||||
|
void *p;
|
||||||
|
|
||||||
|
p = kmalloc(len, gfp);
|
||||||
|
if (p)
|
||||||
|
memcpy(p, src, len);
|
||||||
|
return p;
|
||||||
|
}
|
||||||
|
|
||||||
|
@ -32,6 +32,7 @@
|
|||||||
#include <drm/drmP.h>
|
#include <drm/drmP.h>
|
||||||
#include <drm/vmwgfx_drm.h>
|
#include <drm/vmwgfx_drm.h>
|
||||||
#include <drm/drm_hashtab.h>
|
#include <drm/drm_hashtab.h>
|
||||||
|
#include <linux/scatterlist.h>
|
||||||
//#include <linux/suspend.h>
|
//#include <linux/suspend.h>
|
||||||
#include <drm/ttm/ttm_bo_driver.h>
|
#include <drm/ttm/ttm_bo_driver.h>
|
||||||
#include <drm/ttm/ttm_object.h>
|
#include <drm/ttm/ttm_object.h>
|
||||||
@ -40,9 +41,9 @@
|
|||||||
//#include <drm/ttm/ttm_module.h>
|
//#include <drm/ttm/ttm_module.h>
|
||||||
#include "vmwgfx_fence.h"
|
#include "vmwgfx_fence.h"
|
||||||
|
|
||||||
#define VMWGFX_DRIVER_DATE "20120209"
|
#define VMWGFX_DRIVER_DATE "20121114"
|
||||||
#define VMWGFX_DRIVER_MAJOR 2
|
#define VMWGFX_DRIVER_MAJOR 2
|
||||||
#define VMWGFX_DRIVER_MINOR 4
|
#define VMWGFX_DRIVER_MINOR 5
|
||||||
#define VMWGFX_DRIVER_PATCHLEVEL 0
|
#define VMWGFX_DRIVER_PATCHLEVEL 0
|
||||||
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
|
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
|
||||||
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
|
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
|
||||||
@ -50,14 +51,30 @@
|
|||||||
#define VMWGFX_MAX_VALIDATIONS 2048
|
#define VMWGFX_MAX_VALIDATIONS 2048
|
||||||
#define VMWGFX_MAX_DISPLAYS 16
|
#define VMWGFX_MAX_DISPLAYS 16
|
||||||
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
|
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
|
||||||
|
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Perhaps we should have sysfs entries for these.
|
||||||
|
*/
|
||||||
|
#define VMWGFX_NUM_GB_CONTEXT 256
|
||||||
|
#define VMWGFX_NUM_GB_SHADER 20000
|
||||||
|
#define VMWGFX_NUM_GB_SURFACE 32768
|
||||||
|
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
|
||||||
|
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
|
||||||
|
VMWGFX_NUM_GB_SHADER +\
|
||||||
|
VMWGFX_NUM_GB_SURFACE +\
|
||||||
|
VMWGFX_NUM_GB_SCREEN_TARGET)
|
||||||
|
|
||||||
#define VMW_PL_GMR TTM_PL_PRIV0
|
#define VMW_PL_GMR TTM_PL_PRIV0
|
||||||
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
|
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
|
||||||
|
#define VMW_PL_MOB TTM_PL_PRIV1
|
||||||
|
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
|
||||||
|
|
||||||
#define VMW_RES_CONTEXT ttm_driver_type0
|
#define VMW_RES_CONTEXT ttm_driver_type0
|
||||||
#define VMW_RES_SURFACE ttm_driver_type1
|
#define VMW_RES_SURFACE ttm_driver_type1
|
||||||
#define VMW_RES_STREAM ttm_driver_type2
|
#define VMW_RES_STREAM ttm_driver_type2
|
||||||
#define VMW_RES_FENCE ttm_driver_type3
|
#define VMW_RES_FENCE ttm_driver_type3
|
||||||
|
#define VMW_RES_SHADER ttm_driver_type4
|
||||||
|
|
||||||
#define ioread32(addr) readl(addr)
|
#define ioread32(addr) readl(addr)
|
||||||
|
|
||||||
@ -98,6 +115,7 @@ struct vmw_dma_buffer {
|
|||||||
struct vmw_validate_buffer {
|
struct vmw_validate_buffer {
|
||||||
struct ttm_validate_buffer base;
|
struct ttm_validate_buffer base;
|
||||||
struct drm_hash_item hash;
|
struct drm_hash_item hash;
|
||||||
|
bool validate_as_mob;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct vmw_res_func;
|
struct vmw_res_func;
|
||||||
@ -114,6 +132,7 @@ struct vmw_resource {
|
|||||||
const struct vmw_res_func *func;
|
const struct vmw_res_func *func;
|
||||||
struct list_head lru_head; /* Protected by the resource lock */
|
struct list_head lru_head; /* Protected by the resource lock */
|
||||||
struct list_head mob_head; /* Protected by @backup reserved */
|
struct list_head mob_head; /* Protected by @backup reserved */
|
||||||
|
struct list_head binding_head; /* Protected by binding_mutex */
|
||||||
void (*res_free) (struct vmw_resource *res);
|
void (*res_free) (struct vmw_resource *res);
|
||||||
void (*hw_destroy) (struct vmw_resource *res);
|
void (*hw_destroy) (struct vmw_resource *res);
|
||||||
};
|
};
|
||||||
@ -122,6 +141,7 @@ enum vmw_res_type {
|
|||||||
vmw_res_context,
|
vmw_res_context,
|
||||||
vmw_res_surface,
|
vmw_res_surface,
|
||||||
vmw_res_stream,
|
vmw_res_stream,
|
||||||
|
vmw_res_shader,
|
||||||
vmw_res_max
|
vmw_res_max
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -170,6 +190,7 @@ struct vmw_fifo_state {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct vmw_relocation {
|
struct vmw_relocation {
|
||||||
|
SVGAMobId *mob_loc;
|
||||||
SVGAGuestPtr *location;
|
SVGAGuestPtr *location;
|
||||||
uint32_t index;
|
uint32_t index;
|
||||||
};
|
};
|
||||||
@ -193,6 +214,123 @@ struct vmw_res_cache_entry {
|
|||||||
struct vmw_resource_val_node *node;
|
struct vmw_resource_val_node *node;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
|
||||||
|
*/
|
||||||
|
enum vmw_dma_map_mode {
|
||||||
|
vmw_dma_phys, /* Use physical page addresses */
|
||||||
|
vmw_dma_alloc_coherent, /* Use TTM coherent pages */
|
||||||
|
vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
|
||||||
|
vmw_dma_map_bind, /* Unmap from DMA just before unbind */
|
||||||
|
vmw_dma_map_max
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct vmw_sg_table - Scatter/gather table for binding, with additional
|
||||||
|
* device-specific information.
|
||||||
|
*
|
||||||
|
* @sgt: Pointer to a struct sg_table with binding information
|
||||||
|
* @num_regions: Number of regions with device-address contigous pages
|
||||||
|
*/
|
||||||
|
struct vmw_sg_table {
|
||||||
|
enum vmw_dma_map_mode mode;
|
||||||
|
struct page **pages;
|
||||||
|
const dma_addr_t *addrs;
|
||||||
|
struct sg_table *sgt;
|
||||||
|
unsigned long num_regions;
|
||||||
|
unsigned long num_pages;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct vmw_piter - Page iterator that iterates over a list of pages
|
||||||
|
* and DMA addresses that could be either a scatter-gather list or
|
||||||
|
* arrays
|
||||||
|
*
|
||||||
|
* @pages: Array of page pointers to the pages.
|
||||||
|
* @addrs: DMA addresses to the pages if coherent pages are used.
|
||||||
|
* @iter: Scatter-gather page iterator. Current position in SG list.
|
||||||
|
* @i: Current position in arrays.
|
||||||
|
* @num_pages: Number of pages total.
|
||||||
|
* @next: Function to advance the iterator. Returns false if past the list
|
||||||
|
* of pages, true otherwise.
|
||||||
|
* @dma_address: Function to return the DMA address of the current page.
|
||||||
|
*/
|
||||||
|
struct vmw_piter {
|
||||||
|
struct page **pages;
|
||||||
|
const dma_addr_t *addrs;
|
||||||
|
struct sg_page_iter iter;
|
||||||
|
unsigned long i;
|
||||||
|
unsigned long num_pages;
|
||||||
|
bool (*next)(struct vmw_piter *);
|
||||||
|
dma_addr_t (*dma_address)(struct vmw_piter *);
|
||||||
|
struct page *(*page)(struct vmw_piter *);
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* enum vmw_ctx_binding_type - abstract resource to context binding types
|
||||||
|
*/
|
||||||
|
enum vmw_ctx_binding_type {
|
||||||
|
vmw_ctx_binding_shader,
|
||||||
|
vmw_ctx_binding_rt,
|
||||||
|
vmw_ctx_binding_tex,
|
||||||
|
vmw_ctx_binding_max
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct vmw_ctx_bindinfo - structure representing a single context binding
|
||||||
|
*
|
||||||
|
* @ctx: Pointer to the context structure. NULL means the binding is not
|
||||||
|
* active.
|
||||||
|
* @res: Non ref-counted pointer to the bound resource.
|
||||||
|
* @bt: The binding type.
|
||||||
|
* @i1: Union of information needed to unbind.
|
||||||
|
*/
|
||||||
|
struct vmw_ctx_bindinfo {
|
||||||
|
struct vmw_resource *ctx;
|
||||||
|
struct vmw_resource *res;
|
||||||
|
enum vmw_ctx_binding_type bt;
|
||||||
|
union {
|
||||||
|
SVGA3dShaderType shader_type;
|
||||||
|
SVGA3dRenderTargetType rt_type;
|
||||||
|
uint32 texture_stage;
|
||||||
|
} i1;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct vmw_ctx_binding - structure representing a single context binding
|
||||||
|
* - suitable for tracking in a context
|
||||||
|
*
|
||||||
|
* @ctx_list: List head for context.
|
||||||
|
* @res_list: List head for bound resource.
|
||||||
|
* @bi: Binding info
|
||||||
|
*/
|
||||||
|
struct vmw_ctx_binding {
|
||||||
|
struct list_head ctx_list;
|
||||||
|
struct list_head res_list;
|
||||||
|
struct vmw_ctx_bindinfo bi;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct vmw_ctx_binding_state - context binding state
|
||||||
|
*
|
||||||
|
* @list: linked list of individual bindings.
|
||||||
|
* @render_targets: Render target bindings.
|
||||||
|
* @texture_units: Texture units/samplers bindings.
|
||||||
|
* @shaders: Shader bindings.
|
||||||
|
*
|
||||||
|
* Note that this structure also provides storage space for the individual
|
||||||
|
* struct vmw_ctx_binding objects, so that no dynamic allocation is needed
|
||||||
|
* for individual bindings.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
struct vmw_ctx_binding_state {
|
||||||
|
struct list_head list;
|
||||||
|
struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
|
||||||
|
struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
|
||||||
|
struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
|
||||||
|
};
|
||||||
|
|
||||||
struct vmw_sw_context{
|
struct vmw_sw_context{
|
||||||
struct drm_open_hash res_ht;
|
struct drm_open_hash res_ht;
|
||||||
bool res_ht_initialized;
|
bool res_ht_initialized;
|
||||||
@ -214,6 +352,7 @@ struct vmw_sw_context{
|
|||||||
struct vmw_resource *last_query_ctx;
|
struct vmw_resource *last_query_ctx;
|
||||||
bool needs_post_query_barrier;
|
bool needs_post_query_barrier;
|
||||||
struct vmw_resource *error_resource;
|
struct vmw_resource *error_resource;
|
||||||
|
struct vmw_ctx_binding_state staged_bindings;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct vmw_legacy_display;
|
struct vmw_legacy_display;
|
||||||
@ -245,6 +384,7 @@ struct vmw_private {
|
|||||||
unsigned int io_start;
|
unsigned int io_start;
|
||||||
uint32_t vram_start;
|
uint32_t vram_start;
|
||||||
uint32_t vram_size;
|
uint32_t vram_size;
|
||||||
|
uint32_t prim_bb_mem;
|
||||||
uint32_t mmio_start;
|
uint32_t mmio_start;
|
||||||
uint32_t mmio_size;
|
uint32_t mmio_size;
|
||||||
uint32_t fb_max_width;
|
uint32_t fb_max_width;
|
||||||
@ -254,11 +394,12 @@ struct vmw_private {
|
|||||||
__le32 __iomem *mmio_virt;
|
__le32 __iomem *mmio_virt;
|
||||||
int mmio_mtrr;
|
int mmio_mtrr;
|
||||||
uint32_t capabilities;
|
uint32_t capabilities;
|
||||||
uint32_t max_gmr_descriptors;
|
|
||||||
uint32_t max_gmr_ids;
|
uint32_t max_gmr_ids;
|
||||||
uint32_t max_gmr_pages;
|
uint32_t max_gmr_pages;
|
||||||
|
uint32_t max_mob_pages;
|
||||||
uint32_t memory_size;
|
uint32_t memory_size;
|
||||||
bool has_gmr;
|
bool has_gmr;
|
||||||
|
bool has_mob;
|
||||||
struct mutex hw_mutex;
|
struct mutex hw_mutex;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -334,6 +475,7 @@ struct vmw_private {
|
|||||||
|
|
||||||
struct vmw_sw_context ctx;
|
struct vmw_sw_context ctx;
|
||||||
struct mutex cmdbuf_mutex;
|
struct mutex cmdbuf_mutex;
|
||||||
|
struct mutex binding_mutex;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Operating mode.
|
* Operating mode.
|
||||||
@ -346,9 +488,9 @@ struct vmw_private {
|
|||||||
* Master management.
|
* Master management.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// struct vmw_master *active_master;
|
struct vmw_master *active_master;
|
||||||
// struct vmw_master fbdev_master;
|
struct vmw_master fbdev_master;
|
||||||
// struct notifier_block pm_nb;
|
// struct notifier_block pm_nb;
|
||||||
bool suspended;
|
bool suspended;
|
||||||
|
|
||||||
struct mutex release_mutex;
|
struct mutex release_mutex;
|
||||||
@ -374,6 +516,17 @@ struct vmw_private {
|
|||||||
|
|
||||||
struct list_head res_lru[vmw_res_max];
|
struct list_head res_lru[vmw_res_max];
|
||||||
uint32_t used_memory_size;
|
uint32_t used_memory_size;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* DMA mapping stuff.
|
||||||
|
*/
|
||||||
|
enum vmw_dma_map_mode map_mode;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Guest Backed stuff
|
||||||
|
*/
|
||||||
|
struct ttm_buffer_object *otable_bo;
|
||||||
|
struct vmw_otable *otables;
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
|
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
|
||||||
@ -421,7 +574,7 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
|
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
|
||||||
struct page *pages[],
|
const struct vmw_sg_table *vsgt,
|
||||||
unsigned long num_pages,
|
unsigned long num_pages,
|
||||||
int gmr_id);
|
int gmr_id);
|
||||||
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
|
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
|
||||||
@ -430,23 +583,12 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
|
|||||||
* Resource utilities - vmwgfx_resource.c
|
* Resource utilities - vmwgfx_resource.c
|
||||||
*/
|
*/
|
||||||
struct vmw_user_resource_conv;
|
struct vmw_user_resource_conv;
|
||||||
extern const struct vmw_user_resource_conv *user_surface_converter;
|
|
||||||
extern const struct vmw_user_resource_conv *user_context_converter;
|
|
||||||
|
|
||||||
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
|
|
||||||
extern void vmw_resource_unreference(struct vmw_resource **p_res);
|
extern void vmw_resource_unreference(struct vmw_resource **p_res);
|
||||||
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
|
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
|
||||||
extern int vmw_resource_validate(struct vmw_resource *res);
|
extern int vmw_resource_validate(struct vmw_resource *res);
|
||||||
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
|
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
|
||||||
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
|
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
|
||||||
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
|
|
||||||
struct drm_file *file_priv);
|
|
||||||
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
|
|
||||||
struct drm_file *file_priv);
|
|
||||||
extern int vmw_context_check(struct vmw_private *dev_priv,
|
|
||||||
struct ttm_object_file *tfile,
|
|
||||||
int id,
|
|
||||||
struct vmw_resource **p_res);
|
|
||||||
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
|
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
|
||||||
struct ttm_object_file *tfile,
|
struct ttm_object_file *tfile,
|
||||||
uint32_t handle,
|
uint32_t handle,
|
||||||
@ -458,18 +600,6 @@ extern int vmw_user_resource_lookup_handle(
|
|||||||
uint32_t handle,
|
uint32_t handle,
|
||||||
const struct vmw_user_resource_conv *converter,
|
const struct vmw_user_resource_conv *converter,
|
||||||
struct vmw_resource **p_res);
|
struct vmw_resource **p_res);
|
||||||
extern void vmw_surface_res_free(struct vmw_resource *res);
|
|
||||||
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
|
|
||||||
struct drm_file *file_priv);
|
|
||||||
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
|
||||||
struct drm_file *file_priv);
|
|
||||||
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
|
|
||||||
struct drm_file *file_priv);
|
|
||||||
extern int vmw_surface_check(struct vmw_private *dev_priv,
|
|
||||||
struct ttm_object_file *tfile,
|
|
||||||
uint32_t handle, int *id);
|
|
||||||
extern int vmw_surface_validate(struct vmw_private *dev_priv,
|
|
||||||
struct vmw_surface *srf);
|
|
||||||
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
|
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
|
||||||
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
|
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
|
||||||
struct vmw_dma_buffer *vmw_bo,
|
struct vmw_dma_buffer *vmw_bo,
|
||||||
@ -478,10 +608,21 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
|
|||||||
void (*bo_free) (struct ttm_buffer_object *bo));
|
void (*bo_free) (struct ttm_buffer_object *bo));
|
||||||
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
|
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
|
||||||
struct ttm_object_file *tfile);
|
struct ttm_object_file *tfile);
|
||||||
|
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
|
||||||
|
struct ttm_object_file *tfile,
|
||||||
|
uint32_t size,
|
||||||
|
bool shareable,
|
||||||
|
uint32_t *handle,
|
||||||
|
struct vmw_dma_buffer **p_dma_buf);
|
||||||
|
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
|
||||||
|
struct vmw_dma_buffer *dma_buf,
|
||||||
|
uint32_t *handle);
|
||||||
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
|
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
|
||||||
struct drm_file *file_priv);
|
struct drm_file *file_priv);
|
||||||
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
|
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
|
||||||
struct drm_file *file_priv);
|
struct drm_file *file_priv);
|
||||||
|
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
|
||||||
|
struct drm_file *file_priv);
|
||||||
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
|
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
|
||||||
uint32_t cur_validate_node);
|
uint32_t cur_validate_node);
|
||||||
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
|
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
|
||||||
@ -539,8 +680,6 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
|
|||||||
struct drm_file *file_priv);
|
struct drm_file *file_priv);
|
||||||
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
|
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
|
||||||
struct drm_file *file_priv);
|
struct drm_file *file_priv);
|
||||||
//extern unsigned int vmw_fops_poll(struct file *filp,
|
|
||||||
// struct poll_table_struct *wait);
|
|
||||||
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
|
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
|
||||||
size_t count, loff_t *offset);
|
size_t count, loff_t *offset);
|
||||||
|
|
||||||
@ -574,16 +713,62 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
|
|||||||
* TTM buffer object driver - vmwgfx_buffer.c
|
* TTM buffer object driver - vmwgfx_buffer.c
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
extern const size_t vmw_tt_size;
|
||||||
extern struct ttm_placement vmw_vram_placement;
|
extern struct ttm_placement vmw_vram_placement;
|
||||||
extern struct ttm_placement vmw_vram_ne_placement;
|
extern struct ttm_placement vmw_vram_ne_placement;
|
||||||
extern struct ttm_placement vmw_vram_sys_placement;
|
extern struct ttm_placement vmw_vram_sys_placement;
|
||||||
extern struct ttm_placement vmw_vram_gmr_placement;
|
extern struct ttm_placement vmw_vram_gmr_placement;
|
||||||
extern struct ttm_placement vmw_vram_gmr_ne_placement;
|
extern struct ttm_placement vmw_vram_gmr_ne_placement;
|
||||||
extern struct ttm_placement vmw_sys_placement;
|
extern struct ttm_placement vmw_sys_placement;
|
||||||
|
extern struct ttm_placement vmw_sys_ne_placement;
|
||||||
extern struct ttm_placement vmw_evictable_placement;
|
extern struct ttm_placement vmw_evictable_placement;
|
||||||
extern struct ttm_placement vmw_srf_placement;
|
extern struct ttm_placement vmw_srf_placement;
|
||||||
|
extern struct ttm_placement vmw_mob_placement;
|
||||||
extern struct ttm_bo_driver vmw_bo_driver;
|
extern struct ttm_bo_driver vmw_bo_driver;
|
||||||
extern int vmw_dma_quiescent(struct drm_device *dev);
|
extern int vmw_dma_quiescent(struct drm_device *dev);
|
||||||
|
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
|
||||||
|
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
|
||||||
|
extern const struct vmw_sg_table *
|
||||||
|
vmw_bo_sg_table(struct ttm_buffer_object *bo);
|
||||||
|
extern void vmw_piter_start(struct vmw_piter *viter,
|
||||||
|
const struct vmw_sg_table *vsgt,
|
||||||
|
unsigned long p_offs);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_piter_next - Advance the iterator one page.
|
||||||
|
*
|
||||||
|
* @viter: Pointer to the iterator to advance.
|
||||||
|
*
|
||||||
|
* Returns false if past the list of pages, true otherwise.
|
||||||
|
*/
|
||||||
|
static inline bool vmw_piter_next(struct vmw_piter *viter)
|
||||||
|
{
|
||||||
|
return viter->next(viter);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_piter_dma_addr - Return the DMA address of the current page.
|
||||||
|
*
|
||||||
|
* @viter: Pointer to the iterator
|
||||||
|
*
|
||||||
|
* Returns the DMA address of the page pointed to by @viter.
|
||||||
|
*/
|
||||||
|
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
|
||||||
|
{
|
||||||
|
return viter->dma_address(viter);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_piter_page - Return a pointer to the current page.
|
||||||
|
*
|
||||||
|
* @viter: Pointer to the iterator
|
||||||
|
*
|
||||||
|
* Returns the DMA address of the page pointed to by @viter.
|
||||||
|
*/
|
||||||
|
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
|
||||||
|
{
|
||||||
|
return viter->page(viter);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Command submission - vmwgfx_execbuf.c
|
* Command submission - vmwgfx_execbuf.c
|
||||||
@ -620,7 +805,7 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
|
|||||||
* IRQs and wating - vmwgfx_irq.c
|
* IRQs and wating - vmwgfx_irq.c
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
|
extern irqreturn_t vmw_irq_handler(int irq, void *arg);
|
||||||
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
|
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
|
||||||
uint32_t seqno, bool interruptible,
|
uint32_t seqno, bool interruptible,
|
||||||
unsigned long timeout);
|
unsigned long timeout);
|
||||||
@ -738,6 +923,62 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
|
|||||||
|
|
||||||
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
|
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
|
||||||
|
|
||||||
|
/**
|
||||||
|
/*
|
||||||
|
* MemoryOBject management - vmwgfx_mob.c
|
||||||
|
*/
|
||||||
|
struct vmw_mob;
|
||||||
|
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
|
||||||
|
const struct vmw_sg_table *vsgt,
|
||||||
|
unsigned long num_data_pages, int32_t mob_id);
|
||||||
|
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
|
||||||
|
struct vmw_mob *mob);
|
||||||
|
extern void vmw_mob_destroy(struct vmw_mob *mob);
|
||||||
|
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
|
||||||
|
extern int vmw_otables_setup(struct vmw_private *dev_priv);
|
||||||
|
extern void vmw_otables_takedown(struct vmw_private *dev_priv);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Context management - vmwgfx_context.c
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern const struct vmw_user_resource_conv *user_context_converter;
|
||||||
|
|
||||||
|
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
|
||||||
|
|
||||||
|
extern int vmw_context_check(struct vmw_private *dev_priv,
|
||||||
|
struct ttm_object_file *tfile,
|
||||||
|
int id,
|
||||||
|
struct vmw_resource **p_res);
|
||||||
|
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
|
||||||
|
struct drm_file *file_priv);
|
||||||
|
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
|
||||||
|
struct drm_file *file_priv);
|
||||||
|
extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
|
||||||
|
const struct vmw_ctx_bindinfo *ci);
|
||||||
|
extern void
|
||||||
|
vmw_context_binding_state_transfer(struct vmw_resource *res,
|
||||||
|
struct vmw_ctx_binding_state *cbs);
|
||||||
|
extern void vmw_context_binding_res_list_kill(struct list_head *head);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Surface management - vmwgfx_surface.c
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern const struct vmw_user_resource_conv *user_surface_converter;
|
||||||
|
|
||||||
|
extern void vmw_surface_res_free(struct vmw_resource *res);
|
||||||
|
extern int vmw_surface_check(struct vmw_private *dev_priv,
|
||||||
|
struct ttm_object_file *tfile,
|
||||||
|
uint32_t handle, int *id);
|
||||||
|
extern int vmw_surface_validate(struct vmw_private *dev_priv,
|
||||||
|
struct vmw_surface *srf);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Shader management - vmwgfx_shader.c
|
||||||
|
*/
|
||||||
|
|
||||||
|
extern const struct vmw_user_resource_conv *user_shader_converter;
|
||||||
/**
|
/**
|
||||||
* Inline helper functions
|
* Inline helper functions
|
||||||
*/
|
*/
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -271,7 +271,7 @@ void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
|
|||||||
spin_unlock_irq(&fman->lock);
|
spin_unlock_irq(&fman->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
|
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
|
||||||
struct list_head *list)
|
struct list_head *list)
|
||||||
{
|
{
|
||||||
struct vmw_fence_action *action, *next_action;
|
struct vmw_fence_action *action, *next_action;
|
||||||
@ -897,7 +897,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
|
|||||||
* Note that the action callbacks may be executed before this function
|
* Note that the action callbacks may be executed before this function
|
||||||
* returns.
|
* returns.
|
||||||
*/
|
*/
|
||||||
void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
|
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
|
||||||
struct vmw_fence_action *action)
|
struct vmw_fence_action *action)
|
||||||
{
|
{
|
||||||
struct vmw_fence_manager *fman = fence->fman;
|
struct vmw_fence_manager *fman = fence->fman;
|
||||||
@ -993,7 +993,7 @@ struct vmw_event_fence_pending {
|
|||||||
struct drm_vmw_event_fence event;
|
struct drm_vmw_event_fence event;
|
||||||
};
|
};
|
||||||
|
|
||||||
int vmw_event_fence_action_create(struct drm_file *file_priv,
|
static int vmw_event_fence_action_create(struct drm_file *file_priv,
|
||||||
struct vmw_fence_obj *fence,
|
struct vmw_fence_obj *fence,
|
||||||
uint32_t flags,
|
uint32_t flags,
|
||||||
uint64_t user_data,
|
uint64_t user_data,
|
||||||
@ -1081,7 +1081,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
|
|||||||
*/
|
*/
|
||||||
if (arg->handle) {
|
if (arg->handle) {
|
||||||
struct ttm_base_object *base =
|
struct ttm_base_object *base =
|
||||||
ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
|
ttm_base_object_lookup_for_ref(dev_priv->tdev,
|
||||||
|
arg->handle);
|
||||||
|
|
||||||
if (unlikely(base == NULL)) {
|
if (unlikely(base == NULL)) {
|
||||||
DRM_ERROR("Fence event invalid fence object handle "
|
DRM_ERROR("Fence event invalid fence object handle "
|
||||||
|
@ -41,6 +41,23 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
|
|||||||
uint32_t fifo_min, hwversion;
|
uint32_t fifo_min, hwversion;
|
||||||
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
|
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
|
||||||
|
|
||||||
|
if (!(dev_priv->capabilities & SVGA_CAP_3D))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
|
||||||
|
uint32_t result;
|
||||||
|
|
||||||
|
if (!dev_priv->has_mob)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
mutex_lock(&dev_priv->hw_mutex);
|
||||||
|
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
|
||||||
|
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
|
||||||
|
mutex_unlock(&dev_priv->hw_mutex);
|
||||||
|
|
||||||
|
return (result != 0);
|
||||||
|
}
|
||||||
|
|
||||||
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
|
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
@ -518,23 +535,15 @@ out_err:
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
|
* vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
|
||||||
|
* legacy query commands.
|
||||||
*
|
*
|
||||||
* @dev_priv: The device private structure.
|
* @dev_priv: The device private structure.
|
||||||
* @cid: The hardware context id used for the query.
|
* @cid: The hardware context id used for the query.
|
||||||
*
|
*
|
||||||
* This function is used to emit a dummy occlusion query with
|
* See the vmw_fifo_emit_dummy_query documentation.
|
||||||
* no primitives rendered between query begin and query end.
|
|
||||||
* It's used to provide a query barrier, in order to know that when
|
|
||||||
* this query is finished, all preceding queries are also finished.
|
|
||||||
*
|
|
||||||
* A Query results structure should have been initialized at the start
|
|
||||||
* of the dev_priv->dummy_query_bo buffer object. And that buffer object
|
|
||||||
* must also be either reserved or pinned when this function is called.
|
|
||||||
*
|
|
||||||
* Returns -ENOMEM on failure to reserve fifo space.
|
|
||||||
*/
|
*/
|
||||||
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
|
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
|
||||||
uint32_t cid)
|
uint32_t cid)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
@ -573,3 +582,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
|
|||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
|
||||||
|
* guest-backed resource query commands.
|
||||||
|
*
|
||||||
|
* @dev_priv: The device private structure.
|
||||||
|
* @cid: The hardware context id used for the query.
|
||||||
|
*
|
||||||
|
* See the vmw_fifo_emit_dummy_query documentation.
|
||||||
|
*/
|
||||||
|
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
|
||||||
|
uint32_t cid)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* A query wait without a preceding query end will
|
||||||
|
* actually finish all queries for this cid
|
||||||
|
* without writing to the query result structure.
|
||||||
|
*/
|
||||||
|
|
||||||
|
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
|
||||||
|
struct {
|
||||||
|
SVGA3dCmdHeader header;
|
||||||
|
SVGA3dCmdWaitForGBQuery body;
|
||||||
|
} *cmd;
|
||||||
|
|
||||||
|
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
|
||||||
|
|
||||||
|
if (unlikely(cmd == NULL)) {
|
||||||
|
DRM_ERROR("Out of fifo space for dummy query.\n");
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
|
||||||
|
cmd->header.size = sizeof(cmd->body);
|
||||||
|
cmd->body.cid = cid;
|
||||||
|
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
|
||||||
|
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
|
||||||
|
cmd->body.mobid = bo->mem.start;
|
||||||
|
cmd->body.offset = 0;
|
||||||
|
|
||||||
|
vmw_fifo_commit(dev_priv, sizeof(*cmd));
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
|
||||||
|
* appropriate resource query commands.
|
||||||
|
*
|
||||||
|
* @dev_priv: The device private structure.
|
||||||
|
* @cid: The hardware context id used for the query.
|
||||||
|
*
|
||||||
|
* This function is used to emit a dummy occlusion query with
|
||||||
|
* no primitives rendered between query begin and query end.
|
||||||
|
* It's used to provide a query barrier, in order to know that when
|
||||||
|
* this query is finished, all preceding queries are also finished.
|
||||||
|
*
|
||||||
|
* A Query results structure should have been initialized at the start
|
||||||
|
* of the dev_priv->dummy_query_bo buffer object. And that buffer object
|
||||||
|
* must also be either reserved or pinned when this function is called.
|
||||||
|
*
|
||||||
|
* Returns -ENOMEM on failure to reserve fifo space.
|
||||||
|
*/
|
||||||
|
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
|
||||||
|
uint32_t cid)
|
||||||
|
{
|
||||||
|
if (dev_priv->has_mob)
|
||||||
|
return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
|
||||||
|
|
||||||
|
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
|
||||||
|
}
|
||||||
|
@@ -35,9 +35,11 @@
 #define VMW_PPN_SIZE (sizeof(unsigned long))
 /* A future safe maximum remap size. */
 #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+#define DMA_ADDR_INVALID ((dma_addr_t) 0)
+#define DMA_PAGE_INVALID 0UL
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
-			 struct page *pages[],
+			 struct vmw_piter *iter,
 			 unsigned long num_pages,
 			 int gmr_id)
 {
@@ -83,13 +85,15 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 		cmd += sizeof(remap_cmd) / sizeof(*cmd);
 
 		for (i = 0; i < nr; ++i) {
 			if (VMW_PPN_SIZE <= 4)
-				*cmd = page_to_pfn(*pages++);
+				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
 			else
-				*((uint64_t *)cmd) = page_to_pfn(*pages++);
+				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
+					PAGE_SHIFT;
 
 			cmd += VMW_PPN_SIZE / sizeof(*cmd);
-		}
+			vmw_piter_next(iter);
+		}
 
 		num_pages -= nr;
 		remap_pos += nr;
@@ -125,32 +129,26 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
 
 
 int vmw_gmr_bind(struct vmw_private *dev_priv,
-		 struct page *pages[],
+		 const struct vmw_sg_table *vsgt,
 		 unsigned long num_pages,
 		 int gmr_id)
 {
-	struct list_head desc_pages;
-	int ret;
+	struct vmw_piter data_iter;
 
-	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
-		return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
+	vmw_piter_start(&data_iter, vsgt, 0);
 
-	printf("%s epic fail\n",__FUNCTION__);
+	if (unlikely(!vmw_piter_next(&data_iter)))
+		return 0;
 
+	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
 		return -EINVAL;
+
+	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
 }
 
 
 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
 {
-	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
 		vmw_gmr2_unbind(dev_priv, gmr_id);
-		return;
-	}
-
-	mutex_lock(&dev_priv->hw_mutex);
-	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
-	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
-	mb();
-	mutex_unlock(&dev_priv->hw_mutex);
 }
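Illustrative note (not part of the commit): the GMR2 bind path above batches page numbers into remap commands of at most VMW_PPN_PER_REMAP entries each. A small standalone sketch of that batching arithmetic; the page count is an arbitrary example value:

#include <stdio.h>

#define PPN_SIZE       sizeof(unsigned long)
#define PPN_PER_REMAP  ((31 * 1024) / PPN_SIZE)

int main(void)
{
	unsigned long num_pages = 100000;   /* example GMR size in pages */
	unsigned long remap_cmds = 0;

	/* Mirror of the outer loop: consume up to PPN_PER_REMAP pages per command. */
	while (num_pages > 0) {
		unsigned long nr = num_pages > PPN_PER_REMAP ? PPN_PER_REMAP : num_pages;
		++remap_cmds;
		num_pages -= nr;
	}
	printf("pages per remap: %lu, remap commands needed: %lu\n",
	       (unsigned long)PPN_PER_REMAP, remap_cmds);
	return 0;
}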
@@ -125,10 +125,21 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
 		return -ENOMEM;
 
 	spin_lock_init(&gman->lock);
-	gman->max_gmr_pages = dev_priv->max_gmr_pages;
 	gman->used_gmr_pages = 0;
 	ida_init(&gman->gmr_ida);
-	gman->max_gmr_ids = p_size;
+
+	switch (p_size) {
+	case VMW_PL_GMR:
+		gman->max_gmr_ids = dev_priv->max_gmr_ids;
+		gman->max_gmr_pages = dev_priv->max_gmr_pages;
+		break;
+	case VMW_PL_MOB:
+		gman->max_gmr_ids = VMWGFX_NUM_MOB;
+		gman->max_gmr_pages = dev_priv->max_mob_pages;
+		break;
+	default:
+		BUG();
+	}
 	man->priv = (void *) gman;
 	return 0;
 }
@@ -33,7 +33,7 @@
 
 #define VMW_FENCE_WRAP (1 << 24)
 
-irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t vmw_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct vmw_private *dev_priv = vmw_priv(dev);
@@ -666,9 +666,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 
 	if (unlikely(surface->mip_levels[0] != 1 ||
 		     surface->num_sizes != 1 ||
-		     surface->sizes[0].width < mode_cmd->width ||
-		     surface->sizes[0].height < mode_cmd->height ||
-		     surface->sizes[0].depth != 1)) {
+		     surface->base_size.width < mode_cmd->width ||
+		     surface->base_size.height < mode_cmd->height ||
+		     surface->base_size.depth != 1)) {
 		DRM_ERROR("Incompatible surface dimensions "
 			  "for requested mode.\n");
 		return -EINVAL;
@@ -1456,9 +1456,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	int ret;
 
 	ENTER();
 
 	drm_mode_config_init(dev);
 	dev->mode_config.funcs = &vmw_kms_funcs;
 	dev->mode_config.min_width = 1;
@@ -1471,8 +1471,8 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 //	if (ret) /* Fallback */
 //		(void)vmw_kms_init_legacy_display_system(dev_priv);
 
 	LEAVE();
 
 	return 0;
 }
 
@@ -1491,7 +1491,7 @@ int vmw_kms_close(struct vmw_private *dev_priv)
 	return 0;
 }
 
 #if 0
 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
@@ -1517,7 +1517,7 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
 
 	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 
@@ -1533,7 +1533,7 @@ out:
 	return ret;
 }
 #endif
 
 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
 		       unsigned width, unsigned height, unsigned pitch,
 		       unsigned bpp, unsigned depth)
@@ -1640,7 +1640,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 				uint32_t pitch,
 				uint32_t height)
 {
-	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+	return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
 }
 
 
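Illustrative note (not part of the commit): the last kms hunk switches the mode check from vram_size to prim_bb_mem, so a mode fits when pitch × height stays below the primary bounding-box memory budget. A standalone sketch of that check; the 256 MiB budget and the mode values are example numbers, not device limits:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool mode_fits(uint32_t width, uint32_t height, uint32_t bpp,
		      uint64_t prim_bb_mem)
{
	/* pitch is derived the same way vmw_dumb_create does it: bytes per line */
	uint64_t pitch = (uint64_t)width * ((bpp + 7) / 8);
	return pitch * (uint64_t)height < prim_bb_mem;
}

int main(void)
{
	uint64_t budget = 256ull << 20;  /* example: 256 MiB of primary bb memory */
	printf("3840x2160@32bpp fits: %d\n", mode_fits(3840, 2160, 32, budget));
	printf("16384x16384@32bpp fits: %d\n", mode_fits(16384, 16384, 32, budget));
	return 0;
}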
drivers/video/drm/vmwgfx/vmwgfx_mob.c (new file, 653 lines)
@@ -0,0 +1,653 @@
/**************************************************************************
 *
 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @num_pages       Number of pages that make up the page table.
 * @pt_level        The indirection level of the page table. 0-2.
 * @pt_root_page    DMA address of the level 0 page of the page table.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @offset          Start of table offset into dev_priv::otable_bo
 * @otable          Pointer to otable metadata;
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support this, But the otable size is
	 * determined at compile-time, so this BUG shouldn't trigger
	 * randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 *
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, false, NULL);
		BUG_ON(ret != 0);

		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization, by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A succesful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables;
	SVGAOTableType i;
	int ret;

	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
			  GFP_KERNEL);
	if (unlikely(otables == NULL)) {
		DRM_ERROR("Failed to allocate space for otable "
			  "metadata.\n");
		return -ENOMEM;
	}

	otables[SVGA_OTABLE_MOB].size =
		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
	otables[SVGA_OTABLE_SURFACE].size =
		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
	otables[SVGA_OTABLE_CONTEXT].size =
		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
	otables[SVGA_OTABLE_SHADER].size =
		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
	otables[SVGA_OTABLE_SCREEN_TARGET].size =
		VMWGFX_NUM_GB_SCREEN_TARGET *
		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;

	bo_size = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &dev_priv->otable_bo);

	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(dev_priv->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(dev_priv->otable_bo);

	offset = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
		ret = vmw_setup_otable_base(dev_priv, i, offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	dev_priv->otables = otables;
	return 0;

out_unreserve:
	ttm_bo_unreserve(dev_priv->otable_bo);
out_no_setup:
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i, &otables[i]);

	ttm_bo_unref(&dev_priv->otable_bo);
out_no_bo:
	kfree(otables);
	return ret;
}


/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = dev_priv->otable_bo;
	int ret;

	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i,
					 &dev_priv->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_unref(&dev_priv->otable_bo);
	kfree(dev_priv->otables);
	dev_priv->otables = NULL;
}


/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages:  Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages:  Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(mob == NULL))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @mob:         Pointer to the mob the pagetable of which we want to
 *               populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns ENOMEM if
 * memory resources aren't sufficient and may cause TTM buffer objects
 * to be swapped out by using the TTM memory accounting function.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;
	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);

	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_unref(&mob->pt_bo);

	return ret;
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
{
	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
{
	*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
}
#endif

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_addr:      Array of DMA addresses to the underlying buffer
 *                  object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_pages:       Array of page pointers to the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	__le32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	save_addr = addr = AllocKernelSpace(4096);

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		MapPage(save_addr,(addr_t)page, 3);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		vmw_piter_next(pt_iter);
	}
	FreeKernelSpace(save_addr);
	return num_pt_pages;
}

/*
 * vmw_mob_build_pt - Set up a multilevel mob pagetable
 *
 * @mob:            Pointer to a mob whose page table needs setting up.
 * @data_addr       Array of DMA addresses to the buffer object's data
 *                  pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Uses tail recursion to set up a multilevel mob page table.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob:            Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo)
		ttm_bo_unref(&mob->pt_bo);
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob_id:         Device id of the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, false, NULL);
		/*
		 * Noone else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object unbinding.\n");
	}
	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (bo) {
		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob we're making visible.
 * @data_addr:      Array of DMA addresses to the data pages of the underlying
 *                  buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 *                  object.
 * @mob_id:         Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	(void) vmw_3d_resource_inc(dev_priv, false);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object binding.\n");
		goto out_no_cmd_space;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_3d_resource_dec(dev_priv, false);
	if (pt_set_up)
		ttm_bo_unref(&mob->pt_bo);

	return -ENOMEM;
}
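Illustrative note (not part of the commit): the page-table sizing in vmw_mob_calculate_pt_pages adds one indirection level per loop iteration until all remaining entries fit in a single page. A standalone re-implementation of that arithmetic for a few example buffer sizes; PAGE_SIZE and PPN_SIZE are fixed to 4 KiB and 8 bytes here, matching the CONFIG_64BIT branch:

#include <stdio.h>

#define EX_PAGE_SIZE  4096UL
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))
#define EX_PPN_SIZE   8UL

static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

/* Mirror of the sizing loop: each pass converts the current level's byte
 * count into the number of page-table entries needed to describe it. */
static unsigned long calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * EX_PAGE_SIZE;
	unsigned long tot_size = 0;

	while (data_size > EX_PAGE_SIZE) {
		data_size = div_round_up(data_size, EX_PAGE_SIZE);
		data_size *= EX_PPN_SIZE;
		tot_size += (data_size + EX_PAGE_SIZE - 1) & EX_PAGE_MASK;
	}
	return tot_size / EX_PAGE_SIZE;
}

int main(void)
{
	unsigned long sizes[] = { 1, 512, 513, 262144 };  /* in data pages */
	unsigned i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i)
		printf("%lu data pages -> %lu page-table pages\n",
		       sizes[i], calculate_pt_pages(sizes[i]));
	return 0;
}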
@ -32,8 +32,10 @@
|
|||||||
#include <drm/drmP.h>
|
#include <drm/drmP.h>
|
||||||
#include "vmwgfx_resource_priv.h"
|
#include "vmwgfx_resource_priv.h"
|
||||||
|
|
||||||
|
#define VMW_RES_EVICT_ERR_COUNT 10
|
||||||
|
|
||||||
struct vmw_user_dma_buffer {
|
struct vmw_user_dma_buffer {
|
||||||
struct ttm_base_object base;
|
struct ttm_prime_object prime;
|
||||||
struct vmw_dma_buffer dma;
|
struct vmw_dma_buffer dma;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -213,6 +215,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
|
|||||||
res->func = func;
|
res->func = func;
|
||||||
INIT_LIST_HEAD(&res->lru_head);
|
INIT_LIST_HEAD(&res->lru_head);
|
||||||
INIT_LIST_HEAD(&res->mob_head);
|
INIT_LIST_HEAD(&res->mob_head);
|
||||||
|
INIT_LIST_HEAD(&res->binding_head);
|
||||||
res->id = -1;
|
res->id = -1;
|
||||||
res->backup = NULL;
|
res->backup = NULL;
|
||||||
res->backup_offset = 0;
|
res->backup_offset = 0;
|
||||||
@ -295,7 +298,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
|
|||||||
if (unlikely(base == NULL))
|
if (unlikely(base == NULL))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (unlikely(base->object_type != converter->object_type))
|
if (unlikely(ttm_base_object_type(base) != converter->object_type))
|
||||||
goto out_bad_resource;
|
goto out_bad_resource;
|
||||||
|
|
||||||
res = converter->base_obj_to_res(base);
|
res = converter->base_obj_to_res(base);
|
||||||
@ -350,6 +353,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
|
|||||||
/**
|
/**
|
||||||
* Buffer management.
|
* Buffer management.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
|
||||||
|
*
|
||||||
|
* @dev_priv: Pointer to a struct vmw_private identifying the device.
|
||||||
|
* @size: The requested buffer size.
|
||||||
|
* @user: Whether this is an ordinary dma buffer or a user dma buffer.
|
||||||
|
*/
|
||||||
|
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
|
||||||
|
bool user)
|
||||||
|
{
|
||||||
|
static size_t struct_size, user_struct_size;
|
||||||
|
size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||||
|
size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
|
||||||
|
|
||||||
|
if (unlikely(struct_size == 0)) {
|
||||||
|
size_t backend_size = ttm_round_pot(vmw_tt_size);
|
||||||
|
|
||||||
|
struct_size = backend_size +
|
||||||
|
ttm_round_pot(sizeof(struct vmw_dma_buffer));
|
||||||
|
user_struct_size = backend_size +
|
||||||
|
ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (dev_priv->map_mode == vmw_dma_alloc_coherent)
|
||||||
|
page_array_size +=
|
||||||
|
ttm_round_pot(num_pages * sizeof(dma_addr_t));
|
||||||
|
|
||||||
|
return ((user) ? user_struct_size : struct_size) +
|
||||||
|
page_array_size;
|
||||||
|
}
|
||||||
|
|
||||||
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
|
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
|
||||||
{
|
{
|
||||||
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
|
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
|
||||||
@ -357,6 +392,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
|
|||||||
kfree(vmw_bo);
|
kfree(vmw_bo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
|
||||||
|
{
|
||||||
|
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
|
||||||
|
|
||||||
|
// ttm_prime_object_kfree(vmw_user_bo, prime);
|
||||||
|
}
|
||||||
|
|
||||||
int vmw_dmabuf_init(struct vmw_private *dev_priv,
|
int vmw_dmabuf_init(struct vmw_private *dev_priv,
|
||||||
struct vmw_dma_buffer *vmw_bo,
|
struct vmw_dma_buffer *vmw_bo,
|
||||||
size_t size, struct ttm_placement *placement,
|
size_t size, struct ttm_placement *placement,
|
||||||
@ -366,28 +408,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
|
|||||||
struct ttm_bo_device *bdev = &dev_priv->bdev;
|
struct ttm_bo_device *bdev = &dev_priv->bdev;
|
||||||
size_t acc_size;
|
size_t acc_size;
|
||||||
int ret;
|
int ret;
|
||||||
|
bool user = (bo_free == &vmw_user_dmabuf_destroy);
|
||||||
|
|
||||||
BUG_ON(!bo_free);
|
BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
|
||||||
|
|
||||||
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
|
acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
|
||||||
memset(vmw_bo, 0, sizeof(*vmw_bo));
|
memset(vmw_bo, 0, sizeof(*vmw_bo));
|
||||||
|
|
||||||
INIT_LIST_HEAD(&vmw_bo->res_list);
|
INIT_LIST_HEAD(&vmw_bo->res_list);
|
||||||
|
|
||||||
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
|
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
|
||||||
ttm_bo_type_device, placement,
|
(user) ? ttm_bo_type_device :
|
||||||
|
ttm_bo_type_kernel, placement,
|
||||||
0, interruptible,
|
0, interruptible,
|
||||||
NULL, acc_size, NULL, bo_free);
|
NULL, acc_size, NULL, bo_free);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
|
|
||||||
{
|
|
||||||
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
|
|
||||||
|
|
||||||
// ttm_base_object_kfree(vmw_user_bo, base);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
|
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
|
||||||
{
|
{
|
||||||
struct vmw_user_dma_buffer *vmw_user_bo;
|
struct vmw_user_dma_buffer *vmw_user_bo;
|
||||||
@ -399,11 +436,27 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
|
|||||||
if (unlikely(base == NULL))
|
if (unlikely(base == NULL))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
|
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
|
||||||
|
prime.base);
|
||||||
bo = &vmw_user_bo->dma.base;
|
bo = &vmw_user_bo->dma.base;
|
||||||
ttm_bo_unref(&bo);
|
ttm_bo_unref(&bo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
|
||||||
|
enum ttm_ref_type ref_type)
|
||||||
|
{
|
||||||
|
struct vmw_user_dma_buffer *user_bo;
|
||||||
|
user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
|
||||||
|
|
||||||
|
switch (ref_type) {
|
||||||
|
case TTM_REF_SYNCCPU_WRITE:
|
||||||
|
ttm_bo_synccpu_write_release(&user_bo->dma.base);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
BUG();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* vmw_user_dmabuf_alloc - Allocate a user dma buffer
|
* vmw_user_dmabuf_alloc - Allocate a user dma buffer
|
||||||
*
|
*
|
||||||
@ -434,24 +487,30 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
|
|||||||
}
|
}
|
||||||
|
|
||||||
ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
|
ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
|
||||||
|
(dev_priv->has_mob) ?
|
||||||
|
&vmw_sys_placement :
|
||||||
&vmw_vram_sys_placement, true,
|
&vmw_vram_sys_placement, true,
|
||||||
&vmw_user_dmabuf_destroy);
|
&vmw_user_dmabuf_destroy);
|
||||||
if (unlikely(ret != 0))
|
if (unlikely(ret != 0))
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
tmp = ttm_bo_reference(&user_bo->dma.base);
|
tmp = ttm_bo_reference(&user_bo->dma.base);
|
||||||
ret = ttm_base_object_init(tfile,
|
/*
|
||||||
&user_bo->base,
|
ret = ttm_prime_object_init(tfile,
|
||||||
|
size,
|
||||||
|
&user_bo->prime,
|
||||||
shareable,
|
shareable,
|
||||||
ttm_buffer_type,
|
ttm_buffer_type,
|
||||||
&vmw_user_dmabuf_release, NULL);
|
&vmw_user_dmabuf_release,
|
||||||
|
&vmw_user_dmabuf_ref_obj_release);
|
||||||
if (unlikely(ret != 0)) {
|
if (unlikely(ret != 0)) {
|
||||||
ttm_bo_unref(&tmp);
|
ttm_bo_unref(&tmp);
|
||||||
goto out_no_base_object;
|
goto out_no_base_object;
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
*p_dma_buf = &user_bo->dma;
|
*p_dma_buf = &user_bo->dma;
|
||||||
*handle = user_bo->base.hash.key;
|
*handle = user_bo->prime.base.hash.key;
|
||||||
|
|
||||||
out_no_base_object:
|
out_no_base_object:
|
||||||
return ret;
|
return ret;
|
||||||
@ -473,8 +532,132 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
|
|||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
vmw_user_bo = vmw_user_dma_buffer(bo);
|
vmw_user_bo = vmw_user_dma_buffer(bo);
|
||||||
return (vmw_user_bo->base.tfile == tfile ||
|
return (vmw_user_bo->prime.base.tfile == tfile ||
|
||||||
vmw_user_bo->base.shareable) ? 0 : -EPERM;
|
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
|
||||||
|
* access, idling previous GPU operations on the buffer and optionally
|
||||||
|
* blocking it for further command submissions.
|
||||||
|
*
|
||||||
|
* @user_bo: Pointer to the buffer object being grabbed for CPU access
|
||||||
|
* @tfile: Identifying the caller.
|
||||||
|
* @flags: Flags indicating how the grab should be performed.
|
||||||
|
*
|
||||||
|
* A blocking grab will be automatically released when @tfile is closed.
|
||||||
|
*/
|
||||||
|
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
|
||||||
|
struct ttm_object_file *tfile,
|
||||||
|
uint32_t flags)
|
||||||
|
{
|
||||||
|
struct ttm_buffer_object *bo = &user_bo->dma.base;
|
||||||
|
bool existed;
|
||||||
|
int ret=0;
|
||||||
|
|
||||||
|
if (flags & drm_vmw_synccpu_allow_cs) {
|
||||||
|
struct ttm_bo_device *bdev = bo->bdev;
|
||||||
|
|
||||||
|
// spin_lock(&bdev->fence_lock);
|
||||||
|
// ret = ttm_bo_wait(bo, false, true,
|
||||||
|
// !!(flags & drm_vmw_synccpu_dontblock));
|
||||||
|
// spin_unlock(&bdev->fence_lock);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ret = ttm_bo_synccpu_write_grab
|
||||||
|
// (bo, !!(flags & drm_vmw_synccpu_dontblock));
|
||||||
|
// if (unlikely(ret != 0))
|
||||||
|
// return ret;
|
||||||
|
|
||||||
|
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
|
||||||
|
TTM_REF_SYNCCPU_WRITE, &existed);
|
||||||
|
// if (ret != 0 || existed)
|
||||||
|
// ttm_bo_synccpu_write_release(&user_bo->dma.base);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
|
||||||
|
* and unblock command submission on the buffer if blocked.
|
||||||
|
*
|
||||||
|
* @handle: Handle identifying the buffer object.
|
||||||
|
* @tfile: Identifying the caller.
|
||||||
|
* @flags: Flags indicating the type of release.
|
||||||
|
*/
|
||||||
|
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
|
||||||
|
struct ttm_object_file *tfile,
|
||||||
|
uint32_t flags)
|
||||||
|
{
|
||||||
|
if (!(flags & drm_vmw_synccpu_allow_cs))
|
||||||
|
return ttm_ref_object_base_unref(tfile, handle,
|
||||||
|
TTM_REF_SYNCCPU_WRITE);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
|
||||||
|
* functionality.
|
||||||
|
*
|
||||||
|
* @dev: Identifies the drm device.
|
||||||
|
* @data: Pointer to the ioctl argument.
|
||||||
|
* @file_priv: Identifies the caller.
|
||||||
|
*
|
||||||
|
* This function checks the ioctl arguments for validity and calls the
|
||||||
|
* relevant synccpu functions.
|
||||||
|
*/
|
||||||
|
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
|
||||||
|
struct drm_file *file_priv)
|
||||||
|
{
|
||||||
|
struct drm_vmw_synccpu_arg *arg =
|
||||||
|
(struct drm_vmw_synccpu_arg *) data;
|
||||||
|
struct vmw_dma_buffer *dma_buf;
|
||||||
|
struct vmw_user_dma_buffer *user_bo;
|
||||||
|
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
|
||||||
|
|| (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
|
||||||
|
drm_vmw_synccpu_dontblock |
|
||||||
|
drm_vmw_synccpu_allow_cs)) != 0) {
|
||||||
|
DRM_ERROR("Illegal synccpu flags.\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (arg->op) {
|
||||||
|
case drm_vmw_synccpu_grab:
|
||||||
|
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
|
||||||
|
if (unlikely(ret != 0))
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
|
||||||
|
dma);
|
||||||
|
ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
|
||||||
|
vmw_dmabuf_unreference(&dma_buf);
|
||||||
|
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
|
||||||
|
ret != -EBUSY)) {
|
||||||
|
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
|
||||||
|
(unsigned int) arg->handle);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case drm_vmw_synccpu_release:
|
||||||
|
ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
|
||||||
|
arg->flags);
|
||||||
|
if (unlikely(ret != 0)) {
|
||||||
|
DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
|
||||||
|
(unsigned int) arg->handle);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
DRM_ERROR("Invalid synccpu operation.\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if 0
|
#if 0
|
||||||
@ -538,14 +721,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
|
|||||||
return -ESRCH;
|
return -ESRCH;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(base->object_type != ttm_buffer_type)) {
|
if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
|
||||||
ttm_base_object_unref(&base);
|
ttm_base_object_unref(&base);
|
||||||
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
|
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
|
||||||
(unsigned long)handle);
|
(unsigned long)handle);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
|
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
|
||||||
|
prime.base);
|
||||||
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
|
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
|
||||||
ttm_base_object_unref(&base);
|
ttm_base_object_unref(&base);
|
||||||
*out = &vmw_user_bo->dma;
|
*out = &vmw_user_bo->dma;
|
||||||
@ -554,7 +738,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
|
|||||||
}
|
}
|
||||||
|
|
||||||
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
|
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
|
||||||
struct vmw_dma_buffer *dma_buf)
|
struct vmw_dma_buffer *dma_buf,
|
||||||
|
uint32_t *handle)
|
||||||
{
|
{
|
||||||
struct vmw_user_dma_buffer *user_bo;
|
struct vmw_user_dma_buffer *user_bo;
|
||||||
|
|
||||||
@ -562,7 +747,10 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
|
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
|
||||||
return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
|
|
||||||
|
*handle = user_bo->prime.base.hash.key;
|
||||||
|
return ttm_ref_object_add(tfile, &user_bo->prime.base,
|
||||||
|
TTM_REF_USAGE, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -785,48 +973,39 @@ int vmw_dumb_create(struct drm_file *file_priv,
|
|||||||
{
|
{
|
||||||
struct vmw_private *dev_priv = vmw_priv(dev);
|
struct vmw_private *dev_priv = vmw_priv(dev);
|
||||||
struct vmw_master *vmaster = vmw_master(file_priv->master);
|
struct vmw_master *vmaster = vmw_master(file_priv->master);
|
||||||
struct vmw_user_dma_buffer *vmw_user_bo;
|
struct vmw_dma_buffer *dma_buf;
|
||||||
struct ttm_buffer_object *tmp;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
args->pitch = args->width * ((args->bpp + 7) / 8);
|
args->pitch = args->width * ((args->bpp + 7) / 8);
|
||||||
args->size = args->pitch * args->height;
|
args->size = args->pitch * args->height;
|
||||||
|
|
||||||
vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
|
|
||||||
if (vmw_user_bo == NULL)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
ret = ttm_read_lock(&vmaster->lock, true);
|
ret = ttm_read_lock(&vmaster->lock, true);
|
||||||
if (ret != 0) {
|
if (unlikely(ret != 0))
|
||||||
kfree(vmw_user_bo);
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
|
||||||
|
|
||||||
ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
|
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
|
||||||
&vmw_vram_sys_placement, true,
|
args->size, false, &args->handle,
|
||||||
&vmw_user_dmabuf_destroy);
|
&dma_buf);
|
||||||
if (ret != 0)
|
if (unlikely(ret != 0))
|
||||||
goto out_no_dmabuf;
|
goto out_no_dmabuf;
|
||||||
|
|
||||||
tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
|
vmw_dmabuf_unreference(&dma_buf);
|
||||||
ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
|
|
||||||
&vmw_user_bo->base,
|
|
||||||
false,
|
|
||||||
ttm_buffer_type,
|
|
||||||
&vmw_user_dmabuf_release, NULL);
|
|
||||||
if (unlikely(ret != 0))
|
|
||||||
goto out_no_base_object;
|
|
||||||
|
|
||||||
args->handle = vmw_user_bo->base.hash.key;
|
|
||||||
|
|
||||||
out_no_base_object:
|
|
||||||
ttm_bo_unref(&tmp);
|
|
||||||
out_no_dmabuf:
|
out_no_dmabuf:
|
||||||
ttm_read_unlock(&vmaster->lock);
|
ttm_read_unlock(&vmaster->lock);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_dumb_map_offset - Return the address space offset of a dumb buffer
|
||||||
|
*
|
||||||
|
* @file_priv: Pointer to a struct drm_file identifying the caller.
|
||||||
|
* @dev: Pointer to the drm device.
|
||||||
|
* @handle: Handle identifying the dumb buffer.
|
||||||
|
* @offset: The address space offset returned.
|
||||||
|
*
|
||||||
|
* This is a driver callback for the core drm dumb_map_offset functionality.
|
||||||
|
*/
|
||||||
int vmw_dumb_map_offset(struct drm_file *file_priv,
|
int vmw_dumb_map_offset(struct drm_file *file_priv,
|
||||||
struct drm_device *dev, uint32_t handle,
|
struct drm_device *dev, uint32_t handle,
|
||||||
uint64_t *offset)
|
uint64_t *offset)
|
||||||
@ -844,6 +1023,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_dumb_destroy - Destroy a dumb boffer
|
||||||
|
*
|
||||||
|
* @file_priv: Pointer to a struct drm_file identifying the caller.
|
||||||
|
* @dev: Pointer to the drm device.
|
||||||
|
* @handle: Handle identifying the dumb buffer.
|
||||||
|
*
|
||||||
|
* This is a driver callback for the core drm dumb_destroy functionality.
|
||||||
|
*/
|
||||||
int vmw_dumb_destroy(struct drm_file *file_priv,
|
int vmw_dumb_destroy(struct drm_file *file_priv,
|
||||||
struct drm_device *dev,
|
struct drm_device *dev,
|
||||||
uint32_t handle)
|
uint32_t handle)
|
||||||
@ -975,7 +1163,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
|
|||||||
if (new_backup)
|
if (new_backup)
|
||||||
res->backup_offset = new_backup_offset;
|
res->backup_offset = new_backup_offset;
|
||||||
|
|
||||||
if (!res->func->may_evict)
|
if (!res->func->may_evict || res->id == -1)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
write_lock(&dev_priv->resource_lock);
|
write_lock(&dev_priv->resource_lock);
|
||||||
@ -997,7 +1185,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
|
|||||||
*/
|
*/
|
||||||
static int
|
static int
|
||||||
vmw_resource_check_buffer(struct vmw_resource *res,
|
vmw_resource_check_buffer(struct vmw_resource *res,
|
||||||
struct ww_acquire_ctx *ticket,
|
|
||||||
bool interruptible,
|
bool interruptible,
|
||||||
struct ttm_validate_buffer *val_buf)
|
struct ttm_validate_buffer *val_buf)
|
||||||
{
|
{
|
||||||
@ -1014,7 +1201,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
|
|||||||
INIT_LIST_HEAD(&val_list);
|
INIT_LIST_HEAD(&val_list);
|
||||||
val_buf->bo = ttm_bo_reference(&res->backup->base);
|
val_buf->bo = ttm_bo_reference(&res->backup->base);
|
||||||
list_add_tail(&val_buf->head, &val_list);
|
list_add_tail(&val_buf->head, &val_list);
|
||||||
ret = ttm_eu_reserve_buffers(ticket, &val_list);
|
ret = ttm_eu_reserve_buffers(NULL, &val_list);
|
||||||
if (unlikely(ret != 0))
|
if (unlikely(ret != 0))
|
||||||
goto out_no_reserve;
|
goto out_no_reserve;
|
||||||
|
|
||||||
@ -1032,7 +1219,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
out_no_validate:
|
out_no_validate:
|
||||||
ttm_eu_backoff_reservation(ticket, &val_list);
|
ttm_eu_backoff_reservation(NULL, &val_list);
|
||||||
out_no_reserve:
|
out_no_reserve:
|
||||||
ttm_bo_unref(&val_buf->bo);
|
ttm_bo_unref(&val_buf->bo);
|
||||||
if (backup_dirty)
|
if (backup_dirty)
|
||||||
@ -1077,8 +1264,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
|
|||||||
* @val_buf: Backup buffer information.
|
* @val_buf: Backup buffer information.
|
||||||
*/
|
*/
|
||||||
static void
|
static void
|
||||||
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
|
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
|
||||||
struct ttm_validate_buffer *val_buf)
|
|
||||||
{
|
{
|
||||||
struct list_head val_list;
|
struct list_head val_list;
|
||||||
|
|
||||||
@ -1087,7 +1273,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
|
|||||||
|
|
||||||
INIT_LIST_HEAD(&val_list);
|
INIT_LIST_HEAD(&val_list);
|
||||||
list_add_tail(&val_buf->head, &val_list);
|
list_add_tail(&val_buf->head, &val_list);
|
||||||
ttm_eu_backoff_reservation(ticket, &val_list);
|
ttm_eu_backoff_reservation(NULL, &val_list);
|
||||||
ttm_bo_unref(&val_buf->bo);
|
ttm_bo_unref(&val_buf->bo);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1096,18 +1282,18 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
|
|||||||
* to a backup buffer.
|
* to a backup buffer.
|
||||||
*
|
*
|
||||||
* @res: The resource to evict.
|
* @res: The resource to evict.
|
||||||
|
* @interruptible: Whether to wait interruptible.
|
||||||
*/
|
*/
|
||||||
int vmw_resource_do_evict(struct vmw_resource *res)
|
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
|
||||||
{
|
{
|
||||||
struct ttm_validate_buffer val_buf;
|
struct ttm_validate_buffer val_buf;
|
||||||
const struct vmw_res_func *func = res->func;
|
const struct vmw_res_func *func = res->func;
|
||||||
struct ww_acquire_ctx ticket;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
BUG_ON(!func->may_evict);
|
BUG_ON(!func->may_evict);
|
||||||
|
|
||||||
val_buf.bo = NULL;
|
val_buf.bo = NULL;
|
||||||
ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
|
ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
|
||||||
if (unlikely(ret != 0))
|
if (unlikely(ret != 0))
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
@ -1122,7 +1308,7 @@ int vmw_resource_do_evict(struct vmw_resource *res)
|
|||||||
res->backup_dirty = true;
|
res->backup_dirty = true;
|
||||||
res->res_dirty = false;
|
res->res_dirty = false;
|
||||||
out_no_unbind:
|
out_no_unbind:
|
||||||
vmw_resource_backoff_reservation(&ticket, &val_buf);
|
vmw_resource_backoff_reservation(&val_buf);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
@ -1146,6 +1332,7 @@ int vmw_resource_validate(struct vmw_resource *res)
     struct vmw_private *dev_priv = res->dev_priv;
     struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
     struct ttm_validate_buffer val_buf;
+    unsigned err_count = 0;
 
     if (likely(!res->func->may_evict))
         return 0;
@ -1160,7 +1347,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 
         write_lock(&dev_priv->resource_lock);
         if (list_empty(lru_list) || !res->func->may_evict) {
-            DRM_ERROR("Out of device device id entries "
+            DRM_ERROR("Out of device device resources "
                       "for %s.\n", res->func->type_name);
             ret = -EBUSY;
             write_unlock(&dev_priv->resource_lock);
@ -1173,7 +1360,19 @@ int vmw_resource_validate(struct vmw_resource *res)
         list_del_init(&evict_res->lru_head);
 
         write_unlock(&dev_priv->resource_lock);
-        vmw_resource_do_evict(evict_res);
+
+        ret = vmw_resource_do_evict(evict_res, true);
+        if (unlikely(ret != 0)) {
+            write_lock(&dev_priv->resource_lock);
+            list_add_tail(&evict_res->lru_head, lru_list);
+            write_unlock(&dev_priv->resource_lock);
+            if (ret == -ERESTARTSYS ||
+                ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+                vmw_resource_unreference(&evict_res);
+                goto out_no_validate;
+            }
+        }
+
         vmw_resource_unreference(&evict_res);
     } while (1);
 
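The eviction loop patched above (and the one in vmw_resource_evict_type further down) follows one policy: a failed eviction puts the resource back on its LRU list, and the loop only gives up after more than VMW_RES_EVICT_ERR_COUNT consecutive failures, or immediately on -ERESTARTSYS when waiting interruptibly. A minimal sketch of that policy follows; the fake_res type, the evict() callback and the error limit are placeholders rather than the driver's real definitions, and locking is omitted for brevity.

#include <linux/list.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Stand-in for the real error limit (VMW_RES_EVICT_ERR_COUNT in the driver). */
#define EVICT_ERR_LIMIT 10

struct fake_res {
    struct list_head lru_head;
};

/* Evict everything on @lru, tolerating a bounded number of failures. */
static int evict_until_empty(struct list_head *lru,
                             int (*evict)(struct fake_res *res),
                             bool interruptible)
{
    unsigned err_count = 0;
    struct fake_res *res;
    int ret;

    while (!list_empty(lru)) {
        res = list_first_entry(lru, struct fake_res, lru_head);
        list_del_init(&res->lru_head);

        ret = evict(res);
        if (ret != 0) {
            /* Put the resource back so a later pass can retry it. */
            list_add_tail(&res->lru_head, lru);
            /* Give up on a signal (interruptible case) or after too many errors. */
            if ((interruptible && ret == -ERESTARTSYS) ||
                ++err_count > EVICT_ERR_LIMIT)
                return ret;
        }
    }
    return 0;
}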
@ -1234,7 +1433,17 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
  * @mem: The truct ttm_mem_reg indicating to what memory
  *       region the move is taking place.
  *
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved when the
+ * buffer is no longer bound to the resource, so @bo:res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
  */
 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                               struct ttm_mem_reg *mem)
@ -1258,13 +1467,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
  * @type: The resource type to evict
  *
  * To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
  */
 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                     enum vmw_res_type type)
 {
     struct list_head *lru_list = &dev_priv->res_lru[type];
     struct vmw_resource *evict_res;
+    unsigned err_count = 0;
+    int ret;
 
     do {
         write_lock(&dev_priv->resource_lock);
@ -1277,7 +1488,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                       lru_head));
         list_del_init(&evict_res->lru_head);
         write_unlock(&dev_priv->resource_lock);
-        vmw_resource_do_evict(evict_res);
+
+        ret = vmw_resource_do_evict(evict_res, false);
+        if (unlikely(ret != 0)) {
+            write_lock(&dev_priv->resource_lock);
+            list_add_tail(&evict_res->lru_head, lru_list);
+            write_unlock(&dev_priv->resource_lock);
+            if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+                vmw_resource_unreference(&evict_res);
+                return;
+            }
+        }
+
         vmw_resource_unreference(&evict_res);
     } while (1);
 
 drivers/video/drm/vmwgfx/vmwgfx_scrn.c:
@ -317,6 +317,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
         crtc->fb = NULL;
         crtc->x = 0;
         crtc->y = 0;
+        crtc->enabled = false;
 
         vmw_sou_del_active(dev_priv, sou);
 
@ -377,6 +378,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
         crtc->fb = NULL;
         crtc->x = 0;
         crtc->y = 0;
+        crtc->enabled = false;
 
         return ret;
     }
@ -389,6 +391,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
     crtc->fb = fb;
     crtc->x = set->x;
     crtc->y = set->y;
+    crtc->enabled = true;
 
     return 0;
 }
@ -510,9 +513,6 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
     dev_priv->sou_priv->num_implicit = 0;
     dev_priv->sou_priv->implicit_fb = NULL;
 
-//   ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
-//   if (unlikely(ret != 0))
-//       goto err_free;
 
     ret = drm_mode_create_dirty_info_property(dev);
     if (unlikely(ret != 0))
 drivers/video/drm/vmwgfx/vmwgfx_shader.c (new file, 443 lines)
@ -0,0 +1,443 @@
/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_shader {
    struct vmw_resource res;
    SVGA3dShaderType type;
    uint32_t size;
};

struct vmw_user_shader {
    struct ttm_base_object base;
    struct vmw_shader shader;
};

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static uint64_t vmw_user_shader_size;

static const struct vmw_user_resource_conv user_shader_conv = {
    .object_type = VMW_RES_SHADER,
    .base_obj_to_res = vmw_user_shader_base_to_res,
    .res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
    &user_shader_conv;


static const struct vmw_res_func vmw_gb_shader_func = {
    .res_type = vmw_res_shader,
    .needs_backup = true,
    .may_evict = true,
    .type_name = "guest backed shaders",
    .backup_placement = &vmw_mob_placement,
    .create = vmw_gb_shader_create,
    .destroy = vmw_gb_shader_destroy,
    .bind = vmw_gb_shader_bind,
    .unbind = vmw_gb_shader_unbind
};

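The vmw_res_func table above is the per-type hook set that the generic resource code drives: create the hardware object, bind it to its backup buffer, unbind it, and destroy it. A simplified sketch of that pattern is below, using made-up demo_* types rather than the driver's definitions, just to show how generic code works only through the table.

/* Illustrative only: a function table in the style of vmw_res_func.
 * The demo_* structs are stand-ins, not vmwgfx types. */
struct demo_res;

struct demo_res_ops {
    const char *type_name;
    int (*create)(struct demo_res *res);   /* allocate the device-side object  */
    int (*bind)(struct demo_res *res);     /* attach it to its backup storage  */
    int (*unbind)(struct demo_res *res);   /* detach before the backup moves   */
    int (*destroy)(struct demo_res *res);  /* release the device-side object   */
};

struct demo_res {
    const struct demo_res_ops *ops;
    int id;
};

/* Generic validation path: it knows nothing about shaders or surfaces,
 * only about the ops table each resource type provides. */
static int demo_res_make_hardware_ready(struct demo_res *res)
{
    int ret = res->ops->create(res);
    if (ret)
        return ret;
    return res->ops->bind(res);
}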
/**
 * Shader management:
 */

static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
    return container_of(res, struct vmw_shader, res);
}

static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
    (void) vmw_gb_shader_destroy(res);
}

static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                              struct vmw_resource *res,
                              uint32_t size,
                              uint64_t offset,
                              SVGA3dShaderType type,
                              struct vmw_dma_buffer *byte_code,
                              void (*res_free) (struct vmw_resource *res))
{
    struct vmw_shader *shader = vmw_res_to_shader(res);
    int ret;

    ret = vmw_resource_init(dev_priv, res, true,
                            res_free, &vmw_gb_shader_func);

    if (unlikely(ret != 0)) {
        if (res_free)
            res_free(res);
        else
            kfree(res);
        return ret;
    }

    res->backup_size = size;
    if (byte_code) {
        res->backup = vmw_dmabuf_reference(byte_code);
        res->backup_offset = offset;
    }
    shader->size = size;
    shader->type = type;

    vmw_resource_activate(res, vmw_hw_shader_destroy);
    return 0;
}

static int vmw_gb_shader_create(struct vmw_resource *res)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct vmw_shader *shader = vmw_res_to_shader(res);
    int ret;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDefineGBShader body;
    } *cmd;

    if (likely(res->id != -1))
        return 0;

    ret = vmw_resource_alloc_id(res);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed to allocate a shader id.\n");
        goto out_no_id;
    }

    if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
        ret = -EBUSY;
        goto out_no_fifo;
    }

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL)) {
        DRM_ERROR("Failed reserving FIFO space for shader "
                  "creation.\n");
        ret = -ENOMEM;
        goto out_no_fifo;
    }

    cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.shid = res->id;
    cmd->body.type = shader->type;
    cmd->body.sizeInBytes = shader->size;
    vmw_fifo_commit(dev_priv, sizeof(*cmd));
    (void) vmw_3d_resource_inc(dev_priv, false);

    return 0;

out_no_fifo:
    vmw_resource_release_id(res);
out_no_id:
    return ret;
}

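vmw_gb_shader_create above uses the FIFO command idiom that repeats throughout this file: reserve room for a command header plus body, fill both in place, then commit exactly the reserved size. A condensed restatement of that idiom is below, using the same DEFINE_GB_SHADER command and the same calls that appear in the code above; it is an illustration of the pattern, not an additional driver entry point.

/* Sketch of the reserve/fill/commit FIFO idiom, with error handling
 * trimmed to the essentials. All identifiers are taken from the code above. */
static int demo_send_define_gb_shader(struct vmw_private *dev_priv,
                                      uint32_t shid, SVGA3dShaderType type,
                                      uint32_t size_in_bytes)
{
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDefineGBShader body;
    } *cmd;

    /* Reserve header + body in one piece so they go out contiguously. */
    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (cmd == NULL)
        return -ENOMEM;                       /* no FIFO space right now */

    cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
    cmd->header.size = sizeof(cmd->body);     /* body size only, not the header */
    cmd->body.shid = shid;
    cmd->body.type = type;
    cmd->body.sizeInBytes = size_in_bytes;

    /* Commit exactly what was reserved; the device picks it up from here. */
    vmw_fifo_commit(dev_priv, sizeof(*cmd));
    return 0;
}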
static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdBindGBShader body;
    } *cmd;
    struct ttm_buffer_object *bo = val_buf->bo;

    BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL)) {
        DRM_ERROR("Failed reserving FIFO space for shader "
                  "binding.\n");
        return -ENOMEM;
    }

    cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.shid = res->id;
    cmd->body.mobid = bo->mem.start;
    cmd->body.offsetInBytes = 0;
    res->backup_dirty = false;
    vmw_fifo_commit(dev_priv, sizeof(*cmd));

    return 0;
}

static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdBindGBShader body;
    } *cmd;
    struct vmw_fence_obj *fence;

    BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL)) {
        DRM_ERROR("Failed reserving FIFO space for shader "
                  "unbinding.\n");
        return -ENOMEM;
    }

    cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.shid = res->id;
    cmd->body.mobid = SVGA3D_INVALID_ID;
    cmd->body.offsetInBytes = 0;
    vmw_fifo_commit(dev_priv, sizeof(*cmd));

    /*
     * Create a fence object and fence the backup buffer.
     */

    (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                      &fence, NULL);

    vmw_fence_single_bo(val_buf->bo, fence);

    if (likely(fence != NULL))
        vmw_fence_obj_unreference(&fence);

    return 0;
}

static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDestroyGBShader body;
    } *cmd;

    if (likely(res->id == -1))
        return 0;

    mutex_lock(&dev_priv->binding_mutex);
    vmw_context_binding_res_list_kill(&res->binding_head);

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL)) {
        DRM_ERROR("Failed reserving FIFO space for shader "
                  "destruction.\n");
        mutex_unlock(&dev_priv->binding_mutex);
        return -ENOMEM;
    }

    cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.shid = res->id;
    vmw_fifo_commit(dev_priv, sizeof(*cmd));
    mutex_unlock(&dev_priv->binding_mutex);
    vmw_resource_release_id(res);
    vmw_3d_resource_dec(dev_priv, false);

    return 0;
}

/**
 * User-space shader management:
 */

static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
    return &(container_of(base, struct vmw_user_shader, base)->
             shader.res);
}

static void vmw_user_shader_free(struct vmw_resource *res)
{
    struct vmw_user_shader *ushader =
        container_of(res, struct vmw_user_shader, shader.res);
    struct vmw_private *dev_priv = res->dev_priv;

//  ttm_base_object_kfree(ushader, base);
//  ttm_mem_global_free(vmw_mem_glob(dev_priv),
//                      vmw_user_shader_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
    struct ttm_base_object *base = *p_base;
    struct vmw_resource *res = vmw_user_shader_base_to_res(base);

    *p_base = NULL;
    vmw_resource_unreference(&res);
}

int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
    struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
    struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

    return ttm_ref_object_base_unref(tfile, arg->handle,
                                     TTM_REF_USAGE);
}

#if 0
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    struct vmw_user_shader *ushader;
    struct vmw_resource *res;
    struct vmw_resource *tmp;
    struct drm_vmw_shader_create_arg *arg =
        (struct drm_vmw_shader_create_arg *)data;
    struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
    struct vmw_master *vmaster = vmw_master(file_priv->master);
    struct vmw_dma_buffer *buffer = NULL;
    SVGA3dShaderType shader_type;
    int ret;

    if (arg->buffer_handle != SVGA3D_INVALID_ID) {
        ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
                                     &buffer);
        if (unlikely(ret != 0)) {
            DRM_ERROR("Could not find buffer for shader "
                      "creation.\n");
            return ret;
        }

        if ((u64)buffer->base.num_pages * PAGE_SIZE <
            (u64)arg->size + (u64)arg->offset) {
            DRM_ERROR("Illegal buffer- or shader size.\n");
            ret = -EINVAL;
            goto out_bad_arg;
        }
    }

    switch (arg->shader_type) {
    case drm_vmw_shader_type_vs:
        shader_type = SVGA3D_SHADERTYPE_VS;
        break;
    case drm_vmw_shader_type_ps:
        shader_type = SVGA3D_SHADERTYPE_PS;
        break;
    case drm_vmw_shader_type_gs:
        shader_type = SVGA3D_SHADERTYPE_GS;
        break;
    default:
        DRM_ERROR("Illegal shader type.\n");
        ret = -EINVAL;
        goto out_bad_arg;
    }

    /*
     * Approximate idr memory usage with 128 bytes. It will be limited
     * by maximum number_of shaders anyway.
     */

    if (unlikely(vmw_user_shader_size == 0))
        vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
                               + 128;

    ret = ttm_read_lock(&vmaster->lock, true);
    if (unlikely(ret != 0))
        return ret;

    ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                               vmw_user_shader_size,
                               false, true);
    if (unlikely(ret != 0)) {
        if (ret != -ERESTARTSYS)
            DRM_ERROR("Out of graphics memory for shader"
                      " creation.\n");
        goto out_unlock;
    }

    ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
    if (unlikely(ushader == NULL)) {
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_shader_size);
        ret = -ENOMEM;
        goto out_unlock;
    }

    res = &ushader->shader.res;
    ushader->base.shareable = false;
    ushader->base.tfile = NULL;

    /*
     * From here on, the destructor takes over resource freeing.
     */

    ret = vmw_gb_shader_init(dev_priv, res, arg->size,
                             arg->offset, shader_type, buffer,
                             vmw_user_shader_free);
    if (unlikely(ret != 0))
        goto out_unlock;

    tmp = vmw_resource_reference(res);
    ret = ttm_base_object_init(tfile, &ushader->base, false,
                               VMW_RES_SHADER,
                               &vmw_user_shader_base_release, NULL);

    if (unlikely(ret != 0)) {
        vmw_resource_unreference(&tmp);
        goto out_err;
    }

    arg->shader_handle = ushader->base.hash.key;
out_err:
    vmw_resource_unreference(&res);
out_unlock:
    ttm_read_unlock(&vmaster->lock);
out_bad_arg:
    vmw_dmabuf_unreference(&buffer);

    return ret;

}
#endif
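The (currently disabled) vmw_shader_define_ioctl above also shows the TTM accounting pattern: compute a fixed per-object size once, as ttm_round_pot(sizeof(*ushader)) plus a rough 128-byte allowance for idr bookkeeping, charge it before allocating, and release it on every failure path. A stripped-down sketch of just that part follows; the helper name is hypothetical, while the calls and arguments mirror the code above.

/* Hypothetical helper illustrating the accounting charge/alloc/undo order. */
static int demo_charge_and_alloc_ushader(struct vmw_private *dev_priv,
                                         struct vmw_user_shader **out)
{
    static uint64_t accounted_size;   /* computed once, reused afterwards */
    struct vmw_user_shader *ushader;
    int ret;

    if (accounted_size == 0)
        accounted_size = ttm_round_pot(sizeof(*ushader)) + 128;

    /* Charge the TTM memory accounting before the real allocation. */
    ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                               accounted_size, false, true);
    if (ret != 0)
        return ret;

    ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
    if (ushader == NULL) {
        /* Undo the accounting charge on failure. */
        ttm_mem_global_free(vmw_mem_glob(dev_priv), accounted_size);
        return -ENOMEM;
    }

    *out = ushader;
    return 0;
}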
 drivers/video/drm/vmwgfx/vmwgfx_surface.c:
@ -38,10 +38,9 @@
  * @size: TTM accounting size for the surface.
  */
 struct vmw_user_surface {
-    struct ttm_base_object base;
+    struct ttm_prime_object prime;
     struct vmw_surface srf;
     uint32_t size;
-    uint32_t backup_handle;
 };
 
 /**
@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                  struct ttm_validate_buffer *val_buf);
 static int vmw_legacy_srf_create(struct vmw_resource *res);
 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+                               struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+                                 bool readback,
+                                 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+
 
 static const struct vmw_user_resource_conv user_surface_conv = {
     .object_type = VMW_RES_SURFACE,
@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
     .unbind = &vmw_legacy_srf_unbind
 };
 
+static const struct vmw_res_func vmw_gb_surface_func = {
+    .res_type = vmw_res_surface,
+    .needs_backup = true,
+    .may_evict = true,
+    .type_name = "guest backed surfaces",
+    .backup_placement = &vmw_mob_placement,
+    .create = vmw_gb_surface_create,
+    .destroy = vmw_gb_surface_destroy,
+    .bind = vmw_gb_surface_bind,
+    .unbind = vmw_gb_surface_unbind
+};
+
 /**
  * struct vmw_surface_dma - SVGA3D DMA command
  */
@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
     struct vmw_surface *srf;
     void *cmd;
 
+    if (res->func->destroy == vmw_gb_surface_destroy) {
+        (void) vmw_gb_surface_destroy(res);
+        return;
+    }
+
     if (res->id != -1) {
 
         cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
@ -549,11 +573,14 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
     struct vmw_resource *res = &srf->res;
 
     BUG_ON(res_free == NULL);
+    if (!dev_priv->has_mob)
     (void) vmw_3d_resource_inc(dev_priv, false);
     ret = vmw_resource_init(dev_priv, res, true, res_free,
+                            (dev_priv->has_mob) ? &vmw_gb_surface_func :
                             &vmw_legacy_surface_func);
 
     if (unlikely(ret != 0)) {
+        if (!dev_priv->has_mob)
         vmw_3d_resource_dec(dev_priv, false);
         res_free(res);
         return ret;
@ -580,7 +607,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 static struct vmw_resource *
 vmw_user_surface_base_to_res(struct ttm_base_object *base)
 {
-    return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+    return &(container_of(base, struct vmw_user_surface,
+                          prime.base)->srf.res);
 }
 
 /**
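The accessor change just above works because container_of() accepts a nested member path, so moving the embedded base object one level down into a ttm_prime_object only changes the member argument. A minimal illustration with simplified stand-in structs (not the driver's types):

#include <linux/kernel.h>   /* container_of() */

struct demo_base    { int handle; };
struct demo_prime   { struct demo_base base; };
struct demo_surface {
    struct demo_prime prime;   /* was: struct demo_base base; */
    int width, height;
};

static struct demo_surface *demo_surface_from_base(struct demo_base *base)
{
    /* container_of(ptr, type, member) works with a nested member path,
     * because offsetof(type, prime.base) resolves to a single constant. */
    return container_of(base, struct demo_surface, prime.base);
}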
@ -616,7 +644,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 {
     struct ttm_base_object *base = *p_base;
     struct vmw_user_surface *user_srf =
-        container_of(base, struct vmw_user_surface, base);
+        container_of(base, struct vmw_user_surface, prime.base);
     struct vmw_resource *res = &user_srf->srf.res;
 
     *p_base = NULL;
@ -733,7 +761,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
     srf->base_size = *srf->sizes;
     srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-    srf->multisample_count = 1;
+    srf->multisample_count = 0;
 
     cur_bo_offset = 0;
     cur_offset = srf->offsets;
@ -774,8 +802,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
     }
     srf->snooper.crtc = NULL;
 
-    user_srf->base.shareable = false;
-    user_srf->base.tfile = NULL;
+    user_srf->prime.base.shareable = false;
+    user_srf->prime.base.tfile = NULL;
 
     /**
      * From this point, the generic resource management functions
@ -787,7 +815,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         goto out_unlock;
 
     tmp = vmw_resource_reference(&srf->res);
-    ret = ttm_base_object_init(tfile, &user_srf->base,
+    ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
                                 req->shareable, VMW_RES_SURFACE,
                                 &vmw_user_surface_base_release, NULL);
 
@ -797,7 +825,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         goto out_unlock;
     }
 
-    rep->sid = user_srf->base.hash.key;
+    rep->sid = user_srf->prime.base.hash.key;
     vmw_resource_unreference(&res);
 
     ttm_read_unlock(&vmaster->lock);
@ -807,7 +835,7 @@ out_no_copy:
 out_no_offsets:
     kfree(srf->sizes);
 out_no_sizes:
-    ttm_base_object_kfree(user_srf, base);
+    ttm_prime_object_kfree(user_srf, prime);
 out_no_user_srf:
     ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
@ -826,6 +854,7 @@ out_unlock:
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
 {
+    struct vmw_private *dev_priv = vmw_priv(dev);
     union drm_vmw_surface_reference_arg *arg =
         (union drm_vmw_surface_reference_arg *)data;
     struct drm_vmw_surface_arg *req = &arg->req;
@ -837,19 +866,20 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
     struct ttm_base_object *base;
     int ret = -EINVAL;
 
-    base = ttm_base_object_lookup(tfile, req->sid);
+    base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
     if (unlikely(base == NULL)) {
         DRM_ERROR("Could not find surface to reference.\n");
         return -EINVAL;
     }
 
-    if (unlikely(base->object_type != VMW_RES_SURFACE))
+    if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
         goto out_bad_resource;
 
-    user_srf = container_of(base, struct vmw_user_surface, base);
+    user_srf = container_of(base, struct vmw_user_surface, prime.base);
     srf = &user_srf->srf;
 
-    ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+    ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+                             TTM_REF_USAGE, NULL);
     if (unlikely(ret != 0)) {
         DRM_ERROR("Could not add a reference to a surface.\n");
         goto out_no_reference;