ttm: 3.12-rc6

git-svn-id: svn://kolibrios.org@4112 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2013-10-27 04:54:40 +00:00
parent 737ef03421
commit edcb8e9226
4 changed files with 55 additions and 46 deletions


@@ -39,6 +39,10 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 
+#define TTM_ASSERT_LOCKED(param)
+#define TTM_DEBUG(fmt, arg...)
+#define TTM_BO_HASH_ORDER 13
+
 #define pr_err(fmt, ...) \
         printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
@@ -219,7 +223,6 @@ void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 }
 EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
-
 /*
  * Call bo->mutex locked.
  */
@@ -230,7 +233,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 	int ret = 0;
 	uint32_t page_flags = 0;
 
-//	TTM_ASSERT_LOCKED(&bo->mutex);
+	TTM_ASSERT_LOCKED(&bo->mutex);
 	bo->ttm = NULL;
 
 	if (bdev->need_dma32)
@@ -609,13 +612,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-	write_lock(&bdev->vm_lock);
-	if (likely(bo->vm_node != NULL)) {
-//		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
-		drm_mm_put_block(bo->vm_node);
-		bo->vm_node = NULL;
-	}
-	write_unlock(&bdev->vm_lock);
+	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
 	ttm_mem_io_lock(man, false);
 //	ttm_mem_io_free_vm(bo);
 	ttm_mem_io_unlock(man);
@@ -1125,6 +1122,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->resv = &bo->ttm_resv;
 //	reservation_object_init(bo->resv);
 	atomic_inc(&bo->glob->bo_count);
+	drm_vma_node_reset(&bo->vma_node);
 
 	ret = ttm_bo_check_placement(bo, placement);
@@ -1303,6 +1301,7 @@ out_no_drp:
 	kfree(glob);
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_global_init);
 
 
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
@@ -1315,7 +1314,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 
 	ENTER();
 
-//	rwlock_init(&bdev->vm_lock);
 	bdev->driver = driver;
 
 	memset(bdev->man, 0, sizeof(bdev->man));
@@ -1328,9 +1326,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	if (unlikely(ret != 0))
		goto out_no_sys;
 
-	bdev->addr_space_rb = RB_ROOT;
-	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
+	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+				    0x10000000);
 
 //	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = NULL;
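
Note on the ttm_bo.c hunks above: the per-device vm_lock, addr_space rb-tree and addr_space_mm are replaced by the drm_vma_offset_manager helper that mainline 3.12 introduced for mmap-offset management. The following is a minimal sketch of that lifecycle, assuming the mainline 3.12 drm_vma_manager API; my_device, my_object and the helper functions are hypothetical names for illustration, not code from this commit.

#include <drm/drm_vma_manager.h>

struct my_device {
	struct drm_vma_offset_manager vma_manager;
};

struct my_object {
	struct drm_vma_offset_node vma_node;
	unsigned long num_pages;
};

static void my_device_init(struct my_device *dev, unsigned long file_page_offset)
{
	/* same arguments as above: first mappable page offset and range size */
	drm_vma_offset_manager_init(&dev->vma_manager, file_page_offset,
				    0x10000000);
}

static int my_object_init(struct my_device *dev, struct my_object *obj)
{
	/* start with an empty node, as ttm_bo_init() now does */
	drm_vma_node_reset(&obj->vma_node);
	/* reserve an mmap offset range covering the object's pages */
	return drm_vma_offset_add(&dev->vma_manager, &obj->vma_node,
				  obj->num_pages);
}

static void my_object_release(struct my_device *dev, struct my_object *obj)
{
	/* the manager carries its own locking, so no per-device vm_lock */
	drm_vma_offset_remove(&dev->vma_manager, &obj->vma_node);
}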


@@ -61,28 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	lpfn = placement->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
-	do {
-		ret = drm_mm_pre_get(mm);
-		if (unlikely(ret))
-			return ret;
-
-		spin_lock(&rman->lock);
-		node = drm_mm_search_free_in_range(mm,
-					mem->num_pages, mem->page_alignment,
-					placement->fpfn, lpfn, 1);
-		if (unlikely(node == NULL)) {
-			spin_unlock(&rman->lock);
-			return 0;
-		}
-		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-						     mem->page_alignment,
-						     placement->fpfn,
-						     lpfn);
-		spin_unlock(&rman->lock);
-	} while (node == NULL);
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	spin_lock(&rman->lock);
+	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+					  mem->page_alignment,
+					  placement->fpfn, lpfn,
+					  DRM_MM_SEARCH_BEST);
+	spin_unlock(&rman->lock);
 
+	if (unlikely(ret)) {
+		kfree(node);
+	} else {
 	mem->mm_node = node;
 	mem->start = node->start;
+	}
 
 	return 0;
 }
@@ -93,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 	if (mem->mm_node) {
 		spin_lock(&rman->lock);
-		drm_mm_put_block(mem->mm_node);
+		drm_mm_remove_node(mem->mm_node);
 		spin_unlock(&rman->lock);
 
+		kfree(mem->mm_node);
 		mem->mm_node = NULL;
 	}
 }
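
Note on ttm_bo_man_get_node()/ttm_bo_man_put_node() above: the old drm_mm_pre_get()/drm_mm_search_free_in_range()/drm_mm_get_block_atomic_range() retry loop gives way to a caller-allocated drm_mm_node that drm_mm_insert_node_in_range() fills in. A minimal sketch of the pattern, assuming the 3.12 drm_mm API; my_range_alloc/my_range_free are hypothetical wrappers, and the TTM code above additionally serializes the insert/remove calls with rman->lock.

#include <linux/slab.h>
#include <drm/drm_mm.h>

static int my_range_alloc(struct drm_mm *mm, unsigned long num_pages,
			  unsigned alignment, unsigned long fpfn,
			  unsigned long lpfn, struct drm_mm_node **out)
{
	struct drm_mm_node *node;
	int ret;

	/* the caller now owns the node memory */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ret = drm_mm_insert_node_in_range(mm, node, num_pages, alignment,
					  fpfn, lpfn, DRM_MM_SEARCH_BEST);
	if (ret) {
		kfree(node);		/* nothing was inserted */
		return ret;
	}

	*out = node;
	return 0;
}

static void my_range_free(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);	/* detach from the allocator */
	kfree(node);			/* then free the caller-owned memory */
}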


@@ -30,6 +30,7 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
 	INIT_LIST_HEAD(&fbo->io_reserve_lru);
-	fbo->vm_node = NULL;
+	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
 
 	spin_lock(&bdev->fence_lock);
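
Note on ttm_buffer_object_transfer() above: the transfer ("ghost") object is a shallow copy of the source BO, so its offset node is reset rather than inherited; a node that was never added to the offset manager is skipped by drm_vma_offset_remove(), so destroying the ghost should leave the original object's mmap offset untouched. A one-line sketch, assuming 3.12 drm_vma_manager semantics (the helper name is hypothetical):

#include <drm/drm_vma_manager.h>

static void my_ghost_node_init(struct drm_vma_offset_node *ghost_node)
{
	/* empty node: nothing for a later drm_vma_offset_remove() to tear down */
	drm_vma_node_reset(ghost_node);
}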


@@ -383,28 +383,26 @@ out:
 	return nr_free;
 }
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
-	unsigned i;
-	int total = 0;
-	for (i = 0; i < NUM_POOLS; ++i)
-		total += _manager->pools[i].npages;
-
-	return total;
-}
-
 /**
  * Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
+ * this can deadlock when called a sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
  */
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
-			      struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	static atomic_t start_pool = ATOMIC_INIT(0);
 	unsigned i;
 	unsigned pool_offset = atomic_add_return(1, &start_pool);
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
+	unsigned long freed = 0;
 
 	pool_offset = pool_offset % NUM_POOLS;
 	/* select start pool in round robin fashion */
@@ -414,14 +412,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
+		freed += nr_free - shrink_pages;
 	}
-	/* return estimated number of unused pages in pool */
-	return ttm_pool_get_num_unused_pages();
+	return freed;
+}
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned i;
+	unsigned long count = 0;
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		count += _manager->pools[i].npages;
+
+	return count;
 }
 
 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
 	register_shrinker(&manager->mm_shrink);
 }
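
Note on the ttm_page_alloc.c hunks above: the shrinker moves from the old single .shrink callback to the count_objects/scan_objects pair introduced in 3.12, where count_objects() reports how many objects are freeable and scan_objects() frees up to sc->nr_to_scan of them, returning the number actually freed. A minimal, self-contained sketch of that contract, assuming the 3.12 shrinker API; the my_* names and the plain page counter are hypothetical stand-ins for the TTM pools.

#include <linux/shrinker.h>

static unsigned long my_pool_pages;	/* hypothetical count of freeable pages */

static unsigned long my_shrink_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	/* only report how much could be freed; do not free anything here */
	return my_pool_pages;
}

static unsigned long my_shrink_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	unsigned long freed = sc->nr_to_scan;

	if (freed > my_pool_pages)
		freed = my_pool_pages;
	my_pool_pages -= freed;		/* stand-in for ttm_page_pool_free() */

	return freed;			/* or SHRINK_STOP when nothing can be reclaimed */
}

static struct shrinker my_shrinker = {
	.count_objects	= my_shrink_count,
	.scan_objects	= my_shrink_scan,
	.seeks		= 1,
};

/* register_shrinker(&my_shrinker) hooks this into memory reclaim,
 * mirroring ttm_pool_mm_shrink_init() above. */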