ddk: update

git-svn-id: svn://kolibrios.org@6295 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
Sergey Semyonov (Serge) 2016-02-27 20:06:12 +00:00
parent 5bae2399f4
commit cae5fbcf75
12 changed files with 720 additions and 148 deletions

View File

@ -25,6 +25,8 @@ CORE_SRC= core.S
NAME_SRCS:= \ NAME_SRCS:= \
debug/dbglog.c \ debug/dbglog.c \
debug/chkstk.S \ debug/chkstk.S \
dma/dma_alloc.c \
dma/fence.c \
io/create.c \ io/create.c \
io/finfo.c \ io/finfo.c \
io/ssize.c \ io/ssize.c \
@ -33,6 +35,7 @@ NAME_SRCS:= \
linux/ctype.c \ linux/ctype.c \
linux/dmapool.c \ linux/dmapool.c \
linux/dmi.c \ linux/dmi.c \
linux/fbsysfs.c \
linux/find_next_bit.c \ linux/find_next_bit.c \
linux/firmware.c \ linux/firmware.c \
linux/gcd.c \ linux/gcd.c \

View File

@ -0,0 +1,22 @@
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
/**
 * dma_alloc_coherent - allocate zeroed, DMA-capable memory
 * @dev:        device the allocation is for (unused on this platform)
 * @size:       requested size in bytes
 * @dma_handle: out parameter receiving the physical (DMA) address
 * @gfp:        allocation flags (unused on this platform)
 *
 * The request is rounded up to a 32 KiB multiple before being handed to
 * the KolibriOS kernel allocator.  Returns the virtual address, or NULL
 * on failure; *dma_handle is written only on success.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr;

	size = ALIGN(size, 32768);

	vaddr = (void *)KernelAlloc(size);
	if (vaddr != NULL) {
		__builtin_memset(vaddr, 0, size);
		*dma_handle = GetPgAddr(vaddr);
	}

	return vaddr;
}

370
drivers/ddk/dma/fence.c Normal file
View File

@ -0,0 +1,370 @@
/*
* Fence mechanism for dma-buf and to allow for asynchronous dma access
*
* Copyright (C) 2012 Canonical Ltd
* Copyright (C) 2012 Texas Instruments
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/fence.h>
/*
* fence context counter: each execution context should have its own
* fence context, this allows checking if fences belong to the same
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
static atomic_t fence_context_counter = ATOMIC_INIT(0);
/**
* fence_context_alloc - allocate an array of fence contexts
* @num: [in] amount of contexts to allocate
*
* This function will return the first index of the number of fences allocated.
* The fence context is used for setting fence->context to a unique number.
*/
unsigned fence_context_alloc(unsigned num)
{
	unsigned first;

	BUG_ON(!num);

	/* Reserve @num consecutive context ids and return the first one. */
	first = atomic_add_return(num, &fence_context_counter) - num;
	return first;
}
EXPORT_SYMBOL(fence_context_alloc);
/**
* fence_signal_locked - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
* fence_wait() calls and run all the callbacks added with
* fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*
* Unlike fence_signal, this function must be called with fence->lock held.
*/
int fence_signal_locked(struct fence *fence)
{
	struct fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	/* Record the signal time only on the first signal; the barrier
	 * orders the timestamp write before the SIGNALED bit is set. */
	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * we might have raced with the unlocked fence_signal,
		 * still run through all callbacks
		 */
	}

	/* Detach and invoke every queued callback exactly once; the lock
	 * (held by the caller) protects cb_list. */
	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(fence_signal_locked);
/**
* fence_signal - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
* fence_wait() calls and run all the callbacks added with
* fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*/
int fence_signal(struct fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	/* Record the signal time only on the first signal; the barrier
	 * orders the timestamp write before the SIGNALED bit is set. */
	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	/* Already signaled by someone else: nothing left to do. */
	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Callbacks can only have been queued if software signaling was
	 * enabled; run them under fence->lock. */
	if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(fence_signal);
/**
* fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
* remaining timeout in jiffies on success. Other error values may be
* returned on custom implementations.
*
* Performs a synchronous wait on this fence. It is assumed the caller
* directly or indirectly (buf-mgr between reservation and committing)
* holds a reference to the fence, otherwise the fence might be
* freed before return, resulting in undefined behavior.
*/
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
	if (WARN_ON(timeout < 0))
		return -EINVAL;

	/* A zero timeout is a non-blocking poll of the current state. */
	if (!timeout)
		return fence_is_signaled(fence);

	/* Delegate the actual sleep to the driver's wait implementation. */
	return fence->ops->wait(fence, intr, timeout);
}
EXPORT_SYMBOL(fence_wait_timeout);
/* kref release callback: runs when the last fence reference is dropped. */
void fence_release(struct kref *kref)
{
	struct fence *fence = container_of(kref, struct fence, refcount);

	/* Releasing a fence with callbacks still queued is a driver bug. */
	BUG_ON(!list_empty(&fence->cb_list));

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		fence_free(fence);
}
EXPORT_SYMBOL(fence_release);
/* Default destructor: free the fence after an RCU grace period, so
 * concurrent RCU readers never see freed memory. */
void fence_free(struct fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
/**
* fence_enable_sw_signaling - enable signaling on fence
* @fence: [in] the fence to enable
*
* this will request for sw signaling to be enabled, to make the fence
* complete as soon as possible
*/
void fence_enable_sw_signaling(struct fence *fence)
{
	unsigned long flags;

	/* Only the first caller enables signaling, and only when the
	 * fence has not already been signaled. */
	if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		spin_lock_irqsave(fence->lock, flags);
		/* If the driver reports the fence is already complete,
		 * signal it immediately while the lock is held. */
		if (!fence->ops->enable_signaling(fence))
			fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(fence_enable_sw_signaling);
/**
* fence_add_callback - add a callback to be called when the fence
* is signaled
* @fence: [in] the fence to wait on
* @cb: [in] the callback to register
* @func: [in] the function to call
*
* cb will be initialized by fence_add_callback, no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
* Note that the callback can be called from an atomic context. If
* fence is already signaled, this function will return -ENOENT (and
* *not* call the callback)
*
* Add a software callback to the fence. Same restrictions apply to
* refcount as it does to fence_wait, however the caller doesn't need to
* keep a refcount to fence afterwards: when software access is enabled,
* the creator of the fence is required to keep the fence alive until
* after it signals with fence_signal. The callback itself can be called
* from irq context.
*
*/
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
		       fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	/* Fast path: if the fence already signaled, never queue the
	 * callback; leave cb->node initialized so that a later
	 * fence_remove_callback() on it is safe. */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	/* was_set records whether signaling was already enabled before
	 * this call. */
	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	/* Re-check under the lock: we may have raced with a signal. */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set) {
		/* First enabler: ask the driver to turn signaling on.
		 * A false return means the fence is already done. */
		if (!fence->ops->enable_signaling(fence)) {
			fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		/* Not queued: keep cb->node in a safe, empty state. */
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(fence_add_callback);
/**
* fence_remove_callback - remove a callback from the signaling list
* @fence: [in] the fence to wait on
* @cb: [in] the callback to remove
*
* Remove a previously queued callback from the fence. This function returns
* true if the callback is successfully removed, or false if the fence has
* already been signaled.
*
* *WARNING*:
* Cancelling a callback should only be done if you really know what you're
* doing, since deadlocks and race conditions could occur all too easily. For
* this reason, it should only ever be done on hardware lockup recovery,
* with a reference held to the fence.
*/
bool
fence_remove_callback(struct fence *fence, struct fence_cb *cb)
{
	unsigned long flags;
	bool removed;

	spin_lock_irqsave(fence->lock, flags);

	/* A non-empty node means the callback is still queued. */
	removed = !list_empty(&cb->node);
	if (removed)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return removed;
}
EXPORT_SYMBOL(fence_remove_callback);
/* Callback record used by the default wait path: identifies the task to
 * wake when the fence signals. */
struct default_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};
/*
 * fence_test_signaled_any - check whether any fence in an array signaled
 * @fences: array of fences to test
 * @count:  number of entries in @fences
 *
 * Returns true as soon as one fence has its SIGNALED bit set.
 */
static bool
fence_test_signaled_any(struct fence **fences, uint32_t count)
{
	/* Use an unsigned index to match @count and avoid a
	 * signed/unsigned comparison in the loop condition. */
	uint32_t i;

	for (i = 0; i < count; ++i) {
		struct fence *fence = fences[i];

		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			return true;
	}
	return false;
}
/**
* fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
* @fences: [in] array of fences to wait on
* @count: [in] number of fences to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
* interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
* on success.
*
* Synchronous waits for the first fence in the array to be signaled. The
* caller needs to hold a reference to all fences in the array, otherwise a
* fence might be freed before return, resulting in undefined behavior.
*/
/**
* fence_init - Initialize a custom fence.
* @fence: [in] the fence to initialize
* @ops: [in] the fence_ops for operations on this fence
* @lock: [in] the irqsafe spinlock to use for locking this fence
* @context: [in] the execution context this fence is run on
* @seqno: [in] a linear increasing sequence number for this context
*
* Initializes an allocated fence, the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
* refcount again if fence_ops.enable_signaling gets called. This can
* be used for other implementing other types of fence.
*
* context and seqno are used for easy comparison between fences, allowing
* to check which fence is later by simply using fence_later.
*/
void
fence_init(struct fence *fence, const struct fence_ops *ops,
	   spinlock_t *lock, unsigned context, unsigned seqno)
{
	/* A fence without a lock or a complete ops table is unusable. */
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
	       !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	INIT_LIST_HEAD(&fence->cb_list);

	fence->ops = ops;
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
}
EXPORT_SYMBOL(fence_init);

View File

@ -22,22 +22,29 @@
* keep a count of how many are currently allocated from each page. * keep a count of how many are currently allocated from each page.
*/ */
#include <linux/device.h>
#include <ddk.h> #include <linux/dmapool.h>
#include <linux/slab.h> #include <linux/kernel.h>
#include <linux/errno.h> #include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <syscall.h> #include <syscall.h>
struct dma_pool { /* the pool */ struct dma_pool { /* the pool */
struct list_head page_list; struct list_head page_list;
struct mutex lock; spinlock_t lock;
size_t size; size_t size;
struct device *dev;
size_t allocation; size_t allocation;
size_t boundary; size_t boundary;
char name[32];
struct list_head pools; struct list_head pools;
}; };
@ -49,8 +56,10 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
unsigned int offset; unsigned int offset;
}; };
static DEFINE_MUTEX(pools_lock); static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);
/** /**
@ -75,45 +84,45 @@ static DEFINE_MUTEX(pools_lock);
* boundaries of 4KBytes. * boundaries of 4KBytes.
*/ */
struct dma_pool *dma_pool_create(const char *name, struct device *dev, struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t boundary) size_t size, size_t align, size_t boundary)
{ {
struct dma_pool *retval; struct dma_pool *retval;
size_t allocation; size_t allocation;
bool empty = false;
if (align == 0) { if (align == 0)
align = 1; align = 1;
} else if (align & (align - 1)) { else if (align & (align - 1))
return NULL; return NULL;
}
if (size == 0) { if (size == 0)
return NULL; return NULL;
} else if (size < 4) { else if (size < 4)
size = 4; size = 4;
}
if ((size % align) != 0) if ((size % align) != 0)
size = ALIGN(size, align); size = ALIGN(size, align);
allocation = max_t(size_t, size, PAGE_SIZE); allocation = max_t(size_t, size, PAGE_SIZE);
allocation = (allocation+0x7FFF) & ~0x7FFF; allocation = (allocation+0x7FFF) & ~0x7FFF;
if (!boundary) { if (!boundary)
boundary = allocation; boundary = allocation;
} else if ((boundary < size) || (boundary & (boundary - 1))) { else if ((boundary < size) || (boundary & (boundary - 1)))
return NULL; return NULL;
}
retval = kmalloc(sizeof(*retval), GFP_KERNEL); retval = kmalloc(sizeof(*retval), GFP_KERNEL);
if (!retval) if (!retval)
return retval; return retval;
INIT_LIST_HEAD(&retval->page_list); strlcpy(retval->name, name, sizeof(retval->name));
// spin_lock_init(&retval->lock); retval->dev = dev;
INIT_LIST_HEAD(&retval->page_list);
spin_lock_init(&retval->lock);
retval->size = size; retval->size = size;
retval->boundary = boundary; retval->boundary = boundary;
retval->allocation = allocation; retval->allocation = allocation;
@ -139,51 +148,54 @@ static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
} while (offset < pool->allocation); } while (offset < pool->allocation);
} }
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
static struct dma_page *pool_alloc_page(struct dma_pool *pool)
{ {
struct dma_page *page; struct dma_page *page;
page = __builtin_malloc(sizeof(*page)); page = kmalloc(sizeof(*page), mem_flags);
if (!page) if (!page)
return NULL; return NULL;
page->vaddr = (void*)KernelAlloc(pool->allocation); page->vaddr = (void*)KernelAlloc(pool->allocation);
dbgprintf("%s 0x%0x ",__FUNCTION__, page->vaddr); dbgprintf("%s 0x%0x ",__FUNCTION__, page->vaddr);
if (page->vaddr) if (page->vaddr) {
{ #ifdef DMAPOOL_DEBUG
memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
page->dma = GetPgAddr(page->vaddr); page->dma = GetPgAddr(page->vaddr);
dbgprintf("dma 0x%0x\n", page->dma); dbgprintf("dma 0x%0x\n", page->dma);
pool_initialise_page(pool, page); pool_initialise_page(pool, page);
list_add(&page->page_list, &pool->page_list);
page->in_use = 0; page->in_use = 0;
page->offset = 0; page->offset = 0;
} else { } else {
free(page); kfree(page);
page = NULL; page = NULL;
} }
return page; return page;
} }
static inline int is_page_busy(struct dma_page *page) static inline bool is_page_busy(struct dma_page *page)
{ {
return page->in_use != 0; return page->in_use != 0;
} }
static void pool_free_page(struct dma_pool *pool, struct dma_page *page) static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{ {
dma_addr_t dma = page->dma; dma_addr_t dma = page->dma;
#ifdef DMAPOOL_DEBUG
memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
KernelFree(page->vaddr); KernelFree(page->vaddr);
list_del(&page->page_list); list_del(&page->page_list);
free(page); kfree(page);
} }
/** /**
* dma_pool_destroy - destroys a pool of dma memory blocks. * dma_pool_destroy - destroys a pool of dma memory blocks.
* @pool: dma pool that will be destroyed * @pool: dma pool that will be destroyed
@ -194,16 +206,23 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
*/ */
void dma_pool_destroy(struct dma_pool *pool) void dma_pool_destroy(struct dma_pool *pool)
{ {
bool empty = false;
if (unlikely(!pool))
return;
mutex_lock(&pools_reg_lock);
mutex_lock(&pools_lock); mutex_lock(&pools_lock);
list_del(&pool->pools); list_del(&pool->pools);
mutex_unlock(&pools_lock); mutex_unlock(&pools_lock);
mutex_unlock(&pools_reg_lock);
while (!list_empty(&pool->page_list)) { while (!list_empty(&pool->page_list)) {
struct dma_page *page; struct dma_page *page;
page = list_entry(pool->page_list.next, page = list_entry(pool->page_list.next,
struct dma_page, page_list); struct dma_page, page_list);
if (is_page_busy(page)) if (is_page_busy(page)) {
{
printk(KERN_ERR "dma_pool_destroy %p busy\n", printk(KERN_ERR "dma_pool_destroy %p busy\n",
page->vaddr); page->vaddr);
/* leak the still-in-use consistent memory */ /* leak the still-in-use consistent memory */
@ -215,7 +234,7 @@ void dma_pool_destroy(struct dma_pool *pool)
kfree(pool); kfree(pool);
} }
EXPORT_SYMBOL(dma_pool_destroy);
/** /**
* dma_pool_alloc - get a block of consistent memory * dma_pool_alloc - get a block of consistent memory
@ -230,55 +249,82 @@ void dma_pool_destroy(struct dma_pool *pool)
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle) dma_addr_t *handle)
{ {
u32 efl; unsigned long flags;
struct dma_page *page; struct dma_page *page;
size_t offset; size_t offset;
void *retval; void *retval;
efl = safe_cli();
restart: spin_lock_irqsave(&pool->lock, flags);
list_for_each_entry(page, &pool->page_list, page_list) { list_for_each_entry(page, &pool->page_list, page_list) {
if (page->offset < pool->allocation) if (page->offset < pool->allocation)
goto ready; goto ready;
} }
page = pool_alloc_page(pool);
if (!page)
{
retval = NULL;
goto done;
}
/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
spin_unlock_irqrestore(&pool->lock, flags);
page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
if (!page)
return NULL;
spin_lock_irqsave(&pool->lock, flags);
list_add(&page->page_list, &pool->page_list);
ready: ready:
page->in_use++; page->in_use++;
offset = page->offset; offset = page->offset;
page->offset = *(int *)(page->vaddr + offset); page->offset = *(int *)(page->vaddr + offset);
retval = offset + page->vaddr; retval = offset + page->vaddr;
*handle = offset + page->dma; *handle = offset + page->dma;
done: #ifdef DMAPOOL_DEBUG
safe_sti(efl); {
int i;
u8 *data = retval;
/* page->offset is stored in first 4 bytes */
for (i = sizeof(page->offset); i < pool->size; i++) {
if (data[i] == POOL_POISON_FREED)
continue;
if (pool->dev)
dev_err(pool->dev,
"dma_pool_alloc %s, %p (corrupted)\n",
pool->name, retval);
else
pr_err("dma_pool_alloc %s, %p (corrupted)\n",
pool->name, retval);
/*
* Dump the first 4 bytes even if they are not
* POOL_POISON_FREED
*/
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
data, pool->size, 1);
break;
}
}
if (!(mem_flags & __GFP_ZERO))
memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
spin_unlock_irqrestore(&pool->lock, flags);
if (mem_flags & __GFP_ZERO)
memset(retval, 0, pool->size);
return retval; return retval;
} }
EXPORT_SYMBOL(dma_pool_alloc);
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{ {
struct dma_page *page; struct dma_page *page;
u32 efl;
efl = safe_cli();
list_for_each_entry(page, &pool->page_list, page_list) { list_for_each_entry(page, &pool->page_list, page_list) {
if (dma < page->dma) if (dma < page->dma)
continue; continue;
if (dma < (page->dma + pool->allocation)) if ((dma - page->dma) < pool->allocation)
goto done; return page;
} }
page = NULL; return NULL;
done:
safe_sti(efl);
return page;
} }
/** /**
@ -296,27 +342,75 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
unsigned long flags; unsigned long flags;
unsigned int offset; unsigned int offset;
u32 efl; spin_lock_irqsave(&pool->lock, flags);
page = pool_find_page(pool, dma); page = pool_find_page(pool, dma);
if (!page) { if (!page) {
printk(KERN_ERR "dma_pool_free %p/%lx (bad dma)\n", spin_unlock_irqrestore(&pool->lock, flags);
vaddr, (unsigned long)dma); printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
pool->name, vaddr, (unsigned long)dma);
return; return;
} }
offset = vaddr - page->vaddr; offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
if ((dma - page->dma) != offset) {
spin_unlock_irqrestore(&pool->lock, flags);
if (pool->dev)
dev_err(pool->dev,
"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
pool->name, vaddr, (unsigned long long)dma);
else
printk(KERN_ERR
"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
pool->name, vaddr, (unsigned long long)dma);
return;
}
{
unsigned int chain = page->offset;
while (chain < pool->allocation) {
if (chain != offset) {
chain = *(int *)(page->vaddr + chain);
continue;
}
spin_unlock_irqrestore(&pool->lock, flags);
if (pool->dev)
dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
"already free\n", pool->name,
(unsigned long long)dma);
else
printk(KERN_ERR "dma_pool_free %s, dma %Lx "
"already free\n", pool->name,
(unsigned long long)dma);
return;
}
}
memset(vaddr, POOL_POISON_FREED, pool->size);
#endif
efl = safe_cli(); page->in_use--;
{ *(int *)vaddr = page->offset;
page->in_use--; page->offset = offset;
*(int *)vaddr = page->offset;
page->offset = offset;
/* /*
* Resist a temptation to do * Resist a temptation to do
* if (!is_page_busy(page)) pool_free_page(pool, page); * if (!is_page_busy(page)) pool_free_page(pool, page);
* Better have a few empty pages hang around. * Better have a few empty pages hang around.
*/ */
}safe_sti(efl); spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
/*
* Managed DMA pool
*/
static void dmam_pool_release(struct device *dev, void *res)
{
struct dma_pool *pool = *(struct dma_pool **)res;
dma_pool_destroy(pool);
}
static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
return *(struct dma_pool **)res == match_data;
} }

View File

@ -0,0 +1,87 @@
/*
* fbsysfs.c - framebuffer device class and attributes
*
* Copyright (c) 2004 James Simmons <jsimmons@infradead.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* Note: currently there's only stubs for framebuffer_alloc and
* framebuffer_release here. The reason for that is that until all drivers
* are converted to use it a sysfsification will open OOPSable races.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/module.h>
#define FB_SYSFS_FLAG_ATTR 1
/**
* framebuffer_alloc - creates a new frame buffer info structure
*
* @size: size of driver private data, can be zero
* @dev: pointer to the device for this fb, this can be NULL
*
* Creates a new frame buffer info structure. Also reserves @size bytes
* for driver private data (info->par). info->par (if any) will be
* aligned to sizeof(long).
*
* Returns the new structure, or NULL if an error occurred.
*
*/
struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
{
	size_t bytes_per_long = BITS_PER_LONG / 8;
	/* Pad so info->par (if any) starts on a sizeof(long) boundary. */
	size_t padding = bytes_per_long -
			 (sizeof(struct fb_info) % bytes_per_long);
	size_t info_size = sizeof(struct fb_info);
	struct fb_info *info;
	char *mem;

	if (size)
		info_size += padding;

	/* One zeroed allocation holds fb_info plus the driver's par. */
	mem = kzalloc(info_size + size, GFP_KERNEL);
	if (!mem)
		return NULL;

	info = (struct fb_info *)mem;
	if (size)
		info->par = mem + info_size;

	info->device = dev;

#ifdef CONFIG_FB_BACKLIGHT
	mutex_init(&info->bl_curve_mutex);
#endif

	return info;
}
EXPORT_SYMBOL(framebuffer_alloc);
/**
* framebuffer_release - marks the structure available for freeing
*
* @info: frame buffer info structure
*
* Drop the reference count of the device embedded in the
* framebuffer info structure.
*
*/
void framebuffer_release(struct fb_info *info)
{
if (!info)
return;
kfree(info->apertures);
kfree(info);
}
EXPORT_SYMBOL(framebuffer_release);

View File

@ -465,7 +465,7 @@ struct ttm_bo_global {
* Constant after init. * Constant after init.
*/ */
// struct kobject kobj; struct kobject kobj;
struct ttm_mem_global *mem_glob; struct ttm_mem_global *mem_glob;
struct page *dummy_read_page; struct page *dummy_read_page;
struct ttm_mem_shrink shrink; struct ttm_mem_shrink shrink;

View File

@ -40,7 +40,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <drm/drm_hashtab.h> #include <drm/drm_hashtab.h>
#include <linux/kref.h> #include <linux/kref.h>
//#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
#include <ttm/ttm_memory.h> #include <ttm/ttm_memory.h>
@ -345,6 +345,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
uint32_t handle, uint32_t flags, uint32_t handle, uint32_t flags,
int *prime_fd); int *prime_fd);
//#define ttm_prime_object_kfree(__obj, __prime) \ #define ttm_prime_object_kfree(__obj, __prime) \
// kfree_rcu(__obj, __prime.base.rhead) kfree_rcu(__obj, __prime.base.rhead)
#endif #endif

View File

@ -144,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
*/ */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \ #define __trace_if(cond) \
if (__builtin_constant_p((cond)) ? !!(cond) : \ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
({ \ ({ \
int ______r; \ int ______r; \
static struct ftrace_branch_data \ static struct ftrace_branch_data \

View File

@ -62,4 +62,8 @@ struct vm_operations_struct {
}; };
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
static inline int set_page_dirty(struct page *page)
{ return 0; };
#endif #endif

View File

@ -458,46 +458,10 @@ int rcu_read_lock_bh_held(void);
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
* RCU-sched read-side critical section. In absence of * RCU-sched read-side critical section. In absence of
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
* critical section unless it can prove otherwise. Note that disabling * critical section unless it can prove otherwise.
* of preemption (including disabling irqs) counts as an RCU-sched
* read-side critical section. This is useful for debug checks in functions
* that required that they be called within an RCU-sched read-side
* critical section.
*
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
*
* Note that if the CPU is in the idle loop from an RCU point of
* view (ie: that we are in the section between rcu_idle_enter() and
* rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
* did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
* that are in such a section, considering these as in extended quiescent
* state, so such a CPU is effectively never in an RCU read-side critical
* section regardless of what RCU primitives it invokes. This state of
* affairs is required --- we need to keep an RCU-free window in idle
* where the CPU may possibly enter into low power mode. This way we can
* notice an extended quiescent state to other CPUs that started a grace
* period. Otherwise we would delay any grace period as long as we run in
* the idle task.
*
* Similarly, we avoid claiming an SRCU read lock held if the current
* CPU is offline.
*/ */
#ifdef CONFIG_PREEMPT_COUNT #ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void) int rcu_read_lock_sched_held(void);
{
int lockdep_opinion = 0;
if (!debug_lockdep_rcu_enabled())
return 1;
if (!rcu_is_watching())
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
if (debug_locks)
lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */ #else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void) static inline int rcu_read_lock_sched_held(void)
{ {
@ -537,14 +501,14 @@ static inline int rcu_read_lock_sched_held(void)
#ifdef CONFIG_PROVE_RCU #ifdef CONFIG_PROVE_RCU
/** /**
* rcu_lockdep_assert - emit lockdep splat if specified condition not met * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check * @c: condition to check
* @s: informative message * @s: informative message
*/ */
#define rcu_lockdep_assert(c, s) \ #define RCU_LOCKDEP_WARN(c, s) \
do { \ do { \
static bool __section(.data.unlikely) __warned; \ static bool __section(.data.unlikely) __warned; \
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
__warned = true; \ __warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \ } \
@ -553,7 +517,7 @@ static inline int rcu_read_lock_sched_held(void)
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void) static inline void rcu_preempt_sleep_check(void)
{ {
rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
"Illegal context switch in RCU read-side critical section"); "Illegal context switch in RCU read-side critical section");
} }
#else /* #ifdef CONFIG_PROVE_RCU */ #else /* #ifdef CONFIG_PROVE_RCU */
@ -565,15 +529,15 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_sleep_check() \ #define rcu_sleep_check() \
do { \ do { \
rcu_preempt_sleep_check(); \ rcu_preempt_sleep_check(); \
rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
"Illegal context switch in RCU-bh read-side critical section"); \ "Illegal context switch in RCU-bh read-side critical section"); \
rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
"Illegal context switch in RCU-sched read-side critical section"); \ "Illegal context switch in RCU-sched read-side critical section"); \
} while (0) } while (0)
#else /* #ifdef CONFIG_PROVE_RCU */ #else /* #ifdef CONFIG_PROVE_RCU */
#define rcu_lockdep_assert(c, s) do { } while (0) #define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0) #define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */ #endif /* #else #ifdef CONFIG_PROVE_RCU */
@ -604,13 +568,13 @@ static inline void rcu_preempt_sleep_check(void)
({ \ ({ \
/* Dependency order vs. p above. */ \ /* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \ rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \ ((typeof(*p) __force __kernel *)(________p1)); \
}) })
#define __rcu_dereference_protected(p, c, space) \ #define __rcu_dereference_protected(p, c, space) \
({ \ ({ \
rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_dereference_sparse(p, space); \ rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \ ((typeof(*p) __force __kernel *)(p)); \
}) })
@ -834,7 +798,7 @@ static inline void rcu_read_lock(void)
__rcu_read_lock(); __rcu_read_lock();
__acquire(RCU); __acquire(RCU);
rcu_lock_acquire(&rcu_lock_map); rcu_lock_acquire(&rcu_lock_map);
rcu_lockdep_assert(rcu_is_watching(), RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock() used illegally while idle"); "rcu_read_lock() used illegally while idle");
} }
@ -885,7 +849,7 @@ static inline void rcu_read_lock(void)
*/ */
static inline void rcu_read_unlock(void) static inline void rcu_read_unlock(void)
{ {
rcu_lockdep_assert(rcu_is_watching(), RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle"); "rcu_read_unlock() used illegally while idle");
__release(RCU); __release(RCU);
__rcu_read_unlock(); __rcu_read_unlock();
@ -914,7 +878,7 @@ static inline void rcu_read_lock_bh(void)
local_bh_disable(); local_bh_disable();
__acquire(RCU_BH); __acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map); rcu_lock_acquire(&rcu_bh_lock_map);
rcu_lockdep_assert(rcu_is_watching(), RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle"); "rcu_read_lock_bh() used illegally while idle");
} }
@ -925,7 +889,7 @@ static inline void rcu_read_lock_bh(void)
*/ */
static inline void rcu_read_unlock_bh(void) static inline void rcu_read_unlock_bh(void)
{ {
rcu_lockdep_assert(rcu_is_watching(), RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle"); "rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map); rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH); __release(RCU_BH);
@ -950,7 +914,7 @@ static inline void rcu_read_lock_sched(void)
preempt_disable(); preempt_disable();
__acquire(RCU_SCHED); __acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map); rcu_lock_acquire(&rcu_sched_lock_map);
rcu_lockdep_assert(rcu_is_watching(), RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle"); "rcu_read_lock_sched() used illegally while idle");
} }
@ -968,7 +932,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/ */
static inline void rcu_read_unlock_sched(void) static inline void rcu_read_unlock_sched(void)
{ {
rcu_lockdep_assert(rcu_is_watching(), RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle"); "rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map); rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED); __release(RCU_SCHED);

View File

@ -1,3 +1,35 @@
#ifndef _LINUX_VMALLOC_H #ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H #define _LINUX_VMALLOC_H
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
* Maximum alignment for ioremap() regions.
* Can be overriden by arch-specific value.
*/
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
#endif
extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void vfree(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);
#endif /* _LINUX_VMALLOC_H */ #endif /* _LINUX_VMALLOC_H */

View File

@ -49,7 +49,7 @@ void* STDCALL AllocKernelSpace(size_t size)__asm__("AllocKernelSpace");
void STDCALL FreeKernelSpace(void *mem)__asm__("FreeKernelSpace"); void STDCALL FreeKernelSpace(void *mem)__asm__("FreeKernelSpace");
addr_t STDCALL MapIoMem(addr_t base, size_t size, u32 flags)__asm__("MapIoMem"); addr_t STDCALL MapIoMem(addr_t base, size_t size, u32 flags)__asm__("MapIoMem");
void* STDCALL KernelAlloc(size_t size)__asm__("KernelAlloc"); void* STDCALL KernelAlloc(size_t size)__asm__("KernelAlloc");
void* STDCALL KernelFree(void *mem)__asm__("KernelFree"); void* STDCALL KernelFree(const void *mem)__asm__("KernelFree");
void* STDCALL UserAlloc(size_t size)__asm__("UserAlloc"); void* STDCALL UserAlloc(size_t size)__asm__("UserAlloc");
int STDCALL UserFree(void *mem)__asm__("UserFree"); int STDCALL UserFree(void *mem)__asm__("UserFree");
@ -527,10 +527,6 @@ static inline void *vzalloc(unsigned long size)
return mem; return mem;
}; };
static inline void vfree(void *addr)
{
KernelFree(addr);
}
static inline int power_supply_is_system_supplied(void) { return -1; }; static inline int power_supply_is_system_supplied(void) { return -1; };