update ddk

git-svn-id: svn://kolibrios.org@2966 a494cfbc-eb01-0410-851d-a64ba20cac60
Author: Sergey Semyonov (Serge), 2012-09-04 21:40:09 +00:00
parent 4d9c0c2526
commit 96aa59bb73
4 changed files with 115 additions and 52 deletions


@ -24,6 +24,7 @@ NAME_SRCS:= \
io/write.c \
linux/idr.c \
linux/firmware.c \
linux/kref.c \
linux/list_sort.c \
linux/dmapool.c \
linux/ctype.c \


@ -2,8 +2,9 @@
.file "export.s"
.intel_syntax
.text
.text
.global _AllocKernelSpace
.global _AllocPage
.global _AllocPages
@ -14,9 +15,11 @@
.global _CreateRingBuffer
.global _Delay
.global _DestroyEvent
.global _DestroyObject
.global _FreeKernelSpace
.global _FreePage
.global _GetDisplay
.global _GetEvent
@ -25,6 +28,7 @@
.global _GetService
.global _GetTimerTicks
.global _GetStackBase
.global _GetWindowRect
.global _KernelAlloc
.global _KernelFree
@ -53,11 +57,14 @@
.global _SetScreen
.global _SysMsgBoardStr
.global _TimerHs
.global _UserAlloc
.global _WaitEvent
.def _AllocKernelSpace; .scl 2; .type 32; .endef
.def _AllocPage; .scl 2; .type 32; .endef
.def _AllocPages; .scl 2; .type 32; .endef
@ -68,9 +75,11 @@
.def _CreateRingBuffer; .scl 2; .type 32; .endef
.def _Delay; .scl 2; .type 32; .endef
.def _DestroyEvent; .scl 2; .type 32; .endef
.def _DestroyObject; .scl 2; .type 32; .endef
.def _FreeKernelSpace; .scl 2; .type 32; .endef
.def _FreePage; .scl 2; .type 32; .endef
.def _GetDisplay; .scl 2; .type 32; .endef
.def _GetEvent; .scl 2; .type 32; .endef
@ -79,6 +88,7 @@
.def _GetService; .scl 2; .type 32; .endef
.def _GetTimerTicks; .scl 2; .type 32; .endef
.def _GetStackBase; .scl 2; .type 32; .endef
.def _GetWindowRect; .scl 2; .type 32; .endef
.def _KernelAlloc; .scl 2; .type 32; .endef
.def _KernelFree; .scl 2; .type 32; .endef
@ -106,12 +116,14 @@
.def _SetKeyboardData; .scl 2; .type 32; .endef
.def _SysMsgBoardStr; .scl 2; .type 32; .endef
.def _TimerHs; .scl 2; .type 32; .endef
.def _UserAlloc; .scl 2; .type 32; .endef
.def _WaitEvent; .scl 2; .type 32; .endef
_AllocKernelSpace:
_AllocPage:
_AllocPages:
@ -123,9 +135,11 @@ _CreateRingBuffer:
_Delay:
_DestroyEvent:
_DestroyObject:
_FreeKernelSpace:
_FreePage:
_GetDisplay:
_GetEvent:
@ -134,6 +148,7 @@ _GetPgAddr:
_GetService:
_GetTimerTicks:
_GetStackBase:
_GetWindowRect:
_KernelAlloc:
_KernelFree:
@ -161,12 +176,16 @@ _SetKeyboardData:
_SetScreen:
_SysMsgBoardStr:
_TimerHs:
_UserAlloc:
_WaitEvent:
ret
.section .drectve
.section .drectve
.ascii " -export:AllocKernelSpace" # stdcall
.ascii " -export:AllocPage" # gcc ABI
.ascii " -export:AllocPages" # gcc ABI
@ -180,9 +199,11 @@ _WaitEvent:
.ascii " -export:Delay" # stdcall
.ascii " -export:DestroyEvent"
.ascii " -export:DestroyObject"
.ascii " -export:FreeKernelSpace" # stdcall
.ascii " -export:FreePage" #
.ascii " -export:GetDisplay" # stdcall
.ascii " -export:GetEvent" #
@ -191,6 +212,7 @@ _WaitEvent:
.ascii " -export:GetService" # stdcall
.ascii " -export:GetTimerTicks" #
.ascii " -export:GetStackBase" #
.ascii " -export:GetWindowRect" # fastcall
.ascii " -export:KernelAlloc" # stdcall
@ -220,6 +242,8 @@ _WaitEvent:
.ascii " -export:SetScreen" # stdcall
.ascii " -export:SysMsgBoardStr" # stdcall
.ascii " -export:TimerHs" # stdcall
.ascii " -export:UserAlloc" # stdcall
.ascii " -export:WaitEvent" # stdcall


@ -117,7 +117,6 @@ static struct idr_layer *get_from_free_list(struct idr *idp)
return(p);
}
static void idr_layer_rcu_free(struct rcu_head *head)
{
struct idr_layer *layer;
@ -126,14 +125,11 @@ static void idr_layer_rcu_free(struct rcu_head *head)
kfree(layer);
}
static inline void free_layer(struct idr_layer *p)
{
kfree(p);
}
/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
@ -174,21 +170,20 @@ static void idr_mark_full(struct idr_layer **pa, int id)
}
}
/**
* idr_pre_get - reserver resources for idr allocation
* idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
* @gfp_mask: memory allocation flags
*
* This function should be called prior to locking and calling the
* idr_get_new* functions. It preallocates enough memory to satisfy
* the worst possible allocation.
* This function should be called prior to calling the idr_get_new* functions.
* It preallocates enough memory to satisfy the worst possible allocation. The
* caller should pass in GFP_KERNEL if possible. This of course requires that
* no spinning locks be held.
*
* If the system is REALLY out of memory this function returns 0,
* otherwise 1.
* If the system is REALLY out of memory this function returns %0,
* otherwise %1.
*/
int idr_pre_get(struct idr *idp, u32_t gfp_mask)
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < IDR_FREE_MAX) {
struct idr_layer *new;
@ -267,7 +262,6 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
return id;
}
static int idr_get_empty_slot(struct idr *idp, int starting_id,
struct idr_layer **pa)
{
@ -352,22 +346,25 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
/**
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
* @ptr: pointer you want associated with the ide
* @start_id: id to start search at
* @ptr: pointer you want associated with the id
* @starting_id: id to start search at
* @id: pointer to the allocated handle
*
* This is the allocate id function. It should be called with any
* required locks.
*
* If memory is required, it will return -EAGAIN, you should unlock
* and go back to the idr_pre_get() call. If the idr is full, it will
* return -ENOSPC.
* If allocation from IDR's private freelist fails, idr_get_new_above() will
* return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
* IDR's preallocation and then retry the idr_get_new_above() call.
*
* @id returns a value in the range @starting_id ... 0x7fffffff
* If the idr is full idr_get_new_above() will return %-ENOSPC.
*
* @id returns a value in the range @starting_id ... %0x7fffffff
*/
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
int rv;
rv = idr_get_new_above_int(idp, ptr, starting_id);
/*
* This is a cheap hack until the IDR code can be fixed to
@ -385,17 +382,16 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
/**
* idr_get_new - allocate new idr entry
* @idp: idr handle
* @ptr: pointer you want associated with the ide
* @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
* This is the allocate id function. It should be called with any
* required locks.
* If allocation from IDR's private freelist fails, idr_get_new_above() will
* return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
* IDR's preallocation and then retry the idr_get_new_above() call.
*
* If memory is required, it will return -EAGAIN, you should unlock
* and go back to the idr_pre_get() call. If the idr is full, it will
* return -ENOSPC.
* If the idr is full idr_get_new_above() will return %-ENOSPC.
*
* @id returns a value in the range 0 ... 0x7fffffff
* @id returns a value in the range %0 ... %0x7fffffff
*/
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
@ -457,7 +453,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
}
/**
* idr_remove - remove the given id and free it's slot
* idr_remove - remove the given id and free its slot
* @idp: idr handle
* @id: unique key
*/
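
Taken together, the comments above spell out the caller-side pattern: reserve memory with idr_pre_get(), allocate with idr_get_new()/idr_get_new_above() and retry on %-EAGAIN, look entries up with idr_find(), and drop the mapping with idr_remove(). A minimal sketch of that cycle (hypothetical caller code with a made-up my_object type, not part of this commit; the header path is assumed):

    #include <linux/idr.h>                           /* assumed header location in this DDK            */

    static struct idr my_idr;                        /* assumed set up earlier with idr_init(&my_idr)  */

    int store_object(struct my_object *obj)
    {
        int id, ret;

        do {
            if (!idr_pre_get(&my_idr, GFP_KERNEL))   /* refill the IDR's private freelist              */
                return -ENOMEM;                      /* 0 here means really out of memory              */
            ret = idr_get_new(&my_idr, obj, &id);    /* bind obj to a fresh id                         */
        } while (ret == -EAGAIN);                    /* freelist ran dry under us: refill and retry    */

        if (ret)
            return ret;                              /* -ENOSPC: the idr is full                       */
        obj->id = id;                                /* made-up field, kept for later idr_remove()     */
        return 0;
    }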
@ -506,7 +502,7 @@ void idr_remove(struct idr *idp, int id)
* function will remove all id mappings and leave all idp_layers
* unused.
*
* A typical clean-up sequence for objects stored in an idr tree, will
* A typical clean-up sequence for objects stored in an idr tree will
* use idr_for_each() to free all objects, if necessary, then
* idr_remove_all() to remove all ids, and idr_destroy() to free
* up the cached idr_layers.
@ -514,6 +510,7 @@ void idr_remove(struct idr *idp, int id)
void idr_remove_all(struct idr *idp)
{
int n, id, max;
int bt_mask;
struct idr_layer *p;
struct idr_layer *pa[MAX_LEVEL];
struct idr_layer **paa = &pa[0];
@ -531,8 +528,10 @@ void idr_remove_all(struct idr *idp)
p = p->ary[(id >> n) & IDR_MASK];
}
bt_mask = id;
id += 1 << n;
while (n < fls(id)) {
/* Get the highest bit that the above add changed from 0->1. */
while (n < fls(id ^ bt_mask)) {
if (p)
free_layer(p);
n += IDR_BITS;
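
The bt_mask line is the functional fix in this hunk: after advancing id, the climb-and-free loop must stop at the highest bit the addition actually flipped, not at the highest bit set in the new id, otherwise it frees intermediate layers that still cover unvisited ids. A concrete case, assuming IDR_BITS == 5 (the 32-bit value), so n == 5 at the leaf:

    bt_mask = 0x420,  id += 1 << 5   ->  id = 0x440
    old test:  n < fls(id)            = fls(0x440)          = 11  -> frees the exhausted leaf, then also the
                                                                     level-1 layer still covering 0x440..0x7ff
    new test:  n < fls(id ^ bt_mask)  = fls(0x440 ^ 0x420)  = fls(0x060) = 7  -> frees only the exhausted leaf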
@ -544,7 +543,7 @@ void idr_remove_all(struct idr *idp)
/**
* idr_destroy - release all cached layers within an idr tree
* idp: idr handle
* @idp: idr handle
*/
void idr_destroy(struct idr *idp)
{
@ -607,7 +606,7 @@ void *idr_find(struct idr *idp, int id)
* not allowed.
*
* We check the return of @fn each time. If it returns anything other
* than 0, we break out and return that value.
* than %0, we break out and return that value.
*
* The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
*/
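
The return-value rule above (any non-zero value from @fn stops the walk) is what the "typical clean-up sequence" mentioned in the idr_remove_all() comment relies on. A hedged sketch, assuming the callback signature of the stock Linux idr_for_each() and payloads that were allocated with the DDK's kmalloc():

    static int free_one(int id, void *p, void *data)
    {
        kfree(p);              /* p is the pointer that was stored under this id      */
        return 0;              /* keep walking; returning non-zero would abort here   */
    }

    /* teardown: free the payloads, drop all ids, then release the cached layers */
    idr_for_each(&my_idr, free_one, NULL);
    idr_remove_all(&my_idr);
    idr_destroy(&my_idr);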
@ -651,10 +650,11 @@ EXPORT_SYMBOL(idr_for_each);
/**
* idr_get_next - lookup next object of id to given id.
* @idp: idr handle
* @id: pointer to lookup key
* @nextidp: pointer to lookup key
*
* Returns pointer to registered object with id, which is next number to
* given id.
* given id. After being looked up, *@nextidp will be updated for the next
* iteration.
*/
void *idr_get_next(struct idr *idp, int *nextidp)
@ -701,8 +701,8 @@ void *idr_get_next(struct idr *idp, int *nextidp)
* @id: lookup key
*
* Replace the pointer registered with an id and return the old value.
* A -ENOENT return indicates that @id was not found.
* A -EINVAL return indicates that @id was not within valid constraints.
* A %-ENOENT return indicates that @id was not found.
* A %-EINVAL return indicates that @id was not within valid constraints.
*
* The caller must serialize with writers.
*/
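
Because idr_replace() reports errors through the returned pointer, the caller checks them with the usual err.h helpers; this assumes the DDK carries IS_ERR()/PTR_ERR() alongside its other Linux imports, and new_obj/id below are hypothetical values:

    void *old = idr_replace(&my_idr, new_obj, id);
    if (IS_ERR(old))
        return PTR_ERR(old);                         /* -ENOENT or -EINVAL, as documented above */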
@ -767,7 +767,7 @@ void idr_init(struct idr *idp)
/*
* IDA - IDR based ID allocator
*
* this is id allocator without id -> pointer translation. Memory
* This is id allocator without id -> pointer translation. Memory
* usage is much lower than full blown idr because each id only
* occupies a bit. ida uses a custom leaf node which contains
* IDA_BITMAP_BITS slots.
@ -800,8 +800,8 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
* following function. It preallocates enough memory to satisfy the
* worst possible allocation.
*
* If the system is REALLY out of memory this function returns 0,
* otherwise 1.
* If the system is REALLY out of memory this function returns %0,
* otherwise %1.
*/
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
@ -827,17 +827,17 @@ EXPORT_SYMBOL(ida_pre_get);
/**
* ida_get_new_above - allocate new ID above or equal to a start id
* @ida: ida handle
* @staring_id: id to start search at
* @starting_id: id to start search at
* @p_id: pointer to the allocated handle
*
* Allocate new ID above or equal to @ida. It should be called with
* any required locks.
* Allocate new ID above or equal to @starting_id. It should be called
* with any required locks.
*
* If memory is required, it will return -EAGAIN, you should unlock
* If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the ida_pre_get() call. If the ida is full, it will
* return -ENOSPC.
* return %-ENOSPC.
*
* @p_id returns a value in the range @starting_id ... 0x7fffffff.
* @p_id returns a value in the range @starting_id ... %0x7fffffff.
*/
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
@ -919,11 +919,11 @@ EXPORT_SYMBOL(ida_get_new_above);
*
* Allocate new ID. It should be called with any required locks.
*
* If memory is required, it will return -EAGAIN, you should unlock
* If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the idr_pre_get() call. If the idr is full, it will
* return -ENOSPC.
* return %-ENOSPC.
*
* @id returns a value in the range 0 ... 0x7fffffff.
* @p_id returns a value in the range %0 ... %0x7fffffff.
*/
int ida_get_new(struct ida *ida, int *p_id)
{
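
The ida_* comments describe the same reserve/allocate/retry dance as the idr_* ones, only without an associated pointer, since ids are all an ida stores. A compact sketch (hypothetical caller code; ida_init() is assumed to exist as in the stock Linux ida):

    static struct ida my_ida;                        /* assumed initialised with ida_init(&my_ida) */

    int grab_id(void)
    {
        int id, ret;

        do {
            if (!ida_pre_get(&my_ida, GFP_KERNEL))
                return -ENOMEM;
            ret = ida_get_new(&my_ida, &id);         /* or ida_get_new_above() to set a floor */
        } while (ret == -EAGAIN);

        return ret ? ret : id;                       /* ret == -ENOSPC when the ida is full   */
    }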
@ -981,7 +981,7 @@ EXPORT_SYMBOL(ida_remove);
/**
* ida_destroy - release all cached layers within an ida tree
* ida: ida handle
* @ida: ida handle
*/
void ida_destroy(struct ida *ida)
{

drivers/ddk/linux/kref.c (new file, 38 lines added)

@ -0,0 +1,38 @@
#include <linux/kref.h>
#include <asm/atomic.h>
void kref_set(struct kref *kref, int num)
{
atomic_set(&kref->refcount, num);
}
/**
* kref_init - initialize object.
* @kref: object in question.
*/
void kref_init(struct kref *kref)
{
kref_set(kref, 1);
}
void kref_get(struct kref *kref)
{
// WARN_ON(!atomic_read(&kref->refcount));
atomic_inc(&kref->refcount);
}
int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
// WARN_ON(release == NULL);
// WARN_ON(release == (void (*)(struct kref *))kfree);
if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
return 1;
}
return 0;
}
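
The new kref.c is a trimmed copy of the Linux helper: kref_init() starts the count at 1, kref_get() takes an extra reference, and kref_put() drops one and calls the release function exactly once, when the count reaches zero (kref_set() is the raw setter kref_init() builds on). A usage sketch with a hypothetical my_device type, not part of this commit; container_of() and the allocator are assumed to be available from the DDK headers as in stock Linux:

    #include <linux/kref.h>

    struct my_device {                             /* hypothetical refcounted object           */
        struct kref ref;
        /* ... payload ... */
    };

    static void my_device_release(struct kref *kref)
    {
        struct my_device *dev = container_of(kref, struct my_device, ref);
        kfree(dev);                                /* runs exactly once, on the final put      */
    }

    /* dev allocated elsewhere (e.g. with the DDK's kmalloc) */
    kref_init(&dev->ref);                          /* count = 1, held by the creator           */
    kref_get(&dev->ref);                           /* each additional user takes its own ref   */
    kref_put(&dev->ref, my_device_release);        /* count 2 -> 1, returns 0                  */
    kref_put(&dev->ref, my_device_release);        /* count 1 -> 0, calls release, returns 1   */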