libgcc-5.4.0 application startup code

git-svn-id: svn://kolibrios.org@6527 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2016-09-21 11:24:46 +00:00
parent b54812a97e
commit 316da9d76d
8 changed files with 1327 additions and 136 deletions

View File

@@ -29,7 +29,7 @@ CFLAGS+= -Wold-style-definition $(CFLAGS_OPT)
DECNUMINC = -Iconfig/libbid -DENABLE_DECIMAL_BID_FORMAT
INCLUDES = -I. -I../gcc -I../include $(DECNUMINC)
INCLUDES = -I. -I../gcc -I../include -I$(SDK_DIR)/sources/newlib/libc/include $(DECNUMINC)
gcc_compile = $(CC) $(INCLUDES) $(CFLAGS)
@@ -66,7 +66,10 @@ TPBIT_FUNCS = _pack_tf _unpack_tf _addsub_tf _mul_tf _div_tf \
LIB2ADDEH = unwind-dw2.c unwind-dw2-fde.c unwind-sjlj.c unwind-c.c
LIB2ADDEHSTATIC = $(LIB2ADDEH)
LIB2ADD = config/i386/gthr-kos32.c
LIB2ADD = config/i386/gthr-kos32.c \
config/i386/kos32-app.c \
config/i386/libc-loader.c
EH_MODEL = dw2
CUSTOM_CRTSTUFF = yes
@@ -97,12 +100,14 @@ crtfastmath.o: config/i386/crtfastmath.c
$(gcc_compile) -mfxsr -msse -c $<
LIB1ASMSRC = i386/chkstk.S
LIB1ASMFUNCS = _chkstk _chkstk_ms
LIB1ASMSRC = i386/start.S
LIB1ASMFUNCS = _chkstk _chkstk_ms _start
DFP_ENABLE = true
LIB2ADD += config/i386/cpuinfo.c
LIB2ADD += config/i386/sfp-exceptions.c
softfp_float_modes := tf
softfp_int_modes := si di ti

View File

@@ -1,60 +0,0 @@
.section .text
#ifdef L_chkstk
.global ___chkstk
.global __alloca
___chkstk:
__alloca:
pushl %ecx /* save temp */
leal 8(%esp), %ecx /* point past return addr */
subl %eax, %ecx
cmpl %fs:8, %ecx # check low stack limit
jb 1f
movl %esp, %eax /* save old stack pointer */
movl %ecx, %esp /* decrement stack */
movl (%eax), %ecx /* recover saved temp */
movl 4(%eax), %eax /* recover return address */
/* Push the return value back. Doing this instead of just
jumping to %eax preserves the cached call-return stack
used by most modern processors. */
pushl %eax
ret
1:
int3 #trap to debugger
.ascii "Stack overflow"
#endif
#ifdef L_chkstk_ms
.global ___chkstk_ms
___chkstk_ms:
pushl %ecx /* save temp */
pushl %eax
cmpl $0x1000, %eax /* > 4k ?*/
leal 12(%esp), %ecx /* point past return addr */
jb 2f
1:
subl $0x1000, %ecx /* yes, move pointer down 4k*/
cmpl %fs:8, %ecx /* check low stack limit */
jb 3f
orl $0x0, (%ecx) /* probe there */
subl $0x1000, %eax /* decrement count */
cmpl $0x1000, %eax
ja 1b /* and do it again */
2:
subl %eax, %ecx
orl $0x0, (%ecx) /* less than 4k, just peek here */
popl %eax
popl %ecx
ret
3:
int3 #trap to debugger
.ascii "Stack overflow"
#endif

View File

@@ -20,6 +20,7 @@ a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include <kos32sys.h>
#include "gthr-kos32.h"
#define FUTEX_INIT 0
@@ -27,27 +28,12 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define FUTEX_WAIT 2
#define FUTEX_WAKE 3
unsigned int tls_alloc(void);
int tls_free(unsigned int key);
void *tls_get(unsigned int key);
void *tls_set(unsigned int key, void *val);
#define exchange_acquire(ptr, new) \
__atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define exchange_release(ptr, new) \
__atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
static inline void yield(void)
{
__asm__ __volatile__(
"int $0x40"
::"a"(68), "b"(1));
};
int __gthr_kos32_once (__gthread_once_t *once, void (*func) (void))
{
if (once == NULL || func == NULL)
@@ -95,15 +81,12 @@ int __gthr_kos32_key_delete (__gthread_key_t key)
void* __gthr_kos32_getspecific (__gthread_key_t key)
{
void *ptr;
ptr = tls_get(key);
return ptr;
return tls_get(key);
}
int __gthr_kos32_setspecific (__gthread_key_t key, const void *ptr)
{
tls_set(key, CONST_CAST2(void *, const void *, ptr));
return 0;
return tls_set(key, CONST_CAST2(void *, const void *, ptr));
}
void __gthr_kos32_mutex_init_function (__gthread_mutex_t *mutex)
@@ -151,7 +134,7 @@ int __gthr_kos32_mutex_trylock (__gthread_mutex_t *mutex)
{
int zero = 0;
return __atomic_compare_exchange_4(&mutex->lock, &zero, 1,0,__ATOMIC_ACQUIRE,__ATOMIC_RELAXED);
return !__atomic_compare_exchange_4(&mutex->lock, &zero, 1,0,__ATOMIC_ACQUIRE,__ATOMIC_RELAXED);
}
int __gthr_kos32_mutex_unlock (__gthread_mutex_t *mutex)
@@ -173,79 +156,102 @@ int __gthr_kos32_mutex_unlock (__gthread_mutex_t *mutex)
void __gthr_kos32_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
{
// mutex->counter = -1;
mutex->depth = 0;
mutex->owner = 0;
// mutex->sema = CreateSemaphoreW (NULL, 0, 65535, NULL);
int handle;
mutex->lock = 0;
__asm__ volatile(
"int $0x40\t"
:"=a"(handle)
:"a"(77),"b"(FUTEX_INIT),"c"(mutex));
mutex->handle = handle;
mutex->depth = 0;
mutex->owner = 0;
}
#if 0
int
__gthr_win32_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
int __gthr_kos32_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
DWORD me = GetCurrentThreadId();
if (InterlockedIncrement (&mutex->counter) == 0)
int tmp;
unsigned long me = (unsigned long)tls_get(TLS_KEY_LOW_STACK);
if( __sync_fetch_and_add(&mutex->lock, 1) == 0)
{
mutex->depth = 1;
mutex->owner = me;
mutex->depth = 1;
mutex->owner = me;
return 0;
}
else if (mutex->owner == me)
else if (mutex->owner == me)
{
InterlockedDecrement (&mutex->counter);
++(mutex->depth);
__sync_fetch_and_sub(&mutex->lock, 1);
++(mutex->depth);
}
else if (WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
else while (exchange_acquire (&mutex->lock, 2) != 0)
{
mutex->depth = 1;
mutex->owner = me;
}
else
{
/* WaitForSingleObject returns WAIT_FAILED, and we can only do
some best-effort cleanup here. */
InterlockedDecrement (&mutex->counter);
return 1;
}
return 0;
__asm__ volatile(
"int $0x40\t\n"
:"=a"(tmp)
:"a"(77),"b"(FUTEX_WAIT),
"c"(mutex->handle),"d"(2),"S"(0));
mutex->depth = 1;
mutex->owner = me;
};
return 0;
}
int
__gthr_win32_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
int __gthr_kos32_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
DWORD me = GetCurrentThreadId();
if (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
unsigned long me = (unsigned long)tls_get(TLS_KEY_LOW_STACK);
int zero = 0;
if(__atomic_compare_exchange_4(&mutex->lock, &zero, 1,0,__ATOMIC_ACQUIRE,__ATOMIC_RELAXED))
{
mutex->depth = 1;
mutex->owner = me;
mutex->depth = 1;
mutex->owner = me;
}
else if (mutex->owner == me)
++(mutex->depth);
else
return 1;
else if (mutex->owner == me)
++(mutex->depth);
else
return 1;
return 0;
return 0;
}
int
__gthr_win32_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
int __gthr_kos32_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
--(mutex->depth);
if (mutex->depth == 0)
--(mutex->depth);
if (mutex->depth == 0)
{
mutex->owner = 0;
int prev;
if (InterlockedDecrement (&mutex->counter) >= 0)
return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
}
prev = exchange_release (&mutex->lock, 0);
return 0;
if (prev != 1)
{
__asm__ volatile(
"int $0x40\t"
:"=a"(prev)
:"a"(77),"b"(FUTEX_WAKE),
"c"(mutex->handle),"d"(1));
};
mutex->owner = 0;
};
return 0;
}
int
__gthr_win32_recursive_mutex_destroy (__gthread_recursive_mutex_t *mutex)
int __gthr_kos32_recursive_mutex_destroy (__gthread_recursive_mutex_t *mutex)
{
CloseHandle ((HANDLE) mutex->sema);
return 0;
int retval;
__asm__ volatile(
"int $0x40\t"
:"=a"(retval)
:"a"(77),"b"(FUTEX_DESTROY),"c"(mutex->handle));
return 0;
}
#endif

View File

@@ -0,0 +1,31 @@
/*
* This file has no copyright assigned and is placed in the Public Domain.
* This file is a part of the kos32-runtime package.
* No warranty is given; refer to the file DISCLAIMER within the package.
*
* Source code for the startup procedures used by all programs. This code
* is compiled to make crt1.o, which should be located in the library path.
*
*/
void *load_libc(void);
void* get_entry_point(void *raw);
void __attribute__((noreturn))
__crt_startup (void)
{
void __attribute__((noreturn)) (*entry)(void *img);
void *img;
img = load_libc();
if(!img)
{
asm ("int $0x40" ::"a"(-1));  /* sysfunction -1: terminate the process */
};
entry = get_entry_point(img);
entry(img);
}

View File

@@ -0,0 +1,202 @@
#include <stdint.h>
#include <kos32sys.h>
#include "pe.h"
#define unlikely(x) __builtin_expect(!!(x), 0)
void* load_libc(void);
/* copy len/4 dwords with rep movsl; any tail below 4 bytes is not copied
   (PE header and section sizes handled here are expected to be dword aligned) */
static inline void sec_copy(void *dst, void *src, size_t len)
{
__asm__ __volatile__ (
"shrl $2, %%ecx \n\t"
"rep movsl"
:
:"c"(len),"S"(src),"D"(dst)
:"cc");
/* the empty asm marks the registers modified by rep movsl as clobbered */
__asm__ __volatile__ (
""
:::"ecx","esi","edi");
};
static inline int IsPowerOf2(uint32_t val)
{
if(val == 0)
return 0;
return (val & (val - 1)) == 0;
}
static int validate_pe(void *raw, size_t raw_size)
{
PIMAGE_DOS_HEADER dos;
PIMAGE_NT_HEADERS32 nt;
dos = (PIMAGE_DOS_HEADER)raw;
if( !raw || raw_size < sizeof(IMAGE_DOS_HEADER) )
return 0;
if( dos->e_magic != IMAGE_DOS_SIGNATURE || dos->e_lfanew <= 0)
return 0;
nt = MakePtr( PIMAGE_NT_HEADERS32, dos, dos->e_lfanew);
if( (uint32_t)nt < (uint32_t)raw)
return 0;
if(nt->Signature != IMAGE_NT_SIGNATURE)
return 0;
if(nt->FileHeader.Machine != IMAGE_FILE_MACHINE_I386)
return 0;
if(nt->OptionalHeader.Magic != IMAGE_NT_OPTIONAL_HDR32_MAGIC)
return 0;
if(nt->OptionalHeader.SectionAlignment < 4096)
{
if(nt->OptionalHeader.FileAlignment != nt->OptionalHeader.SectionAlignment)
return 0;
}
else if(nt->OptionalHeader.SectionAlignment < nt->OptionalHeader.FileAlignment)
return 0;
if(!IsPowerOf2(nt->OptionalHeader.SectionAlignment) ||
!IsPowerOf2(nt->OptionalHeader.FileAlignment))
return 0;
if(nt->FileHeader.NumberOfSections > 96)
return 0;
return 1;
}
static void* create_image(void *raw)
{
PIMAGE_DOS_HEADER dos;
PIMAGE_NT_HEADERS32 nt;
PIMAGE_SECTION_HEADER img_sec;
void *img_base;
unsigned int i;
dos = (PIMAGE_DOS_HEADER)raw;
nt = MakePtr( PIMAGE_NT_HEADERS32, dos, dos->e_lfanew);
img_base = user_alloc(nt->OptionalHeader.SizeOfImage);
if(unlikely(img_base == NULL))
return 0;
sec_copy(img_base, raw, nt->OptionalHeader.SizeOfHeaders);
img_sec = MakePtr(PIMAGE_SECTION_HEADER, nt, sizeof(IMAGE_NT_HEADERS32));
for(i=0; i< nt->FileHeader.NumberOfSections; i++)
{
void *src_ptr;
void *dest_ptr;
if ( img_sec->SizeOfRawData && img_sec->PointerToRawData )
{
src_ptr = MakePtr(void*, raw, img_sec->PointerToRawData);
dest_ptr = MakePtr(void*, img_base, img_sec->VirtualAddress);
sec_copy(dest_ptr, src_ptr, img_sec->SizeOfRawData);
};
img_sec++;
};
if(nt->OptionalHeader.DataDirectory[5].Size)   /* DataDirectory[5]: base relocation table */
{
PIMAGE_BASE_RELOCATION reloc;
uint32_t delta = (uint32_t)img_base - nt->OptionalHeader.ImageBase;
reloc = MakePtr(PIMAGE_BASE_RELOCATION, img_base,
nt->OptionalHeader.DataDirectory[5].VirtualAddress);
while ( reloc->SizeOfBlock != 0 )
{
uint32_t cnt;
uint16_t *entry;
uint16_t reltype;
uint32_t offs;
cnt = (reloc->SizeOfBlock - sizeof(*reloc))/sizeof(uint16_t);
entry = MakePtr( uint16_t*, reloc, sizeof(*reloc) );
for ( i=0; i < cnt; i++ )
{
uint16_t *p16;
uint32_t *p32;
reltype = (*entry & 0xF000) >> 12;
offs = (*entry & 0x0FFF) + reloc->VirtualAddress;
switch(reltype)
{
case 1:  /* IMAGE_REL_BASED_HIGH: add high 16 bits of delta */
p16 = MakePtr(uint16_t*, img_base, offs);
*p16+= (uint16_t)(delta>>16);
break;
case 2:  /* IMAGE_REL_BASED_LOW: add low 16 bits of delta */
p16 = MakePtr(uint16_t*, img_base, offs);
*p16+= (uint16_t)delta;
break;
case 3:  /* IMAGE_REL_BASED_HIGHLOW: add full 32-bit delta */
p32 = MakePtr(uint32_t*, img_base, offs);
*p32+= delta;
}
entry++;
}
reloc = MakePtr(PIMAGE_BASE_RELOCATION, reloc,reloc->SizeOfBlock);
};
// printf("unmap base %p offset %x %d page(s)\n",
// img_base,
// nt->OptionalHeader.DataDirectory[5].VirtualAddress,
// (nt->OptionalHeader.DataDirectory[5].Size+4095)>>12);
user_unmap(img_base,nt->OptionalHeader.DataDirectory[5].VirtualAddress,
nt->OptionalHeader.DataDirectory[5].Size);
};
return img_base;
};
void* get_entry_point(void *raw)
{
PIMAGE_DOS_HEADER dos;
PIMAGE_NT_HEADERS32 nt;
dos = (PIMAGE_DOS_HEADER)raw;
nt = MakePtr( PIMAGE_NT_HEADERS32, dos, dos->e_lfanew);
return MakePtr(void*, raw, nt->OptionalHeader.AddressOfEntryPoint);
};
void* load_libc(void)
{
void *raw_img;
size_t raw_size;
void *img_base = NULL;
ufile_t uf;
uf = load_file("/kolibrios/lib/libc.dll");
raw_img = uf.data;
raw_size = uf.size;
if(raw_img == NULL)
return NULL;
if(validate_pe(raw_img, raw_size) != 0)
img_base = create_image(raw_img);
user_free(raw_img);
return img_base;
}

View File

@@ -0,0 +1,707 @@
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
/*
* Simple doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
* sometimes we already know the next/prev entries and we can
* generate better code by using them directly rather than
* using the generic single-entry routines.
*/
/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
*
*/
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
#define LIST_POISON1 ((struct list_head*)0xFFFF0100)
#define LIST_POISON2 ((struct list_head*)0xFFFF0200)
#define prefetch(x) __builtin_prefetch(x)
struct list_head {
struct list_head *next, *prev;
};
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
static inline void INIT_LIST_HEAD(struct list_head *list)
{
list->next = list;
list->prev = list;
}
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
next->prev = new;
new->next = next;
new->prev = prev;
prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next);
#endif
/**
* list_add - add a new entry
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
/**
* list_add_tail - add a new entry
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
__list_add(new, head->prev, head);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
prev->next = next;
}
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif
/**
* list_replace - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* If @old was empty, it will be overwritten.
*/
static inline void list_replace(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->next->prev = new;
new->prev = old->prev;
new->prev->next = new;
}
static inline void list_replace_init(struct list_head *old,
struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
}
/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
static inline void list_del_init(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
INIT_LIST_HEAD(entry);
}
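/*
 * Illustrative sketch (editor's addition, not part of the committed header):
 * the note on list_del() matters in practice -- after list_del() the entry is
 * poisoned and must not be passed to list_empty(); use list_del_init() when
 * the node may be inspected or re-linked later.  demo_unlink() is hypothetical.
 */
static inline void demo_unlink(struct list_head *node, int will_reuse)
{
    if (will_reuse)
        list_del_init(node);   /* node becomes a valid, empty list again */
    else
        list_del(node);        /* node is left pointing at LIST_POISON1/2 */
}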
/**
* list_move - delete from one list and add as another's head
* @list: the entry to move
* @head: the head that will precede our entry
*/
static inline void list_move(struct list_head *list, struct list_head *head)
{
__list_del(list->prev, list->next);
list_add(list, head);
}
/**
* list_move_tail - delete from one list and add as another's tail
* @list: the entry to move
* @head: the head that will follow our entry
*/
static inline void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del(list->prev, list->next);
list_add_tail(list, head);
}
/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
static inline int list_is_last(const struct list_head *list,
const struct list_head *head)
{
return list->next == head;
}
/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
static inline int list_empty(const struct list_head *head)
{
return head->next == head;
}
/**
* list_empty_careful - tests whether a list is empty and not being modified
* @head: the list to test
*
* Description:
* tests whether a list is empty _and_ checks that no other CPU might be
* in the process of modifying either member (next or prev)
*
* NOTE: using list_empty_careful() without synchronization
* can only be safe if the only activity that can happen
* to the list entry is list_del_init(). Eg. it cannot be used
* if another CPU could re-list_add() it.
*/
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = head->next;
return (next == head) && (next == head->prev);
}
/**
* list_is_singular - tests whether a list has just one entry.
* @head: the list to test.
*/
static inline int list_is_singular(const struct list_head *head)
{
return !list_empty(head) && (head->next == head->prev);
}
static inline void __list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
struct list_head *new_first = entry->next;
list->next = head->next;
list->next->prev = list;
list->prev = entry;
entry->next = list;
head->next = new_first;
new_first->prev = head;
}
/**
* list_cut_position - cut a list into two
* @list: a new list to add all removed entries
* @head: a list with entries
* @entry: an entry within head, could be the head itself
* and if so we won't cut the list
*
* This helper moves the initial part of @head, up to and
* including @entry, from @head to @list. You should
* pass on @entry an element you know is on @head. @list
* should be an empty list or a list you do not care about
* losing its data.
*
*/
static inline void list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
if (list_empty(head))
return;
if (list_is_singular(head) &&
(head->next != entry && head != entry))
return;
if (entry == head)
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
}
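/*
 * Illustrative sketch (editor's addition): detach the front of a queue, up to
 * and including a chosen entry, onto a private list for batch processing.
 * demo_take_front() and its parameters are hypothetical.
 */
static inline void demo_take_front(struct list_head *batch,
                                   struct list_head *queue,
                                   struct list_head *upto)
{
    INIT_LIST_HEAD(batch);                  /* the target list starts out empty  */
    list_cut_position(batch, queue, upto);  /* queue keeps the entries after upto */
}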
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
first->prev = prev;
prev->next = first;
last->next = next;
next->prev = last;
}
/**
* list_splice - join two lists, this is designed for stacks
* @list: the new list to add.
* @head: the place to add it in the first list.
*/
static inline void list_splice(const struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head, head->next);
}
/**
* list_splice_tail - join two lists, each list being a queue
* @list: the new list to add.
* @head: the place to add it in the first list.
*/
static inline void list_splice_tail(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head->prev, head);
}
/**
* list_splice_init - join two lists and reinitialise the emptied list.
* @list: the new list to add.
* @head: the place to add it in the first list.
*
* The list at @list is reinitialised
*/
static inline void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head, head->next);
INIT_LIST_HEAD(list);
}
}
/**
* list_splice_tail_init - join two lists and reinitialise the emptied list
* @list: the new list to add.
* @head: the place to add it in the first list.
*
* Each of the lists is a queue.
* The list at @list is reinitialised
*/
static inline void list_splice_tail_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head->prev, head);
INIT_LIST_HEAD(list);
}
}
/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
/**
* list_first_entry - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*
* Note, that list is expected to be not empty.
*/
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
for (pos = (head)->next; prefetch(pos->next), pos != (head); \
pos = pos->next)
/**
* __list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
* This variant differs from list_for_each() in that it's the
* simplest possible list iteration code, no prefetching is done.
* Use this for code that knows the list to be very short (empty
* or 1 entry) most of the time.
*/
#define __list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
* @pos: the &struct list_head to use as a loop cursor.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
/**
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
* @pos: the &struct list_head to use as a loop cursor.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
prefetch(pos->prev), pos != (head); \
pos = n, n = pos->prev)
/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
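/*
 * Illustrative sketch (editor's addition, not part of the committed header):
 * a hypothetical struct demo_item embeds a list_head; list_add_tail() queues
 * it and list_for_each_entry() walks the list, relying on container_of() to
 * recover the enclosing structure from the embedded member.
 */
struct demo_item {
    int value;
    struct list_head link;
};

static inline void demo_enqueue(struct list_head *items, struct demo_item *it)
{
    list_add_tail(&it->link, items);        /* append in FIFO order */
}

static inline int demo_sum(struct list_head *items)
{
    struct demo_item *pos;
    int sum = 0;
    list_for_each_entry(pos, items, link)   /* pos == list_entry(cursor, struct demo_item, link) */
        sum += pos->value;
    return sum;
}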
/**
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
prefetch(pos->member.prev), &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
* @pos: the type * to use as a start point
* @head: the head of the list
* @member: the name of the list_struct within the struct.
*
* Prepares a pos entry for use as a start point in list_for_each_entry_continue().
*/
#define list_prepare_entry(pos, head, member) \
((pos) ? : list_entry(head, typeof(*pos), member))
/**
* list_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position.
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_continue_reverse - iterate backwards from the given point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* Start to iterate over list of given type backwards, continuing after
* the current position.
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
prefetch(pos->member.prev), &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
* list_for_each_entry_from - iterate over list of given type from the current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* Iterate over list of given type, continuing from current position.
*/
#define list_for_each_entry_from(pos, head, member) \
for (; prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
* list_for_each_entry_safe_continue
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* Iterate over list of given type, continuing after current point,
* safe against removal of list entry.
*/
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
* list_for_each_entry_safe_from
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* Iterate over list of given type from current point, safe against
* removal of list entry.
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
* list_for_each_entry_safe_reverse
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* Iterate backwards over list of given type, safe against removal
* of list entry.
*/
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member), \
n = list_entry(pos->member.prev, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
/*
* Double linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is
* too wasteful.
* You lose the ability to access the tail in O(1).
*/
struct hlist_head {
struct hlist_node *first;
};
struct hlist_node {
struct hlist_node *next, **pprev;
};
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = NULL;
h->pprev = NULL;
}
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
static inline int hlist_empty(const struct hlist_head *h)
{
return !h->first;
}
static inline void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
*pprev = next;
if (next)
next->pprev = pprev;
}
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = (struct hlist_node*)LIST_POISON1;
n->pprev = (struct hlist_node**)LIST_POISON2;
}
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
h->first = n;
n->pprev = &h->first;
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
*(n->pprev) = n;
}
static inline void hlist_add_after(struct hlist_node *n,
struct hlist_node *next)
{
next->next = n->next;
n->next = next;
next->pprev = &n->next;
if(next->next)
next->next->pprev = &next->next;
}
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/
static inline void hlist_move_list(struct hlist_head *old,
struct hlist_head *new)
{
new->first = old->first;
if (new->first)
new->first->pprev = &new->first;
old->first = NULL;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)
/**
* hlist_for_each_entry - iterate over list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
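/*
 * Illustrative sketch (editor's addition): the hash-table use case described
 * above the hlist definitions -- an array of single-pointer heads as buckets.
 * demo_obj, demo_insert(), demo_lookup() and DEMO_BUCKETS are hypothetical.
 */
#define DEMO_BUCKETS 16

struct demo_obj {
    unsigned int key;
    struct hlist_node node;
};

static inline void demo_insert(struct hlist_head *table, struct demo_obj *obj)
{
    hlist_add_head(&obj->node, &table[obj->key % DEMO_BUCKETS]);
}

static inline struct demo_obj *demo_lookup(struct hlist_head *table,
                                           unsigned int key)
{
    struct demo_obj *obj;
    struct hlist_node *cur;
    /* note the two-cursor form: cur walks the nodes, obj is the typed entry */
    hlist_for_each_entry(obj, cur, &table[key % DEMO_BUCKETS], node)
        if (obj->key == key)
            return obj;
    return 0;   /* not found */
}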
/**
* hlist_for_each_entry_continue - iterate over a hlist continuing after current point
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue(tpos, pos, member) \
for (pos = (pos)->next; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_for_each_entry_from - iterate over a hlist continuing from current point
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_from(tpos, pos, member) \
for (; pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @n: another &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
for (pos = (head)->first; \
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
#endif

View File

@@ -0,0 +1,188 @@
typedef unsigned short WORD;
typedef unsigned int DWORD;
typedef int LONG;   /* signed 32-bit, so the "e_lfanew <= 0" check in the loader behaves as intended */
typedef unsigned char BYTE;
#define IMAGE_DOS_SIGNATURE 0x5A4D
#define IMAGE_NT_SIGNATURE 0x00004550
#define IMAGE_NT_OPTIONAL_HDR32_MAGIC 0x10b
#pragma pack(push,2)
typedef struct _IMAGE_DOS_HEADER
{
WORD e_magic;
WORD e_cblp;
WORD e_cp;
WORD e_crlc;
WORD e_cparhdr;
WORD e_minalloc;
WORD e_maxalloc;
WORD e_ss;
WORD e_sp;
WORD e_csum;
WORD e_ip;
WORD e_cs;
WORD e_lfarlc;
WORD e_ovno;
WORD e_res[4];
WORD e_oemid;
WORD e_oeminfo;
WORD e_res2[10];
LONG e_lfanew;
} IMAGE_DOS_HEADER,*PIMAGE_DOS_HEADER;
#pragma pack(pop)
#pragma pack(push,4)
typedef struct _IMAGE_FILE_HEADER
{
WORD Machine;
WORD NumberOfSections;
DWORD TimeDateStamp;
DWORD PointerToSymbolTable;
DWORD NumberOfSymbols;
WORD SizeOfOptionalHeader;
WORD Characteristics;
} IMAGE_FILE_HEADER, *PIMAGE_FILE_HEADER;
#define IMAGE_FILE_DLL 0x2000
#define IMAGE_FILE_MACHINE_I386 0x014c /* Intel 386 or later processors
and compatible processors */
typedef struct _IMAGE_DATA_DIRECTORY {
DWORD VirtualAddress;
DWORD Size;
} IMAGE_DATA_DIRECTORY,*PIMAGE_DATA_DIRECTORY;
#define IMAGE_NUMBEROF_DIRECTORY_ENTRIES 16
typedef struct _IMAGE_OPTIONAL_HEADER {
WORD Magic;
BYTE MajorLinkerVersion;
BYTE MinorLinkerVersion;
DWORD SizeOfCode;
DWORD SizeOfInitializedData;
DWORD SizeOfUninitializedData;
DWORD AddressOfEntryPoint;
DWORD BaseOfCode;
DWORD BaseOfData;
DWORD ImageBase;
DWORD SectionAlignment;
DWORD FileAlignment;
WORD MajorOperatingSystemVersion;
WORD MinorOperatingSystemVersion;
WORD MajorImageVersion;
WORD MinorImageVersion;
WORD MajorSubsystemVersion;
WORD MinorSubsystemVersion;
DWORD Win32VersionValue;
DWORD SizeOfImage;
DWORD SizeOfHeaders;
DWORD CheckSum;
WORD Subsystem;
WORD DllCharacteristics;
DWORD SizeOfStackReserve;
DWORD SizeOfStackCommit;
DWORD SizeOfHeapReserve;
DWORD SizeOfHeapCommit;
DWORD LoaderFlags;
DWORD NumberOfRvaAndSizes;
IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES];
} IMAGE_OPTIONAL_HEADER,*PIMAGE_OPTIONAL_HEADER;
#pragma pack(pop)
#pragma pack(push,4)
typedef struct _IMAGE_NT_HEADERS
{
DWORD Signature;
IMAGE_FILE_HEADER FileHeader;
IMAGE_OPTIONAL_HEADER OptionalHeader;
} IMAGE_NT_HEADERS32,*PIMAGE_NT_HEADERS32;
#define IMAGE_SIZEOF_SHORT_NAME 8
typedef struct _IMAGE_SECTION_HEADER
{
BYTE Name[IMAGE_SIZEOF_SHORT_NAME];
union
{
DWORD PhysicalAddress;
DWORD VirtualSize;
} Misc;
DWORD VirtualAddress;
DWORD SizeOfRawData;
DWORD PointerToRawData;
DWORD PointerToRelocations;
DWORD PointerToLinenumbers;
WORD NumberOfRelocations;
WORD NumberOfLinenumbers;
DWORD Characteristics;
} IMAGE_SECTION_HEADER,*PIMAGE_SECTION_HEADER;
#pragma pack(pop)
#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040
#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080
#define IMAGE_SCN_MEM_SHARED 0x10000000
#define IMAGE_SCN_MEM_EXECUTE 0x20000000
#define IMAGE_SCN_MEM_WRITE 0x80000000
#pragma pack(push,4)
typedef struct _IMAGE_BASE_RELOCATION {
DWORD VirtualAddress;
DWORD SizeOfBlock;
} IMAGE_BASE_RELOCATION,*PIMAGE_BASE_RELOCATION;
#pragma pack(pop)
typedef struct _IMAGE_IMPORT_DESCRIPTOR
{
union
{
DWORD Characteristics;
DWORD OriginalFirstThunk;
};
DWORD TimeDateStamp;
DWORD ForwarderChain;
DWORD Name;
DWORD FirstThunk;
} IMAGE_IMPORT_DESCRIPTOR,*PIMAGE_IMPORT_DESCRIPTOR;
typedef struct _IMAGE_THUNK_DATA32
{
union
{
DWORD ForwarderString;
DWORD Function;
DWORD Ordinal;
DWORD AddressOfData;
} u1;
} IMAGE_THUNK_DATA32,*PIMAGE_THUNK_DATA32;
typedef struct _IMAGE_IMPORT_BY_NAME
{
WORD Hint;
BYTE Name[1];
} IMAGE_IMPORT_BY_NAME,*PIMAGE_IMPORT_BY_NAME;
#define IMAGE_ORDINAL_FLAG 0x80000000
typedef struct _IMAGE_EXPORT_DIRECTORY {
DWORD Characteristics;
DWORD TimeDateStamp;
WORD MajorVersion;
WORD MinorVersion;
DWORD Name;
DWORD Base;
DWORD NumberOfFunctions;
DWORD NumberOfNames;
DWORD AddressOfFunctions;
DWORD AddressOfNames;
DWORD AddressOfNameOrdinals;
} IMAGE_EXPORT_DIRECTORY,*PIMAGE_EXPORT_DIRECTORY;
#define MakePtr( cast, ptr, addValue ) (cast)( (uint32_t)(ptr) + (uint32_t)(addValue) )

View File

@@ -0,0 +1,112 @@
#tls:0 pid process id
#tls:4 tid reserved for thread slot
#tls:8 thread's stack low limit
#tls:12 thread's stack high limit
#tls:16 reserved for libc
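# Illustrative sketch (editor's addition, not part of the commit): code running
# after __start can read these slots through the %fs segment register, e.g.
#     movl %fs:0,  %eax    # process id stored by __start below
#     movl %fs:8,  %edx    # stack low limit, checked by ___chkstk
#     movl %fs:12, %ecx    # stack high limit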
#ifdef L_start
.section .init
.global __start
.align 4
__start:
movl $68, %eax
movl $12, %ebx #sysfunction 68.12: allocate application memory
lea __size_of_stack_reserve__, %ecx
addl $4095, %ecx #load stack size
andl $-4096, %ecx #align to page granularity
int $0x40 #and allocate
testl %eax, %eax
jz 1f
addl %eax, %ecx
movl %eax, %fs:8 #save stack base - low limit
movl %ecx, %fs:12 #save stack top - high limit
movl %ecx, %esp #reload stack
subl $1024, %esp
movl $9, %eax #sysfunction 9: thread information
movl %esp, %ebx #temporary buffer on the stack
movl $-1, %ecx #-1 = current thread
int $0x40
movl 30(%ebx), %eax #offset +30: process identifier
movl %eax, %fs:0 #save pid
addl $1024, %esp
jmp ___crt_startup
1:
int3 #trap to debugger
.ascii "No enough memory for stack allocation"
#endif
#ifdef L_chkstk
.section .text
.global ___chkstk
.global __alloca
___chkstk:
__alloca:
pushl %ecx /* save temp */
leal 8(%esp), %ecx /* point past return addr */
subl %eax, %ecx
cmpl %fs:8, %ecx # check low stack limit
jb 1f
movl %esp, %eax /* save old stack pointer */
movl %ecx, %esp /* decrement stack */
movl (%eax), %ecx /* recover saved temp */
movl 4(%eax), %eax /* recover return address */
/* Push the return value back. Doing this instead of just
jumping to %eax preserves the cached call-return stack
used by most modern processors. */
pushl %eax
ret
1:
int3 #trap to debugger
.ascii "Stack overflow"
#endif
#ifdef L_chkstk_ms
.section .text
.global ___chkstk_ms
___chkstk_ms:
pushl %ecx /* save temp */
pushl %eax
cmpl $0x1000, %eax /* > 4k ?*/
leal 12(%esp), %ecx /* point past return addr */
jb 2f
1:
subl $0x1000, %ecx /* yes, move pointer down 4k*/
cmpl %fs:8, %ecx /* check low stack limit */
jb 3f
orl $0x0, (%ecx) /* probe there */
subl $0x1000, %eax /* decrement count */
cmpl $0x1000, %eax
ja 1b /* and do it again */
2:
subl %eax, %ecx
orl $0x0, (%ecx) /* less than 4k, just peek here */
popl %eax
popl %ecx
ret
3:
int3 #trap to debugger
.ascii "Stack overflow"
#endif