diff --git a/kernel/branches/kolibri_pe/const.inc b/kernel/branches/kolibri_pe/const.inc index 79fcc3dbaa..5c78489384 100644 --- a/kernel/branches/kolibri_pe/const.inc +++ b/kernel/branches/kolibri_pe/const.inc @@ -191,8 +191,10 @@ HEAP_BASE equ 0x80000000 HEAP_MIN_SIZE equ 0x01000000 -page_tabs equ 0xDDC00000 -app_page_tabs equ 0xDDC00000 +page_tabs equ 0xDD800000 +app_page_tabs equ 0xDD800000 + +shared_tabs equ 0xDDC00000 heap_tabs equ (page_tabs+ (HEAP_BASE shr 10)) kernel_tabs equ (page_tabs+ (OS_BASE shr 10)) diff --git a/kernel/branches/kolibri_pe/core/buddy.inc b/kernel/branches/kolibri_pe/core/buddy.inc new file mode 100644 index 0000000000..3f879770dc --- /dev/null +++ b/kernel/branches/kolibri_pe/core/buddy.inc @@ -0,0 +1,368 @@ + +#define BUDDY_SYSTEM_INNER_BLOCK 0xff + +#define frame_index( frame ) \ + (index_t)( (frame) - z_core.frames) + +#define frame_initialize( frame ) \ + (frame)->refcount = 1; \ + (frame)->buddy_order = 0 + +#define buddy_get_order( block) \ + ((frame_t*)(block))->buddy_order + +#define buddy_set_order( block, order) \ + ((frame_t*)(block))->buddy_order = (order) + +#define buddy_mark_busy( block ) \ + ((frame_t*)(block))->refcount = 1 + +#define IS_BUDDY_LEFT_BLOCK(frame) \ + (((frame_index((frame)) >> (frame)->buddy_order) & 0x1) == 0) + +#define IS_BUDDY_RIGHT_BLOCK(frame) \ + (((frame_index((frame)) >> (frame)->buddy_order) & 0x1) == 1) + +#define buddy_mark_available( block ) \ + ((frame_t*)(block))->refcount = 0 + + +static __inline link_t * buddy_bisect(link_t *block) +{ + frame_t *frame_l, *frame_r; + + frame_l = (frame_t*)block; + frame_r = (frame_l + (1 << (frame_l->buddy_order - 1))); + + return &frame_r->buddy_link; +} + +static __inline link_t *buddy_coalesce(link_t *block_1, link_t *block_2) +{ + frame_t *frame1, *frame2; + + frame1 = (frame_t*)block_1; + frame2 = (frame_t*)block_2; + + return frame1 < frame2 ? block_1 : block_2; +} + +static link_t *find_buddy(link_t *block) +{ + frame_t *frame; + index_t index; + u32_t is_left, is_right; + + frame = (frame_t*)block; + // ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),frame->buddy_order)); + + is_left = IS_BUDDY_LEFT_BLOCK( frame); + is_right = IS_BUDDY_RIGHT_BLOCK( frame); + + // ASSERT(is_left ^ is_right); + if (is_left) { + index = (frame_index(frame)) + (1 << frame->buddy_order); + } else { /* if (is_right) */ + index = (frame_index(frame)) - (1 << frame->buddy_order); + } + + if ( index < z_core.count) + { + if (z_core.frames[index].buddy_order == frame->buddy_order && + z_core.frames[index].refcount == 0) { + return &z_core.frames[index].buddy_link; + } + } + + return NULL; +} + + +static link_t *buddy_find_block(link_t *child, u32_t order) +{ + frame_t *frame; + index_t index; + + frame = (frame_t*)child; + + index = frame_index(frame); + do { + if (z_core.frames[index].buddy_order != order) + return &z_core.frames[index].buddy_link; + + } while(index-- > 0); + return NULL; +} + +static void buddy_system_free(link_t *block) +{ + link_t *buddy, *hlp; + u32_t i; + + /* + * Determine block's order. + */ + i = buddy_get_order(block); + + // ASSERT(i <= z->max_order); + + if (i != z_core.max_order) + { + /* + * See if there is any buddy in the list of order i. + */ + buddy = find_buddy( block ); + if (buddy) + { + + // ASSERT(buddy_get_order(z, buddy) == i); + /* + * Remove buddy from the list of order i. + */ + list_remove(buddy); + + /* + * Invalidate order of both block and buddy. 
+ */ + buddy_set_order(block, BUDDY_SYSTEM_INNER_BLOCK); + buddy_set_order(buddy, BUDDY_SYSTEM_INNER_BLOCK); + + /* + * Coalesce block and buddy into one block. + */ + hlp = buddy_coalesce( block, buddy ); + + /* + * Set order of the coalesced block to i + 1. + */ + buddy_set_order(hlp, i + 1); + + /* + * Recursively add the coalesced block to the list of order i + 1. + */ + buddy_system_free( hlp ); + return; + } + } + /* + * Insert block into the list of order i. + */ + list_append(block, &z_core.order[i]); +} + + +static link_t* buddy_alloc( u32_t i) +{ + link_t *res, *hlp; + + ASSERT(i <= z_core.max_order); + + /* + * If the list of order i is not empty, + * the request can be immediately satisfied. + */ + if (!list_empty(&z_core.order[i])) { + res = z_core.order[i].next; + list_remove(res); + buddy_mark_busy(res); + return res; + } + /* + * If order i is already the maximal order, + * the request cannot be satisfied. + */ + if (i == z_core.max_order) + return NULL; + + /* + * Try to recursively satisfy the request from higher order lists. + */ + hlp = buddy_alloc( i + 1 ); + + /* + * The request could not be satisfied + * from higher order lists. + */ + if (!hlp) + return NULL; + + res = hlp; + + /* + * Bisect the block and set order of both of its parts to i. + */ + hlp = buddy_bisect( res ); + + buddy_set_order(res, i); + buddy_set_order(hlp, i); + + /* + * Return the other half to buddy system. Mark the first part + * full, so that it won't coalesce again. + */ + buddy_mark_busy(res); + buddy_system_free( hlp ); + + return res; +} + +static link_t* buddy_alloc_block(link_t *block) +{ + link_t *left,*right, *tmp; + u32_t order; + + left = buddy_find_block(block, BUDDY_SYSTEM_INNER_BLOCK); + ASSERT(left); + list_remove(left); + while (1) + { + if ( !buddy_get_order(left)) + { + buddy_mark_busy(left); + return left; + } + + order = buddy_get_order(left); + + right = buddy_bisect(left); + buddy_set_order(left, order-1); + buddy_set_order(right, order-1); + + tmp = buddy_find_block( block, BUDDY_SYSTEM_INNER_BLOCK); + + if (tmp == right) { + right = left; + left = tmp; + } + ASSERT(tmp == left); + buddy_mark_busy(left); + buddy_system_free(right); + buddy_mark_available(left); + } +} + +static void zone_create(zone_t *z, pfn_t start, count_t count) +{ + unsigned int i; + + spinlock_initialize(&z->lock); + + z->base = start; + z->count = count; + z->free_count = count; + z->busy_count = 0; + + z->max_order = fnzb(count); + + ASSERT(z->max_order < BUDDY_SYSTEM_INNER_BLOCK); + + for (i = 0; i <= z->max_order; i++) + list_initialize(&z->order[i]); + + z->frames = (frame_t *)balloc(count*sizeof(frame_t)); + + for (i = 0; i < count; i++) + frame_initialize(&z->frames[i]); + +/* + for (i = 0; i < count; i++) { + z_core.frames[i].buddy_order=0; + z_core.frames[i].parent = NULL; + z_core.frames[i].refcount=1; + } + + for (i = 0; i < count; i++) + { + z_core.frames[i].refcount = 0; + buddy_system_free(&z_core.frames[i].buddy_link); + } +*/ + + DBG("create zone: base %x count %x order %d\n", + start, count, z->max_order); + +} + +static void zone_mark_unavailable(zone_t *zone, index_t frame_idx) +{ + frame_t *frame; + link_t *link; + + ASSERT(frame_idx < zone->count); + + frame = &zone->frames[frame_idx]; + + if (frame->refcount) + return; + link = buddy_alloc_block( &frame->buddy_link); + ASSERT(link); + zone->free_count--; +} + +static void zone_reserve(zone_t *z, pfn_t base, count_t count) +{ + int i; + pfn_t top = base + count; + + if( (base+count < z->base)||(base > z->base+z->count)) +
return; + + if(base < z->base) + base = z->base; + + if(top > z->base+z->count) + top = z->base+z->count; + + DBG("zone reserve base %x top %x\n", base, top); + + for (i = base; i < top; i++) + zone_mark_unavailable(z, i - z->base); + +}; + +static void zone_release(zone_t *z, pfn_t base, count_t count) +{ + int i; + pfn_t top = base+count; + + if( (base+count < z->base)||(base > z->base+z->count)) + return; + + if(base < z->base) + base = z->base; + + if(top > z->base+z->count) + top = z->base+z->count; + + DBG("zone release base %x top %x\n", base, top); + + for (i = base; i < top; i++) { + z->frames[i-z->base].refcount = 0; + buddy_system_free(&z->frames[i-z->base].buddy_link); + } +}; + +static inline frame_t * zone_get_frame(zone_t *zone, index_t frame_idx) +{ + ASSERT(frame_idx < zone->count); + return &zone->frames[frame_idx]; +} + +void __fastcall frame_set_parent(pfn_t pfn, void *data) +{ + spinlock_lock(&z_core.lock); + zone_get_frame(&z_core, pfn-z_core.base)->parent = data; + spinlock_unlock(&z_core.lock); +} + +void* __fastcall frame_get_parent(pfn_t pfn) +{ + void *res; + + spinlock_lock(&z_core.lock); + res = zone_get_frame(&z_core, pfn)->parent; + spinlock_unlock(&z_core.lock); + + return res; +} + diff --git a/kernel/branches/kolibri_pe/core/dll.c b/kernel/branches/kolibri_pe/core/dll.c index 17398e9333..4e013e478b 100644 --- a/kernel/branches/kolibri_pe/core/dll.c +++ b/kernel/branches/kolibri_pe/core/dll.c @@ -173,32 +173,32 @@ srv_t* __fastcall load_pe_driver(const char *path) PIMAGE_NT_HEADERS32 nt; drv_entry_t *drv_entry; - md_t *md; + addr_t img_base; srv_t *srv; - md = load_image(path); + img_base = load_image(path); - if( ! md ) + if( ! img_base ) return 0; - if( link_image( md->base ) ) + if( link_image( img_base ) ) { - dos = (PIMAGE_DOS_HEADER)md->base; + dos = (PIMAGE_DOS_HEADER)img_base; nt = MakePtr( PIMAGE_NT_HEADERS32, dos, dos->e_lfanew); - drv_entry = MakePtr(drv_entry_t*, md->base, + drv_entry = MakePtr(drv_entry_t*, img_base, nt->OptionalHeader.AddressOfEntryPoint); srv = drv_entry(1); if(srv != NULL) - srv->entry = nt->OptionalHeader.AddressOfEntryPoint + md->base; + srv->entry = nt->OptionalHeader.AddressOfEntryPoint + img_base; return srv; } else { - md_free( md ); + mem_free( img_base ); return NULL; } } @@ -277,9 +277,9 @@ int sys_exec(char *path, char *cmdline, u32_t flags) ( raw[1] == 0x30305445) ) ) { - DBG("leagacy Kolibri application"); + DBG("legacy Kolibri application\n"); int tmp = mnt_exec(raw, raw_size, path, cmdline, flags); - DBG(" pid %x\n",tmp); + DBG("pid %x\n",tmp); return tmp; } @@ -311,7 +311,7 @@ int sys_exec(char *path, char *cmdline, u32_t flags) return -30; } - ex_stack_page = core_alloc(0); /* 2^0 = 1 page */ + ex_stack_page = alloc_page(); /* 2^0 = 1 page */ if( !
ex_stack_page ) { mem_free(raw); @@ -327,7 +327,7 @@ int sys_exec(char *path, char *cmdline, u32_t flags) if( !ex_pg_dir ) { - core_free(ex_stack_page); + frame_free(ex_stack_page); mem_free(raw); return -30; /* FIXME */ }; diff --git a/kernel/branches/kolibri_pe/core/frame.c b/kernel/branches/kolibri_pe/core/frame.c new file mode 100644 index 0000000000..3c5cec8886 --- /dev/null +++ b/kernel/branches/kolibri_pe/core/frame.c @@ -0,0 +1,442 @@ + +#include +#include +#include +#include +#include + +extern u32_t pg_balloc; +extern u32_t mem_amount; + +void __fastcall *balloc(size_t size); + +static zone_t z_core; + +#include "buddy.inc" + +typedef struct +{ + link_t link; + SPINLOCK_DECLARE(lock); + u32_t state; + void *parent; + count_t avail; + addr_t base; + index_t next; + int list[512]; +}pslab_t; + +typedef struct +{ + SPINLOCK_DECLARE(lock); + + count_t partial_count; + + link_t full_slabs; /**< List of full slabs */ + link_t partial_slabs; /**< List of partial slabs */ +}pcache_t; + +static pcache_t page_cache; + +static pslab_t *create_page_slab(); + + + +void init_mm() +{ + int i; + + u32_t base; + u32_t size; + count_t pages; + size_t conf_size; + size_t core_size; + pslab_t *slab; + + pages = mem_amount >> PAGE_WIDTH; + DBG("last page = %x total pages = %x\n",mem_amount, pages); + + conf_size = pages*sizeof(frame_t); + DBG("conf_size = %x free mem start =%x\n",conf_size, pg_balloc); + + zone_create(&z_core, 0, pages); + zone_release(&z_core, 0, pages); + zone_reserve(&z_core, 0, pg_balloc >> PAGE_WIDTH); + + list_initialize(&page_cache.full_slabs); + list_initialize(&page_cache.partial_slabs); + + slab = create_page_slab(); + + ASSERT(slab); + + slab->parent = &page_cache; + page_cache.partial_count++; + list_prepend(&slab->link, &page_cache.partial_slabs); +}; + +/** Return wasted space in slab */ +static unsigned int badness(index_t order, size_t size) +{ + unsigned int objects; + unsigned int ssize; + + ssize = PAGE_SIZE << order; + objects = (PAGE_SIZE << order) / size; + return ssize - objects * size; +} + +#define SLAB_MAX_BADNESS(order) (((size_t) PAGE_SIZE << (order)) >> 2) + + +static pslab_t *create_page_slab() +{ + pslab_t *slab; + link_t *tmp; + + spinlock_lock(&z_core.lock); + + tmp = buddy_alloc(9); + + if( tmp != 0 ) + { + frame_t *frame; + int i; + addr_t v; + + /* Update zone information. */ + z_core.free_count -= 512; + z_core.busy_count += 512; + + spinlock_unlock(&z_core.lock); + + /* Frame will be actually a first frame of the block. */ + frame = (frame_t*)tmp; + + frame->parent = 0; + + v = (z_core.base + (index_t)(frame - z_core.frames)) << PAGE_WIDTH; + + slab = (pslab_t*)PA2KA(v); + + for(i = 1; i < 512; i++) + frame[i].parent = slab; + + slab->base = v + PAGE_SIZE; + + slab->avail = 511; + slab->next = 0; + + for(i = 0; i < 511; i++) + slab->list[i] = i + 1; + + } + else + { + spinlock_unlock(&z_core.lock); + slab = NULL; + }; + + DBG("create page slab at %x\n", slab); + + return slab; +} + +static void destroy_page_slab(pslab_t *slab) +{ + u32_t order; + count_t idx; + frame_t *frame; + + + idx = (KA2PA(slab) >> PAGE_WIDTH)-z_core.base; + + frame = &z_core.frames[idx]; + + /* remember frame order */ + order = frame->buddy_order; + + ASSERT(frame->refcount); + + if (!--frame->refcount) + { + spinlock_lock(&z_core.lock); + + buddy_system_free(&frame->buddy_link); + + /* Update zone information. 
*/ + z_core.free_count += (1 << order); + z_core.busy_count -= (1 << order); + + spinlock_unlock(&z_core.lock); + } +} + +#if 0 +fslab_t *create_slab(index_t order, size_t size) +{ + fslab_t *slab; + + slab = (fslab_t*)PA2KA(frame_alloc(0)); + + if( slab ) + { + link_t *tmp; + + spinlock_lock(&z_core.lock); + + tmp = buddy_alloc(order); + ASSERT(tmp); + + if( tmp ) + { + frame_t *frame; + count_t objects; + count_t i; + addr_t v; + + /* Update zone information. */ + z_core.free_count -= (1 << order); + z_core.busy_count += (1 << order); + + spinlock_unlock(&z_core.lock); + + /* Frame will be actually a first frame of the block. */ + frame = (frame_t*)tmp; + + for(i = 0; i < (1U<<order); i++) + frame[i].parent = slab; + + v = z_core.base + (index_t)(frame - z_core.frames); + + slab->base = (v << PAGE_WIDTH); + + slab->avail = (PAGE_SIZE << order) / size; + slab->next = 0; + + objects = (PAGE_SIZE << order) / size; + + for(i = 0; i < objects; i++) + slab->list[i] = i + 1; + } + else + { + spinlock_unlock(&z_core.lock); + frame_free(KA2PA(slab)); + slab = NULL; + }; + }; + + return slab; +} + +static void destroy_slab(fslab_t *slab) +{ + u32_t order; + count_t idx; + frame_t *frame; + + idx = (slab->base >> PAGE_WIDTH)-z_core.base; + frame = &z_core.frames[idx]; + + /* remember frame order */ + order = frame->buddy_order; + + ASSERT(frame->refcount); + + if (!--frame->refcount) + { + spinlock_lock(&z_core.lock); + + buddy_system_free(&frame->buddy_link); + + /* Update zone information. */ + z_core.free_count += (1 << order); + z_core.busy_count -= (1 << order); + + spinlock_unlock(&z_core.lock); + } + +// slab_free(fslab, slab); + +}; +#endif + +addr_t alloc_page(void) +{ + eflags_t efl; + pslab_t *slab; + addr_t frame; + + efl = safe_cli(); + + spinlock_lock(&page_cache.lock); + + if (list_empty(&page_cache.partial_slabs)) + { + slab = create_page_slab(); + if (!slab) + { + spinlock_unlock(&page_cache.lock); + safe_sti(efl); + return 0; + } + slab->parent = &page_cache; + slab->state = 1; + page_cache.partial_count++; + list_prepend(&slab->link, &page_cache.partial_slabs); + } + else + slab = (pslab_t*)page_cache.partial_slabs.next; + + frame = slab->base + (slab->next << PAGE_WIDTH); + slab->next = slab->list[slab->next]; + + slab->avail--; + if( slab->avail == 0 ) + { + slab->state = 0; + list_remove(&slab->link); + list_prepend(&slab->link, &page_cache.full_slabs); + page_cache.partial_count--; + DBG("%s: insert empty page slab\n", __FUNCTION__); + }; + spinlock_unlock(&page_cache.lock); + +// DBG("alloc_page: %x remain %d\n", frame, slab->avail); + + safe_sti(efl); + + return frame; +} + + +addr_t __fastcall frame_alloc(count_t count) +{ + addr_t frame; + + if ( count > 1) + { + eflags_t efl; + index_t order; + frame_t *tmp; + count_t i; + + order = fnzb(count-1)+1; + + efl = safe_cli(); + + spinlock_lock(&z_core.lock); + + tmp = (frame_t*)buddy_alloc( order ); + + ASSERT(tmp); + + z_core.free_count -= (1 << order); + z_core.busy_count += (1 << order); + + for(i = 0; i < (1 << order); i++) + tmp[i].parent = NULL; + + spinlock_unlock(&z_core.lock); + + safe_sti(efl); + + frame = (z_core.base + + (index_t)(tmp - z_core.frames)) << PAGE_WIDTH; + + + DBG("%s %x order %d remain %d\n", __FUNCTION__, + frame, order, z_core.free_count); + } + else + frame = alloc_page(); + + return frame; +} + +size_t __fastcall frame_free(addr_t addr) +{ + eflags_t efl; + index_t idx; + frame_t *frame; + size_t frame_size; + + idx = addr >> PAGE_WIDTH; + + if( (idx < z_core.base) || + (idx >= z_core.base+z_core.count)) { + DBG("%s: invalid address %x\n", __FUNCTION__, addr); + return 0; + } + + efl = safe_cli(); + + frame =
&z_core.frames[idx-z_core.base]; + + if( frame->parent != NULL ) + { + pslab_t *slab; + + slab = frame->parent; + + spinlock_lock(&page_cache.lock); + + idx = (addr - slab->base) >> PAGE_WIDTH; + + ASSERT(idx < 512); + + slab->list[idx] = slab->next; + slab->next = idx; + + slab->avail++; + + if( (slab->state == 0 ) && + (slab->avail >= 4)) + { + slab->state = 1; + // list_remove(&slab->link); + // list_prepend(&slab->link, &page_cache.partial_slabs); + // page_cache.partial_count++; + + DBG("%s: insert partial page slab\n", __FUNCTION__); + } + spinlock_unlock(&page_cache.lock); + + frame_size = 1; + } + else + { + count_t order; + + order = frame->buddy_order; + + DBG("%s %x order %d\n", __FUNCTION__, addr, order); + + ASSERT(frame->refcount); + + spinlock_lock(&z_core.lock); + + if (!--frame->refcount) + { + buddy_system_free(&frame->buddy_link); + + /* Update zone information. */ + z_core.free_count += (1 << order); + z_core.busy_count -= (1 << order); + } + spinlock_unlock(&z_core.lock); + + frame_size = 1 << order; + }; + safe_sti(efl); + + return frame_size; +} + +count_t get_free_mem() +{ + return z_core.free_count; +} + diff --git a/kernel/branches/kolibri_pe/core/heap.c b/kernel/branches/kolibri_pe/core/heap.c index 80c391719f..05915e823b 100644 --- a/kernel/branches/kolibri_pe/core/heap.c +++ b/kernel/branches/kolibri_pe/core/heap.c @@ -4,775 +4,481 @@ #include #include #include -#include + +#define PG_DEMAND 0x400 + +#define HF_WIDTH 16 +#define HF_SIZE (1 << HF_WIDTH) + +#define BUDDY_SYSTEM_INNER_BLOCK 0xff + +static zone_t z_heap; + +static link_t shared_mmap; -#define MD_FREE 1 -#define MD_USED 2 +#define heap_index( frame ) \ + (index_t)( (frame) - z_heap.frames) -typedef struct { - u32_t av_mapped; - u32_t av_unmapped; - - link_t mapped[32]; - link_t unmapped[32]; - - link_t used; - - SPINLOCK_DECLARE(lock); /**< this lock protects everything below */ -}heap_t; +#define heap_index_abs( frame ) \ + (index_t)( (frame) - z_heap.frames) -slab_cache_t *md_slab; -slab_cache_t *phm_slab; - - -heap_t lheap; -heap_t sheap; - - -static inline void _set_lavu(count_t idx) -{ asm volatile ("bts %0, _lheap+4"::"r"(idx):"cc"); } - -static inline void _reset_lavu(count_t idx) -{ asm volatile ("btr %0, _lheap+4"::"r"(idx):"cc"); } - -static inline void _set_savm(count_t idx) -{ asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); } - -static inline void _reset_savm(count_t idx) -{ asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); } - -static inline void _set_savu(count_t idx) -{ asm volatile ("bts %0, _sheap+4"::"r"(idx):"cc"); } - -static inline void _reset_savu(count_t idx) -{ asm volatile ("btr %0, _sheap+4"::"r"(idx):"cc"); } - - -int __fastcall init_heap(addr_t base, size_t size) +static __inline void frame_initialize(frame_t *frame) { - md_t *md; - u32_t i; - - ASSERT(base != 0); - ASSERT(size != 0) - ASSERT((base & 0x3FFFFF) == 0); - ASSERT((size & 0x3FFFFF) == 0); - - for (i = 0; i < 32; i++) - { - list_initialize(&lheap.mapped[i]); - list_initialize(&lheap.unmapped[i]); - - list_initialize(&sheap.mapped[i]); - list_initialize(&sheap.unmapped[i]); - }; - - list_initialize(&lheap.used); - list_initialize(&sheap.used); - - md_slab = slab_cache_create(sizeof(md_t), 16,NULL,NULL,SLAB_CACHE_MAGDEFERRED); - - md = (md_t*)slab_alloc(md_slab,0); - - list_initialize(&md->adj); - md->base = base; - md->size = size; - md->parent = NULL; - md->state = MD_FREE; - - list_prepend(&md->link, &lheap.unmapped[31]); - lheap.av_mapped = 0x00000000; - lheap.av_unmapped = 0x80000000; - sheap.av_mapped = 
0x00000000; - sheap.av_unmapped = 0x00000000; - - return 1; -}; - -md_t* __fastcall find_large_md(size_t size) -{ - md_t *md = NULL; - - count_t idx0; - u32_t mask; - - ASSERT((size & 0x3FFFFF) == 0); - - idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31; - mask = lheap.av_unmapped & ( -1<<idx0 ); - - if(mask) - { - if(idx0 == 31) - { - md_t *tmp = (md_t*)lheap.unmapped[31].next; - - while( &tmp->link != &lheap.unmapped[31]) - { - if(tmp->size >= size) - { - DBG("remove large tmp %x\n", tmp); - - md = tmp; - break; - }; - }; - tmp = (md_t*)tmp->link.next; - } - else - { - idx0 = _bsf(mask); - - ASSERT( !list_empty(&lheap.unmapped[idx0])) - - md = (md_t*)lheap.unmapped[idx0].next; - }; - } - else - return NULL; - - ASSERT(md->state == MD_FREE); - - list_remove((link_t*)md); - if(list_empty(&lheap.unmapped[idx0])) - _reset_lavu(idx0); - - if(md->size > size) - { - count_t idx1; - md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ - - link_initialize(&new_md->link); - list_insert(&new_md->adj, &md->adj); - - new_md->base = md->base; - new_md->size = size; - new_md->parent = NULL; - new_md->state = MD_USED; - - md->base+= size; - md->size-= size; - - idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31; - - list_prepend(&md->link, &lheap.unmapped[idx1]); - _set_lavu(idx1); - - return new_md; - }; - md->state = MD_USED; - - return md; + frame->refcount = 1; + frame->buddy_order = 0; } -md_t* __fastcall find_unmapped_md(size_t size) +#define buddy_get_order( block) \ + ((frame_t*)(block))->buddy_order + + +#define buddy_set_order( block, order) \ + ((frame_t*)(block))->buddy_order = (order) + +#define buddy_mark_busy( block ) \ + ((frame_t*)(block))->refcount = 1 + + +static __inline link_t * buddy_bisect(link_t *block) { - eflags_t efl; + frame_t *frame_l, *frame_r; - md_t *md = NULL; + frame_l = (frame_t*)block; + frame_r = (frame_l + (1 << (frame_l->buddy_order - 1))); - count_t idx0; - u32_t mask; + return &frame_r->buddy_link; +} - ASSERT((size & 0xFFF) == 0); +static __inline link_t *buddy_coalesce(link_t *block_1, link_t *block_2) +{ + frame_t *frame1, *frame2; - efl = safe_cli(); + frame1 = (frame_t*)block_1; + frame2 = (frame_t*)block_2; - idx0 = (size>>12) - 1 < 32 ?
(size>>12) - 1 : 31; - mask = sheap.av_unmapped & ( -1<<idx0 ); - - if(mask) - { - if(idx0 == 31) - { +#define IS_BUDDY_LEFT_BLOCK_ABS(frame) \ + (((heap_index_abs((frame)) >> (frame)->buddy_order) & 0x1) == 0) - md_t *tmp = (md_t*)sheap.unmapped[31].next; - while( &tmp->link != &sheap.unmapped[31]) - { - if(tmp->size >= size) - { - md = tmp; - break; - }; - tmp = (md_t*)tmp->link.next; - }; - } - else - { - idx0 = _bsf(mask); +#define IS_BUDDY_RIGHT_BLOCK_ABS(frame) \ + (((heap_index_abs((frame)) >> (frame)->buddy_order) & 0x1) == 1) - ASSERT( !list_empty(&sheap.unmapped[idx0])); - md = (md_t*)sheap.unmapped[idx0].next; - } - }; +static link_t *find_buddy(link_t *block) +{ + frame_t *frame; + index_t index; + u32_t is_left, is_right; - if(md) - { - DBG("remove md %x\n", md); + frame = (frame_t*)block; + // ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),frame->buddy_order)); - ASSERT(md->state==MD_FREE); - ASSERT(md->parent != NULL); + is_left = IS_BUDDY_LEFT_BLOCK_ABS( frame); + is_right = IS_BUDDY_RIGHT_BLOCK_ABS( frame); - list_remove((link_t*)md); - if(list_empty(&sheap.unmapped[idx0])) - _reset_savu(idx0); + // ASSERT(is_left ^ is_right); + + if (is_left) { + index = (heap_index(frame)) + (1 << frame->buddy_order); } - else - { - md_t *lmd; - lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF); - - DBG("get large md %x\n", lmd); - - if( !lmd) - { - safe_sti(efl); - return NULL; - }; - - ASSERT(lmd->size != 0); - ASSERT(lmd->base != 0); - ASSERT((lmd->base & 0x3FFFFF) == 0); - ASSERT(lmd->parent == NULL); - - md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ - - link_initialize(&md->link); - list_initialize(&md->adj); - md->base = lmd->base; - md->size = lmd->size; - md->parent = lmd; - md->state = MD_USED; + else { /* if (is_right) */ + index = (heap_index(frame)) - (1 << frame->buddy_order); }; - if(md->size > size) + + if ( index < z_heap.count) + { + if (z_heap.frames[index].buddy_order == frame->buddy_order && + z_heap.frames[index].refcount == 0) { + return &z_heap.frames[index].buddy_link; + } + } + + return NULL; +} + + +static void buddy_system_free(link_t *block) +{ + link_t *buddy, *hlp; + u32_t i; + + /* + * Determine block's order. + */ + i = buddy_get_order(block); + + ASSERT(i <= z_heap.max_order); + + if (i != z_heap.max_order) { - count_t idx1; - md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ + /* + * See if there is any buddy in the list of order i. + */ + buddy = find_buddy( block ); + if (buddy) + { - link_initialize(&new_md->link); - list_insert(&new_md->adj, &md->adj); + ASSERT(buddy_get_order(buddy) == i); + /* + * Remove buddy from the list of order i. + */ + list_remove(buddy); - new_md->base = md->base; - new_md->size = size; - new_md->parent = md->parent; - new_md->state = MD_USED; + /* + * Invalidate order of both block and buddy. + */ + buddy_set_order(block, BUDDY_SYSTEM_INNER_BLOCK); + buddy_set_order(buddy, BUDDY_SYSTEM_INNER_BLOCK); - md->base+= size; - md->size-= size; - md->state = MD_FREE; + /* + * Coalesce block and buddy into one block. + */ + hlp = buddy_coalesce( block, buddy ); - idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; + /* + * Set order of the coalesced block to i + 1. + */ + buddy_set_order(hlp, i + 1); - DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1); + /* + * Recursively add the coalesced block to the list of order i + 1. + */ + buddy_system_free( hlp ); + return; + } + } + /* + * Insert block into the list of order i.
+ */ + list_append(block, &z_heap.order[i]); +} + + +static link_t* buddy_system_alloc( u32_t i) +{ + link_t *res, *hlp; + + ASSERT(i <= z_heap.max_order); + + /* + * If the list of order i is not empty, + * the request can be immediately satisfied. + */ + if (!list_empty(&z_heap.order[i])) { + res = z_heap.order[i].next; + list_remove(res); + buddy_mark_busy(res); + return res; + } + /* + * If order i is already the maximal order, + * the request cannot be satisfied. + */ + if (i == z_heap.max_order) + return NULL; + + /* + * Try to recursively satisfy the request from higher order lists. + */ + hlp = buddy_system_alloc( i + 1 ); + + /* + * The request could not be satisfied + * from higher order lists. + */ + if (!hlp) + return NULL; + + res = hlp; + + /* + * Bisect the block and set order of both of its parts to i. + */ + hlp = buddy_bisect( res ); + + buddy_set_order(res, i); + buddy_set_order(hlp, i); + + /* + * Return the other half to buddy system. Mark the first part + * full, so that it won't coalesce again. + */ + buddy_mark_busy(res); + buddy_system_free( hlp ); + + return res; +} + + +int __fastcall init_heap(addr_t start, size_t size) +{ + count_t i; + count_t count; + + count = size >> HF_WIDTH; + + ASSERT( start != 0); + ASSERT( count != 0); + + spinlock_initialize(&z_heap.lock); + + z_heap.base = start >> HF_WIDTH; + z_heap.count = count; + z_heap.free_count = count; + z_heap.busy_count = 0; + + z_heap.max_order = fnzb(count); + + DBG("create heap zone: base %x count %x\n", start, count); + + ASSERT(z_heap.max_order < BUDDY_SYSTEM_INNER_BLOCK); + + for (i = 0; i <= z_heap.max_order; i++) + list_initialize(&z_heap.order[i]); + + + DBG("count %d frame_t %d page_size %d\n", + count, sizeof(frame_t), PAGE_SIZE); + + z_heap.frames = (frame_t *)PA2KA(frame_alloc( (count*sizeof(frame_t) + PAGE_SIZE - 1) >> PAGE_WIDTH )); + + + if( z_heap.frames == 0 ) + return 0; + + + for (i = 0; i < count; i++) { + z_heap.frames[i].buddy_order=0; + z_heap.frames[i].parent = NULL; + z_heap.frames[i].refcount=1; + } + + for (i = 0; i < count; i++) + { + z_heap.frames[i].refcount = 0; + buddy_system_free(&z_heap.frames[i].buddy_link); + } + + list_initialize(&shared_mmap); + + return 1; +} + +addr_t __fastcall mem_alloc(size_t size, u32_t flags) +{ + eflags_t efl; + addr_t heap = 0; + + count_t order; + frame_t *frame; + index_t v; + int i; + mmap_t *map; + count_t pages; + + // __asm__ __volatile__ ("xchgw %bx, %bx"); + + size = (size + 4095) & ~4095; + + pages = size >> PAGE_WIDTH; + +// map = (mmap_t*)malloc( sizeof(mmap_t) + +// sizeof(addr_t) * pages); + + map = (mmap_t*)PA2KA(frame_alloc( (sizeof(mmap_t) + + sizeof(addr_t) * pages + PAGE_SIZE - 1) >> PAGE_WIDTH)); + + if ( map ) + { + map->size = size; + + order = size >> HF_WIDTH; + + if( order ) + order = fnzb(order - 1) + 1; + + efl = safe_cli(); + + spinlock_lock(&z_heap.lock); + + frame = (frame_t*)buddy_system_alloc(order); + + ASSERT( frame ); + + if( frame ) + { + addr_t page = 0; + addr_t mem; + + z_heap.free_count -= (1 << order); + z_heap.busy_count += (1 << order); + +/* get frame address */ + + v = z_heap.base + (index_t)(frame - z_heap.frames); + + heap = v << HF_WIDTH; + + map->base = heap; + + for(i = 0; i < (1 << order); i++) + frame[i].parent = map; + + spinlock_unlock(&z_heap.lock); + + safe_sti(efl); + + + addr_t *pte = &((addr_t*)page_tabs)[heap >> PAGE_WIDTH]; + addr_t *mpte = &map->pte[0];
+ +#if 0 + if( flags & PG_MAP ) + page = PG_DEMAND | (flags & 0xFFF); + + mem = heap; + while(pages--) { - md_t *tmp = (md_t*)sheap.unmapped[31].next; + *pte++ = 0; //page; + *mpte++ = page; - while( &tmp->link != &sheap.unmapped[31]) - { - if(md->base < tmp->base) - break; - tmp = (md_t*)tmp->link.next; - } - list_insert(&md->link, &tmp->link); + asm volatile ( "invlpg (%0)" ::"r" (mem) ); + mem+= 4096; }; - }; +#else + mem = heap; - _set_savu(idx1); + while(pages--) + { + if( flags & PG_MAP ) + page = alloc_page(); + + page |= flags & 0xFFF; + + *pte++ = 0; + *mpte++ = page; + + asm volatile ( "invlpg (%0)" ::"r" (mem) ); + mem+= 4096; + }; +#endif + + DBG("%s %x size %d order %d\n", __FUNCTION__, heap, size, order); + + return heap; + } + + spinlock_unlock(&z_heap.lock); safe_sti(efl); - return new_md; + frame_free( KA2PA(map) ); }; - md->state = MD_USED; - - safe_sti(efl); - - return md; + return 0; } -md_t* __fastcall find_mapped_md(size_t size) +void __fastcall mem_free(addr_t addr) { - eflags_t efl; + eflags_t efl; + frame_t *frame; + count_t idx; - md_t *md = NULL; + idx = (addr >> HF_WIDTH); - count_t idx0; - u32_t mask; - - ASSERT((size & 0xFFF) == 0); + if( (idx < z_heap.base) || + (idx >= z_heap.base+z_heap.count)) { + DBG("invalid address %x\n", addr); + return; + } efl = safe_cli(); - idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31; - mask = sheap.av_mapped & ( -1<<idx0 ); + + frame = &z_heap.frames[idx-z_heap.base]; + + u32_t order = frame->buddy_order; - if(mask) + DBG("%s %x order %d\n", __FUNCTION__, addr, order); - { - if(idx0 == 31) - { - ASSERT( !list_empty(&sheap.mapped[31])); + ASSERT(frame->refcount); - md_t *tmp = (md_t*)sheap.mapped[31].next; - while( &tmp->link != &sheap.mapped[31]) + spinlock_lock(&z_heap.lock); + + if (!--frame->refcount) { - if(tmp->size >= size) - { - md = tmp; - break; - }; - tmp = (md_t*)tmp->link.next; + mmap_t *map; + count_t i; - } - else - { - idx0 = _bsf(mask); + map = frame->parent; - ASSERT( !list_empty(&sheap.mapped[idx0])); + for(i = 0; i < (1 << order); i++) + frame[i].parent = NULL; - md = (md_t*)sheap.mapped[idx0].next; - } - }; + buddy_system_free(&frame->buddy_link); - if(md) - { - DBG("remove md %x\n", md); + /* Update zone information.
*/ + z_heap.free_count += (1 << order); + z_heap.busy_count -= (1 << order); + + spinlock_unlock(&z_heap.lock); + safe_sti(efl); + + for( i = 0; i < (map->size >> PAGE_WIDTH); i++) + frame_free(map->pte[i]); + + frame_free( KA2PA(map) ); + } + else + { + spinlock_unlock(&z_heap.lock); + safe_sti(efl); + }; +}; + + +void __fastcall heap_fault(addr_t faddr, u32_t code) +{ + index_t idx; + frame_t *frame; + mmap_t *map; + + idx = faddr >> HF_WIDTH; + + frame = &z_heap.frames[idx-z_heap.base]; + + map = frame->parent; + + ASSERT( faddr >= map->base); + + if( faddr < map->base + map->size) + { + addr_t page; + + idx = (faddr - map->base) >> PAGE_WIDTH; + + page = map->pte[idx]; + + if( page != 0) + { +#if 0 + if( page & PG_DEMAND) { - if(tmp->size >= size) - { - md = tmp; - break; - }; - tmp = (md_t*)tmp->link.next; + page &= ~PG_DEMAND; + page = alloc_page() | (page & 0xFFF); + + map->pte[idx] = page; }; - } - else - { - idx0 = _bsf(mask); - - ASSERT( !list_empty(&sheap.mapped[idx0])); - - md = (md_t*)sheap.mapped[idx0].next; - } - }; - - if(md) - { - DBG("remove md %x\n", md); - - ASSERT(md->state==MD_FREE); - - list_remove((link_t*)md); - if(list_empty(&sheap.mapped[idx0])) - _reset_savm(idx0); - } - else - { - md_t *lmd; - addr_t frame; - addr_t *pte; - int i; - - lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF); - - DBG("get large md %x\n", lmd); - - if( !lmd) - { - safe_sti(efl); - return NULL; - }; - - ASSERT(lmd->size != 0); - ASSERT(lmd->base != 0); - ASSERT((lmd->base & 0x3FFFFF) == 0); - ASSERT(lmd->parent == NULL); - - frame = core_alloc(10); /* FIXME check */ - - lmd->parent = (void*)frame; - - pte = &((addr_t*)page_tabs)[lmd->base>>12]; /* FIXME remove */ - - for(i = 0; i<1024; i++) - { - *pte++ = frame; - frame+= 4096; - } - - md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ - - link_initialize(&md->link); - list_initialize(&md->adj); - md->base = lmd->base; - md->size = lmd->size; - md->parent = lmd; - md->state = MD_USED; - }; - - if(md->size > size) - { - count_t idx1; - md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ - - link_initialize(&new_md->link); - list_insert(&new_md->adj, &md->adj); - - new_md->base = md->base; - new_md->size = size; - new_md->parent = md->parent; - - md->base+= size; - md->size-= size; - md->state = MD_FREE; - - idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; - - DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1); - - if( idx1 < 31) - list_prepend(&md->link, &sheap.mapped[idx1]); - else - { - if( list_empty(&sheap.mapped[31])) - list_prepend(&md->link, &sheap.mapped[31]); - else - { - md_t *tmp = (md_t*)sheap.mapped[31].next; - - while( &tmp->link != &sheap.mapped[31]) - { - if(md->base < tmp->base) - break; - tmp = (md_t*)tmp->link.next; - } - list_insert(&md->link, &tmp->link); - }; - }; - - _set_savm(idx1); - - md = new_md; - }; - - md->state = MD_USED; - - safe_sti(efl); - - return md; -} - -void __fastcall free_unmapped_md(md_t *md) -{ - eflags_t efl ; - md_t *fd; - md_t *bk; - count_t idx; - - ASSERT(md->parent != NULL); - - efl = safe_cli(); - spinlock_lock(&sheap.lock); - - if( !list_empty(&md->adj)) - { - bk = (md_t*)md->adj.prev; - fd = (md_t*)md->adj.next; - - if(fd->state == MD_FREE) - { - idx = (fd->size>>12) - 1 < 32 ? 
(fd->size>>12) - 1 : 31; - - list_remove((link_t*)fd); - if(list_empty(&sheap.unmapped[idx])) - _reset_savu(idx); - - md->size+= fd->size; - md->adj.next = fd->adj.next; - md->adj.next->prev = (link_t*)md; - slab_free(md_slab, fd); - }; - if(bk->state == MD_FREE) - { - idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31; - - list_remove((link_t*)bk); - if(list_empty(&sheap.unmapped[idx])) - _reset_savu(idx); - - bk->size+= md->size; - bk->adj.next = md->adj.next; - bk->adj.next->prev = (link_t*)bk; - slab_free(md_slab, md); - md = fd; +#endif + ((addr_t*)page_tabs)[faddr >> PAGE_WIDTH] = page; }; }; - - md->state = MD_FREE; - - idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; - - _set_savu(idx); - - if( idx < 31) - list_prepend(&md->link, &sheap.unmapped[idx]); - else - { - if( list_empty(&sheap.unmapped[31])) - list_prepend(&md->link, &sheap.unmapped[31]); - else - { - md_t *tmp = (md_t*)sheap.unmapped[31].next; - - while( &tmp->link != &sheap.unmapped[31]) - { - if(md->base < tmp->base) - break; - tmp = (md_t*)tmp->link.next; - } - list_insert(&md->link, &tmp->link); - }; - }; - spinlock_unlock(&sheap.lock); - safe_sti(efl); - }; -void __fastcall free_mapped_md(md_t *md) -{ - eflags_t efl ; - md_t *fd; - md_t *bk; - count_t idx; +//#include "mmap.inc" - ASSERT(md->parent != NULL); - ASSERT( ((md_t*)(md->parent))->parent != NULL); - - efl = safe_cli(); - spinlock_lock(&sheap.lock); - - if( !list_empty(&md->adj)) - { - bk = (md_t*)md->adj.prev; - fd = (md_t*)md->adj.next; - - if(fd->state == MD_FREE) - { - idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31; - - list_remove((link_t*)fd); - if(list_empty(&sheap.mapped[idx])) - _reset_savm(idx); - - md->size+= fd->size; - md->adj.next = fd->adj.next; - md->adj.next->prev = (link_t*)md; - slab_free(md_slab, fd); - }; - if(bk->state == MD_FREE) - { - idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31; - - list_remove((link_t*)bk); - if(list_empty(&sheap.mapped[idx])) - _reset_savm(idx); - - bk->size+= md->size; - bk->adj.next = md->adj.next; - bk->adj.next->prev = (link_t*)bk; - slab_free(md_slab, md); - md = fd; - }; - }; - - md->state = MD_FREE; - - idx = (md->size>>12) - 1 < 32 ? 
(md->size>>12) - 1 : 31; - - _set_savm(idx); - - if( idx < 31) - list_prepend(&md->link, &sheap.mapped[idx]); - else - { - if( list_empty(&sheap.mapped[31])) - list_prepend(&md->link, &sheap.mapped[31]); - else - { - md_t *tmp = (md_t*)sheap.mapped[31].next; - - while( &tmp->link != &sheap.mapped[31]) - { - if(md->base < tmp->base) - break; - tmp = (md_t*)tmp->link.next; - } - list_insert(&md->link, &tmp->link); - }; - }; - spinlock_unlock(&sheap.lock); - safe_sti(efl); -}; - - -md_t* __fastcall md_alloc(size_t size, u32_t flags) -{ - eflags_t efl; - - md_t *md; - - size = (size+4095)&~4095; - - if( flags & PG_MAP ) - { - md = find_mapped_md(size); - - if( !md ) - return NULL; - - ASSERT(md->state == MD_USED); - ASSERT(md->parent != NULL); - - md_t *lmd = (md_t*)md->parent; - - ASSERT( lmd != NULL); - ASSERT( lmd->parent != NULL); - - addr_t frame = (md->base - lmd->base + (addr_t)lmd->parent)| - (flags & 0xFFF); - DBG("frame %x\n", frame); - ASSERT(frame != 0); - - count_t tmp = size >> 12; - addr_t *pte = &((addr_t*)page_tabs)[md->base>>12]; - - while(tmp--) - { - *pte++ = frame; - frame+= 4096; - }; - } - else - { - md = find_unmapped_md(size); - if( !md ) - return NULL; - - ASSERT(md->parent != NULL); - ASSERT(md->state == MD_USED); - } - - return md; -}; - - -void __fastcall md_free(md_t *md) -{ - - if( md ) - { - md_t *lmd; - - DBG("free md: %x base: %x size: %x\n",md, md->base, md->size); - - ASSERT(md->state == MD_USED); - - list_remove((link_t*)md); - - lmd = (md_t*)md->parent; - - ASSERT(lmd != 0); - - if(lmd->parent != 0) - { - addr_t mem = md->base; - addr_t *pte = &((addr_t*)page_tabs)[md->base>>12]; - count_t tmp = md->size >> 12; - - while(tmp--) - { - *pte++ = 0; - asm volatile ( "invlpg (%0)" ::"r" (mem) ); - mem+= 4096; - }; - free_mapped_md( md ); - } - else - free_unmapped_md( md ); - } - - return; -}; - -void * __fastcall mem_alloc(size_t size, u32_t flags) -{ - eflags_t efl; - - md_t *md; - - DBG("\nmem_alloc: %x bytes\n", size); - - ASSERT(size != 0); - - md = md_alloc(size, flags); - - if( !md ) - return NULL; - - efl = safe_cli(); - spinlock_lock(&sheap.lock); - - if( list_empty(&sheap.used) ) - list_prepend(&md->link, &sheap.used); - else - { - md_t *tmp = (md_t*)sheap.used.next; - - while( &tmp->link != &sheap.used) - { - if(md->base < tmp->base) - break; - tmp = (md_t*)tmp->link.next; - } - list_insert(&md->link, &tmp->link); - }; - - spinlock_unlock(&sheap.lock); - safe_sti(efl); - - DBG("allocate: %x size %x\n\n",md->base, size); - return (void*)md->base; -}; - -void __fastcall mem_free(void *mem) -{ - eflags_t efl; - - md_t *tmp; - md_t *md = NULL; - - DBG("mem_free: %x\n",mem); - - ASSERT( mem != 0 ); - ASSERT( ((addr_t)mem & 0xFFF) == 0 ); - ASSERT( ! 
list_empty(&sheap.used)); - - efl = safe_cli(); - - tmp = (md_t*)sheap.used.next; - - while( &tmp->link != &sheap.used) - { - if( tmp->base == (addr_t)mem ) - { - md = tmp; - break; - }; - tmp = (md_t*)tmp->link.next; - } - - if( md ) - { - md_free( md ); - - } - else - DBG("\tERROR: invalid base address: %x\n", mem); - - safe_sti(efl); -}; diff --git a/kernel/branches/kolibri_pe/core/heap.inc b/kernel/branches/kolibri_pe/core/heap.inc index 03fe733a19..bd64cbda01 100644 --- a/kernel/branches/kolibri_pe/core/heap.inc +++ b/kernel/branches/kolibri_pe/core/heap.inc @@ -163,7 +163,7 @@ proc user_free stdcall, base:dword test cl, 1 jz @F - call @core_free@4 + call @frame_free@4 mov eax, esi shl eax, 12 invlpg [eax] @@ -287,7 +287,7 @@ user_realloc: jz .loop push edx - call @core_free@4 + call @frame_free@4 pop edx mov eax, edx shl eax, 12 diff --git a/kernel/branches/kolibri_pe/core/init.asm b/kernel/branches/kolibri_pe/core/init.asm index da46371814..fea1473f75 100644 --- a/kernel/branches/kolibri_pe/core/init.asm +++ b/kernel/branches/kolibri_pe/core/init.asm @@ -214,7 +214,7 @@ core_init: call @init_heap@8 call _init_core_dll - call _init_threads + ; call _init_threads ; SAVE & CLEAR 0-0xffff @@ -277,8 +277,8 @@ map_LFB: add eax, 0x00400000 mov [_sys_pdbr+4+(LFB_BASE shr 20)], eax if SHADOWFB - mov ecx, 11 - call @core_alloc@4 + mov ecx, 1 shl 11 + call @frame_alloc@4 or eax, PG_LARGE+PG_UW mov [_sys_pdbr+(SHADOWFB shr 20)], eax add eax, 0x00400000 diff --git a/kernel/branches/kolibri_pe/core/malloc.inc b/kernel/branches/kolibri_pe/core/malloc.inc index 83e784e064..935846800a 100644 --- a/kernel/branches/kolibri_pe/core/malloc.inc +++ b/kernel/branches/kolibri_pe/core/malloc.inc @@ -987,8 +987,8 @@ malloc_large: align 4 init_malloc: - mov ecx, 6 - call @core_alloc@4 + mov ecx, 64 + call @frame_alloc@4 add eax, OS_BASE mov [mst.top], eax diff --git a/kernel/branches/kolibri_pe/core/memory.inc b/kernel/branches/kolibri_pe/core/memory.inc index f01e91a805..1076621d5e 100644 --- a/kernel/branches/kolibri_pe/core/memory.inc +++ b/kernel/branches/kolibri_pe/core/memory.inc @@ -212,7 +212,7 @@ proc new_mem_resize stdcall, new_size:dword mov ebx, edi shl ebx, 12 invlpg [ebx] - call @core_free@4 + call @frame_free@4 .next: add edi, 1 cmp edi, esi @@ -220,6 +220,8 @@ proc new_mem_resize stdcall, new_size:dword .update_size: mov ebx, [new_size] + mov edx, [current_slot] + call update_mem_size xor eax, eax @@ -422,12 +424,19 @@ proc page_fault_handler align 4 .kernel_heap: + ; xchg bx, bx + shr ebx, 22 mov edx, [master_tab + ebx*4] test edx, PG_MAP jz .check_ptab ;таблица страниц не создана + mov ecx, [.err_addr] + mov edx, [.err_code] + + call @heap_fault@8 + jmp .exit .check_ptab: @@ -435,8 +444,7 @@ align 4 test edx, PG_MAP jnz @F - xor ecx, ecx - call @core_alloc@4 + call _alloc_page test eax, eax jz .fail @@ -546,7 +554,7 @@ align 4 popad add esp, 4 -; iretd + iretd save_ring3_context ;debugger support @@ -1094,15 +1102,12 @@ proc create_ring_buffer stdcall, size:dword, flags:dword push ebx - xor ecx, ecx - mov edx, [size] - shr edx, 12 - mov ebx, edx - dec edx - bsr ecx, edx - inc ecx + mov ecx, [size] + shr ecx, 12 - call @core_alloc@4 + mov ebx, ecx + + call @frame_alloc@4 test eax, eax jz .mm_fail diff --git a/kernel/branches/kolibri_pe/core/mm.c b/kernel/branches/kolibri_pe/core/mm.c index de0bd75f5b..d36162541c 100644 --- a/kernel/branches/kolibri_pe/core/mm.c +++ b/kernel/branches/kolibri_pe/core/mm.c @@ -13,17 +13,6 @@ void __fastcall *balloc(u32_t size); zone_t z_core; -static inline 
u32_t save_edx(void) -{ - u32_t val; - asm volatile ("movl %%edx, %0":"=r"(val)); - return val; -}; - -static inline void restore_edx(u32_t val) -{ - asm volatile (""::"d" (val) ); -}; static void buddy_system_create(zone_t *z); static void __fastcall buddy_system_free(zone_t *z, link_t *block); @@ -36,8 +25,6 @@ size_t buddy_conf_size(int max_order); static inline void frame_initialize(frame_t *frame); -void init_mm(); - static void zone_create(zone_t *z, pfn_t start, count_t count); static void zone_reserve(zone_t *z, pfn_t base, count_t count); @@ -558,8 +545,8 @@ addr_t __fastcall core_alloc(u32_t order) spinlock_unlock(&z_core.lock); safe_sti(efl); - DBG("core alloc: %x, size %x remain %d\n", v << FRAME_WIDTH, - ((1<>12); + // zone_free(&z_core, frame>>12); spinlock_unlock(&z_core.lock); safe_sti(efl); diff --git a/kernel/branches/kolibri_pe/core/pe.c b/kernel/branches/kolibri_pe/core/pe.c index 2cb86e1c9c..07d39b59bc 100644 --- a/kernel/branches/kolibri_pe/core/pe.c +++ b/kernel/branches/kolibri_pe/core/pe.c @@ -42,7 +42,6 @@ int __stdcall strncmp(const char *s1, const char *s2, size_t n); bool link_image(addr_t img_base); -md_t* __fastcall load_image(const char *path); /* void* __fastcall load_pe(const char *path) @@ -109,12 +108,12 @@ bool validate_pe(void *raw, size_t raw_size, bool is_exec) return true; } -md_t* __fastcall load_image(const char *path) +addr_t __fastcall load_image(const char *path) { PIMAGE_DOS_HEADER dos; PIMAGE_NT_HEADERS32 nt; - md_t *img_md; + // md_t *img_md; size_t img_size; addr_t img_base; @@ -147,16 +146,17 @@ md_t* __fastcall load_image(const char *path) img_size = nt->OptionalHeader.SizeOfImage; - img_md = md_alloc(img_size, PG_SW); +// img_md = md_alloc(img_size, PG_SW); + img_base = mem_alloc(img_size, PG_SW); - if( !img_md) + if( !img_base) { mem_free(raw); return NULL; }; - img_base = img_md->base; +// img_base = img_md->base; create_image(img_base, (addr_t)raw, true); @@ -165,7 +165,7 @@ md_t* __fastcall load_image(const char *path) // dos = (PIMAGE_DOS_HEADER)img_base; // nt = MakePtr( PIMAGE_NT_HEADERS32, dos, dos->e_lfanew); - return img_md; + return img_base; }; diff --git a/kernel/branches/kolibri_pe/core/slab.c b/kernel/branches/kolibri_pe/core/slab.c index d9b118888e..f785096a27 100644 --- a/kernel/branches/kolibri_pe/core/slab.c +++ b/kernel/branches/kolibri_pe/core/slab.c @@ -33,13 +33,15 @@ static slab_t * slab_space_alloc(slab_cache_t *cache, int flags) unsigned int i; u32_t p; - data = (void*)PA2KA(core_alloc(cache->order)); + DBG("%s order %d\n", __FUNCTION__, cache->order); + + data = (void*)PA2KA(frame_alloc(1 << cache->order)); if (!data) { return NULL; } slab = (slab_t*)slab_create(); if (!slab) { - core_free(KA2PA(data)); + frame_free(KA2PA(data)); return NULL; } @@ -74,12 +76,6 @@ static void * slab_obj_create(slab_cache_t *cache, int flags) spinlock_lock(&cache->slablock); if (list_empty(&cache->partial_slabs)) { - /* Allow recursion and reclaiming - * - this should work, as the slab control structures - * are small and do not need to allocate with anything - * other than frame_alloc when they are allocating, - * that's why we should get recursion at most 1-level deep - */ slab = slab_space_alloc(cache, flags); if (!slab) { @@ -211,7 +207,7 @@ _slab_cache_create(slab_cache_t *cache, /* Minimum slab order */ pages = SIZE2FRAMES(cache->size); /* We need the 2^order >= pages */ - if (pages == 1) + if (pages <= 1) cache->order = 0; else cache->order = fnzb(pages-1)+1; @@ -241,6 +237,8 @@ slab_cache_t * slab_cache_create( { 
slab_cache_t *cache; + DBG("%s\n", __FUNCTION__); + cache = (slab_cache_t*)slab_cache_alloc(); _slab_cache_create(cache, size, align, constructor, destructor, flags); return cache; @@ -337,22 +335,18 @@ static slab_t *slab_create() void *obj; u32_t p; + DBG("%s\n", __FUNCTION__); + // spinlock_lock(&cache->slablock); if (list_empty(&slab_cache->partial_slabs)) { - /* Allow recursion and reclaiming - * - this should work, as the slab control structures - * are small and do not need to allocate with anything - * other than frame_alloc when they are allocating, - * that's why we should get recursion at most 1-level deep - */ // spinlock_unlock(&cache->slablock); // slab = slab_create(); void *data; unsigned int i; - data = (void*)PA2KA(core_alloc(0)); + data = (void*)PA2KA(alloc_page()); if (!data) { return NULL; } @@ -400,20 +394,17 @@ static slab_cache_t * slab_cache_alloc() void *obj; u32_t *p; - if (list_empty(&slab_cache_cache.partial_slabs)) { - /* Allow recursion and reclaiming - * - this should work, as the slab control structures - * are small and do not need to allocate with anything - * other than frame_alloc when they are allocating, - * that's why we should get recursion at most 1-level deep - */ + DBG("%s\n", __FUNCTION__); + + if (list_empty(&slab_cache_cache.partial_slabs)) + { // spinlock_unlock(&cache->slablock); // slab = slab_create(); void *data; unsigned int i; - data = (void*)(PA2KA(core_alloc(0))); + data = (void*)(PA2KA(alloc_page())); if (!data) { return NULL; } @@ -437,7 +428,8 @@ static slab_cache_t * slab_cache_alloc() atomic_inc(&slab_cache_cache.allocated_slabs); // spinlock_lock(&cache->slablock); - } else { + } + else { slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link); list_remove(&slab->link); } @@ -457,6 +449,7 @@ static slab_cache_t * slab_cache_alloc() void slab_cache_init(void) { + DBG("%s\n", __FUNCTION__); _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t), sizeof(void *), NULL, NULL, diff --git a/kernel/branches/kolibri_pe/core/sys32.inc b/kernel/branches/kolibri_pe/core/sys32.inc index a54cef4411..00015e459e 100644 --- a/kernel/branches/kolibri_pe/core/sys32.inc +++ b/kernel/branches/kolibri_pe/core/sys32.inc @@ -638,23 +638,23 @@ term9: mov ecx,[edi+APPDATA.pl0_stack] sub ecx, OS_BASE - call @core_free@4 + call @frame_free@4 mov ecx,[edi+APPDATA.cur_dir] sub ecx, OS_BASE - call @core_free@4 + call @frame_free@4 mov ecx, [edi+APPDATA.io_map] cmp ecx, (tss._io_map_0-OS_BASE+PG_MAP) je @F - call @core_free@4 + call @frame_free@4 @@: mov ecx, [edi+APPDATA.io_map+4] cmp ecx, (tss._io_map_1-OS_BASE+PG_MAP) je @F - call @core_free@4 + call @frame_free@4 @@: mov eax, 0x20202020 stosd diff --git a/kernel/branches/kolibri_pe/core/taskman.inc b/kernel/branches/kolibri_pe/core/taskman.inc index 386d45fac2..383f264390 100644 --- a/kernel/branches/kolibri_pe/core/taskman.inc +++ b/kernel/branches/kolibri_pe/core/taskman.inc @@ -222,10 +222,9 @@ proc mnt_exec stdcall file_base:dword, file_size:dword, \ mov ebx,[slot_base] mov [ebx+APPDATA.dir_table],eax - mov eax,[hdr_mem] - mov [ebx+APPDATA.mem_size],eax + mov ecx,[hdr_mem] + mov [ebx+APPDATA.mem_size],ecx - mov ecx, [hdr_mem] mov edi, [file_size] ; add edi, 4095 ; and edi, not 4095 @@ -337,8 +336,8 @@ proc pe_app_param stdcall path:dword, raw:dword, ex_pg_dir:dword, ex_stack:dword ;mov [ebx+APPDATA.mem_size],eax - mov ecx, 1 - call @core_alloc@4 + mov ecx, 2 + call @frame_alloc@4 lea edi, [eax+OS_BASE] mov [pl0_stack], edi @@ -375,8 +374,7 @@ proc pe_app_param stdcall 
path:dword, raw:dword, ex_pg_dir:dword, ex_stack:dword mov ecx, [def_cursor] mov [ebx+APPDATA.cursor],ecx - xor ecx, ecx - call @core_alloc@4 + call _alloc_page lea edi, [eax+OS_BASE] ; FIXME mov esi,[current_slot] @@ -542,7 +540,9 @@ proc create_app_space stdcall, app_size:dword,img_base:dword,img_size:dword cld rep stosd - mov ecx, 512 + mov esi, [img_base] + + mov ecx, 512 ; FIX only core tabs mov esi, _sys_pdbr+(HEAP_BASE shr 20) rep movsd @@ -553,7 +553,9 @@ proc create_app_space stdcall, app_size:dword,img_base:dword,img_size:dword mov eax, edi call set_cr3 - mov edx, [app_tabs] + mov esi, [img_base] + + mov ebx, [app_tabs] mov edi, master_tab @@: call _alloc_page @@ -562,7 +564,7 @@ proc create_app_space stdcall, app_size:dword,img_base:dword,img_size:dword or eax, PG_UW stosd - dec edx + dec ebx jnz @B mov edi, page_tabs @@ -571,11 +573,9 @@ proc create_app_space stdcall, app_size:dword,img_base:dword,img_size:dword xor eax, eax rep stosd - mov ecx, [app_pages] xor ebx, ebx .alloc: - xor ecx, ecx - call @core_alloc@4 + call _alloc_page test eax, eax jz .fail @@ -615,8 +615,7 @@ align 4 lea ebx, [ecx+0x3FFFFF] - xor ecx, ecx - call @core_alloc@4 + call _alloc_page test eax, eax mov [esp], eax jz .fail @@ -627,15 +626,14 @@ align 4 cld rep stosd - mov ecx, 512 + mov ecx, 512 ; FIX only core tabs mov esi, _sys_pdbr+(HEAP_BASE shr 20) rep movsd mov esi, [esp] shr ebx, 22 .new_ptab: - xor ecx, ecx - call @core_alloc@4 + call _alloc_page test eax, eax jz .fail @@ -651,8 +649,7 @@ align 4 dec ebx jnz .new_ptab - xor ecx, ecx - call @core_alloc@4 + call _alloc_page test eax, eax jz .fail @@ -701,7 +698,7 @@ proc destroy_page_table stdcall, pg_tab:dword test ecx, 1 shl 9 jnz .next ;skip shared pages - call @core_free@4 + call @frame_free@4 .next: add esi, 4 dec ebx @@ -759,14 +756,14 @@ proc destroy_app_space stdcall, pg_dir:dword stdcall destroy_page_table, eax mov ecx, [esi] - call @core_free@4 + call @frame_free@4 .next: add esi, 4 dec edi jnz .destroy mov ecx, [pg_dir] - call @core_free@4 + call @frame_free@4 .exit: dec [pg_data.pg_mutex] ret @@ -1150,8 +1147,8 @@ proc set_app_params stdcall,slot:dword, params:dword,\ pl0_stack dd ? 
endl - mov ecx, 1 ;(RING0_STACK_SIZE+512) shr 12 - call @core_alloc@4 + mov ecx, 2 ;(RING0_STACK_SIZE+512) shr 12 + call @frame_alloc@4 add eax, OS_BASE mov [pl0_stack], eax @@ -1196,12 +1193,11 @@ proc set_app_params stdcall,slot:dword, params:dword,\ mov [SLOT_BASE+APPDATA.saved_esp0+ebx], eax call _alloc_page - add eax, OS_BASE + lea edi, [eax + OS_BASE] + mov [ebx+SLOT_BASE+APPDATA.cur_dir], edi mov esi,[current_slot] mov esi,[esi+APPDATA.cur_dir] mov ecx,0x1000/4 - mov edi,eax - mov [ebx+SLOT_BASE+APPDATA.cur_dir],eax rep movsd shr ebx,3 @@ -1302,7 +1298,7 @@ proc set_app_params stdcall,slot:dword, params:dword,\ mov [SLOT_BASE+ebx*8+APPDATA.debugger_slot],eax .no_debug: mov [CURRENT_TASK+ebx+TASKDATA.state], cl - DEBUGF 1,"%s",new_process_running + ; DEBUGF 1,"%s",new_process_running ret endp diff --git a/kernel/branches/kolibri_pe/core/thread.c b/kernel/branches/kolibri_pe/core/thread.c index e3f5751435..8071ce4a21 100644 --- a/kernel/branches/kolibri_pe/core/thread.c +++ b/kernel/branches/kolibri_pe/core/thread.c @@ -14,6 +14,8 @@ extern addr_t sys_pdbr; void init_threads() { + DBG("%s\n", __FUNCTION__); + thr_slab = slab_cache_create(sizeof(thr_t), 16, NULL,NULL,SLAB_CACHE_MAGDEFERRED); }; @@ -27,8 +29,10 @@ thr_t* __fastcall create_systhread(addr_t entry_ptr) thr_t *thr; addr_t thr_stack; + DBG("%s\n", __FUNCTION__); + thr = (thr_t*)slab_alloc(thr_slab,0); - thr_stack = PA2KA(core_alloc(1)); + thr_stack = PA2KA(frame_alloc(2)); thr_cnt++; diff --git a/kernel/branches/kolibri_pe/fs/parse_fn.inc b/kernel/branches/kolibri_pe/fs/parse_fn.inc index 35efb69588..008abd2587 100644 --- a/kernel/branches/kolibri_pe/fs/parse_fn.inc +++ b/kernel/branches/kolibri_pe/fs/parse_fn.inc @@ -85,8 +85,7 @@ endp proc load_file_parse_table - xor eac, ecx - call @core_alloc@4 + call _alloc_page add eax, OS_BASE mov [tmp_file_name_table],eax mov edi,eax diff --git a/kernel/branches/kolibri_pe/include/core.h b/kernel/branches/kolibri_pe/include/core.h index 53e61187fd..4e96b07b74 100644 --- a/kernel/branches/kolibri_pe/include/core.h +++ b/kernel/branches/kolibri_pe/include/core.h @@ -4,7 +4,7 @@ #define LOAD_BASE 0x00100000 -#define page_tabs 0xDDC00000 +#define page_tabs 0xDD800000 #define master_tab (page_tabs+(page_tabs>>10)) @@ -20,6 +20,10 @@ #define sel_srv_stack 0x39 + +#define __export __attribute__ ((dllexport)) + + void printf (const char *format, ...); #define CALLER ((addr_t) __builtin_return_address(0)) @@ -74,11 +78,10 @@ static inline void safe_sti(eflags_t efl) asm volatile ( "pushl %0\n\t" "popfl\n" - : : "r" (efl) - ); + : : "r" (efl)); } -static inline count_t fnzb(u32_t arg) +static inline index_t fnzb(u32_t arg) { count_t n; asm volatile ( @@ -88,7 +91,7 @@ static inline count_t fnzb(u32_t arg) return n; } -static inline count_t _bsf(u32_t arg) +static inline index_t _bsf(u32_t arg) { count_t n; asm volatile ( @@ -201,7 +204,7 @@ typedef struct }ioctl_t; -typedef struct +typedef struct __attribute__ ((packed)) { u32_t code; union @@ -214,7 +217,7 @@ typedef struct u16_t x; /* cursor x */ u16_t y; /* cursor y */ u32_t unused; - }__attribute__ ((packed)); + }; struct /* realtime io */ { @@ -237,6 +240,7 @@ typedef struct }event_t; +void __fastcall dump_file(addr_t addr, size_t size); diff --git a/kernel/branches/kolibri_pe/include/mm.h b/kernel/branches/kolibri_pe/include/mm.h index 4867e7575e..cad5ed15cd 100644 --- a/kernel/branches/kolibri_pe/include/mm.h +++ b/kernel/branches/kolibri_pe/include/mm.h @@ -1,13 +1,24 @@ +typedef struct +{ + link_t link; + addr_t base; + size_t size; + 
addr_t pte[0]; + +}mmap_t; + + typedef struct { link_t buddy_link; /**< link to the next free block inside one order */ - count_t refcount; /**< tracking of shared frames */ - u32_t buddy_order; /**< buddy system block order */ + u16_t refcount; /**< tracking of shared frames */ + u16_t buddy_order; /**< buddy system block order */ void *parent; /**< If allocated by slab, this points there */ } frame_t; -typedef struct { +typedef struct +{ SPINLOCK_DECLARE(lock); /**< this lock protects everything below */ pfn_t base; /**< frame_no of the first frame in the frames array */ count_t count; /**< Size of zone */ @@ -44,9 +55,7 @@ typedef struct #define PAGE_SIZE 4096 -#define FRAME_WIDTH 12 - -#define BUDDY_SYSTEM_INNER_BLOCK 0xff +#define PAGE_WIDTH 12 # define PA2KA(x) (((addr_t) (x)) + OS_BASE) @@ -56,39 +65,39 @@ static inline count_t SIZE2FRAMES(size_t size) { if (!size) return 0; - return (count_t) ((size - 1) >> FRAME_WIDTH) + 1; + return (count_t) ((size - 1) >> PAGE_WIDTH) + 1; } static inline addr_t PFN2ADDR(pfn_t frame) { - return (addr_t) (frame << FRAME_WIDTH); + return (addr_t) (frame << PAGE_WIDTH); } static inline pfn_t ADDR2PFN(addr_t addr) { - return (pfn_t) (addr >> FRAME_WIDTH); + return (pfn_t) (addr >> PAGE_WIDTH); }; void init_mm(); +void init_pg_slab(); void* __fastcall frame_get_parent(pfn_t pfn); void __fastcall frame_set_parent(pfn_t pfn, void *data); -void frame_free(pfn_t frame); - addr_t __fastcall core_alloc(u32_t order); void __fastcall core_free(addr_t frame); -pfn_t alloc_page() __attribute__ ((deprecated)); - -#define __export __attribute__ ((dllexport)) +addr_t alloc_page(void); md_t* __fastcall md_alloc(size_t size, u32_t flags) ; void __fastcall md_free(md_t *md); -void* __fastcall __export mem_alloc(size_t size, u32_t flags) asm ("MemAlloc"); -void __fastcall __export mem_free(void *mem) asm ("MemFree"); +addr_t __fastcall __export mem_alloc(size_t size, u32_t flags) asm ("MemAlloc"); +void __fastcall __export mem_free(addr_t mem) asm ("MemFree"); + +addr_t __fastcall frame_alloc(count_t count); +size_t __fastcall frame_free(addr_t addr); diff --git a/kernel/branches/kolibri_pe/include/pe.h b/kernel/branches/kolibri_pe/include/pe.h index 7eb136ffcb..108355dba9 100644 --- a/kernel/branches/kolibri_pe/include/pe.h +++ b/kernel/branches/kolibri_pe/include/pe.h @@ -201,7 +201,7 @@ bool validate_pe(void *raw, size_t raw_size, bool is_exec); dll_t * find_dll(link_t *list, const char *name); -md_t* __fastcall load_image(const char *path); +addr_t __fastcall load_image(const char *path); void create_image(addr_t img_base, addr_t raw, bool force_clear) asm ("CreateImage"); diff --git a/kernel/branches/kolibri_pe/kernel.asm b/kernel/branches/kolibri_pe/kernel.asm index 413a369e59..6f429fff49 100644 --- a/kernel/branches/kolibri_pe/kernel.asm +++ b/kernel/branches/kolibri_pe/kernel.asm @@ -122,6 +122,7 @@ public _gdts public __hlt public _panic_printf public _printf +public _dump public _pg_balloc public _mem_amount public @balloc@4 @@ -215,11 +216,16 @@ extrn _16bit_end extrn _poweroff -extrn @core_alloc@4 -extrn @core_free@4 +extrn @pf_dump@8 + +extrn @frame_alloc@4 +extrn @frame_free@4 extrn @find_large_md@4 +extrn @heap_fault@8 + + extrn _MemAlloc extrn _MemFree @@ -486,7 +492,7 @@ no_mode_0x12: call rerouteirqs ; Initialize system V86 machine - call init_sys_v86 + ; call init_sys_v86 ; TIMER SET TO 1/100 S @@ -587,7 +593,7 @@ include 'detect/disks.inc' call boot_log ;call setirqreadports -; SET UP OS TASK +; SETUP OS TASK mov esi,boot_setostask call boot_log @@
-597,7 +603,7 @@ include 'detect/disks.inc' mov dword [SLOT_BASE+APPDATA.fpu_handler], eax mov dword [SLOT_BASE+APPDATA.sse_handler], eax - ; name for OS/IDLE process +; name for OS/IDLE process mov dword [SLOT_BASE+256+APPDATA.app_name], dword 'OS/I' mov dword [SLOT_BASE+256+APPDATA.app_name+4], dword 'DLE ' @@ -863,14 +869,14 @@ checkidle: jnz idle_exit call _rdtsc mov ecx,eax - idle_loop: +idle_loop: hlt cmp [check_idle_semaphore],0 jne idle_loop_exit mov eax,[timer_ticks] ;[0xfdf0] cmp ebx,eax jz idle_loop - idle_loop_exit: +idle_loop_exit: mov [idlemem],eax call _rdtsc sub eax,ecx @@ -3373,16 +3379,16 @@ redrawscreen: ;mov ecx,0 ; redraw flags for apps xor ecx,ecx - newdw2: +newdw2: inc ecx push ecx mov eax,ecx shl eax,5 - add eax,window_data + add eax, window_data - cmp eax,[esp+4] + cmp eax, [esp+4] je not_this_task ; check if window in redraw area mov edi,eax @@ -3429,7 +3435,7 @@ bgli: jz newdw8 test al,al jz .az - lea eax,[edi+draw_data+(0x100000000-OS_BASE)] + lea eax,[edi+draw_data-window_data] mov ebx,[dlx] cmp ebx,[eax+RECT.left] jae @f @@ -3454,7 +3460,7 @@ bgli: .az: mov eax,edi - add eax, draw_data+(0x100000000-OS_BASE) + add eax, draw_data-window_data mov ebx,[dlx] ; set limits mov [eax + RECT.left], ebx @@ -3465,7 +3471,7 @@ bgli: mov ebx,[dlye] mov [eax + RECT.bottom], ebx - sub eax,draw_data+(0x100000000-OS_BASE) + sub eax,draw_data - window_data cmp dword [esp],1 jne nobgrd diff --git a/kernel/branches/kolibri_pe/ld.x b/kernel/branches/kolibri_pe/ld.x index 4217419724..d027b8135d 100644 --- a/kernel/branches/kolibri_pe/ld.x +++ b/kernel/branches/kolibri_pe/ld.x @@ -25,9 +25,10 @@ SECTIONS .flat . + 0x00400000: { *(.flat) *(.data) + . = ALIGN(4096); } - .edata ALIGN(32): + .edata : { *(.edata) _code_end = .; diff --git a/kernel/branches/kolibri_pe/makefile b/kernel/branches/kolibri_pe/makefile index 15aadfe000..481846248b 100644 --- a/kernel/branches/kolibri_pe/makefile +++ b/kernel/branches/kolibri_pe/makefile @@ -6,7 +6,7 @@ INCLUDE = include/ DEFS = -DUSE_SMP -DCONFIG_DEBUG -CFLAGS = -c -O2 -DCONFIG_DEBUG -I $(INCLUDE) -fomit-frame-pointer -fno-builtin +CFLAGS = -c -O2 $(DEFS) -I $(INCLUDE) -fomit-frame-pointer -fno-builtin-printf LDFLAGS = -shared -s -Map kernel.map --image-base 0x100000 --file-alignment 32 KERNEL_SRC:= \ @@ -33,9 +33,9 @@ KERNEL_SRC:= \ PE_SRC:= \ init.asm \ mbi.c \ - mm.c \ + heap.c \ slab.c \ - heap.c \ + frame.c \ pe.c \ dll.c \ spinlock.c \ @@ -70,6 +70,9 @@ kernel.mnt: kernel.obj bin/export.obj $(PE_OBJS) Makefile ld.x bin/%.obj : core/%.c $(H_SRC) Makefile $(CC) $(CFLAGS) -o $@ $< +bin/%.obj : gui/%.c $(H_SRC) Makefile + $(CC) $(CFLAGS) -o $@ $< + bin/%.obj: core/%.asm Makefile $(FASM) $< $@ diff --git a/kernel/branches/kolibri_pe/printf.inc b/kernel/branches/kolibri_pe/printf.inc index 54a2ff8272..0529cb77e3 100644 --- a/kernel/branches/kolibri_pe/printf.inc +++ b/kernel/branches/kolibri_pe/printf.inc @@ -5,6 +5,24 @@ BYTE equ byte PTR equ +align 4 + +_dump: + + mov ecx, DWORD PTR [esp+4] +@@: + mov edx, 0x3FD + in al, dx + test al, 96 + je @B + + mov dl, -8 + mov eax, ecx + out dx, al + ret + + +align 4 _putc: mov ecx, DWORD PTR [esp+4] .L13: diff --git a/kernel/branches/kolibri_pe/unpacker.inc b/kernel/branches/kolibri_pe/unpacker.inc index 5e70709f9b..157da89fc6 100644 --- a/kernel/branches/kolibri_pe/unpacker.inc +++ b/kernel/branches/kolibri_pe/unpacker.inc @@ -28,7 +28,11 @@ unpack: popad ret 8 .lzma: + pushfd + cli call .lzma_unpack + popfd + .common: pop eax test al, 0x80
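
Note on the page allocator introduced in core/frame.c: single pages are handed out from "page slabs". Each slab is one order-9 buddy block (512 pages, 2 MiB) whose first page holds the slab header; the remaining 511 pages form a free list of indices, where list[i] names the index that follows i and next names the head, so alloc_page() and frame_free() pop and push in O(1). Multi-page requests in frame_alloc() are rounded up to a power of two with order = fnzb(count-1)+1 (fnzb is a bsr wrapper, so count = 5 gives fnzb(4)+1 = 3, i.e. 8 frames). Below is a minimal user-space sketch of that free-list discipline; SLAB_PAGES and the toy_* names are illustrative, not kernel identifiers, and the real pslab_t additionally carries a link, a spinlock, a state flag and the physical base used to turn an index into a page address.

/* toy_pslab.c -- sketch of the intra-slab page free list, under the
 * assumed semantics of create_page_slab()/alloc_page() above. */
#include <assert.h>
#include <stdio.h>

#define SLAB_PAGES 512              /* one order-9 buddy block */

typedef struct {
    unsigned avail;                 /* free pages left in this slab  */
    unsigned next;                  /* index of the first free page  */
    int list[SLAB_PAGES];           /* list[i] = index following i   */
} toy_slab_t;

static void toy_slab_init(toy_slab_t *s)
{
    int i;
    /* Page 0 of the block holds the header itself, so only 511
     * indices are handed out (cf. slab->avail = 511 and
     * slab->base = v + PAGE_SIZE in create_page_slab()). */
    s->avail = SLAB_PAGES - 1;
    s->next = 0;
    for (i = 0; i < SLAB_PAGES - 1; i++)
        s->list[i] = i + 1;
}

static int toy_page_alloc(toy_slab_t *s)
{
    int idx;
    if (s->avail == 0)
        return -1;                  /* caller would grab a fresh slab */
    idx = s->next;                  /* pop the head of the free list  */
    s->next = s->list[idx];
    s->avail--;
    return idx;
}

static void toy_page_free(toy_slab_t *s, int idx)
{
    s->list[idx] = s->next;         /* push the index back on the list */
    s->next = idx;
    s->avail++;
}

int main(void)
{
    toy_slab_t s;
    int a, b;

    toy_slab_init(&s);
    a = toy_page_alloc(&s);
    b = toy_page_alloc(&s);
    toy_page_free(&s, a);
    assert(toy_page_alloc(&s) == a);    /* LIFO reuse, O(1) operations */
    printf("indices %d %d, %u pages left\n", a, b, s.avail);
    return 0;
}

Kept as an index list rather than a bitmap, the bookkeeping costs one int per page but both operations stay constant-time, which matches how the kernel versions run under the page_cache spinlock with interrupts disabled.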