new memory management for kernel heap

git-svn-id: svn://kolibrios.org@888 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2008-10-29 15:55:39 +00:00
parent 3f22b06b9f
commit b7083f5742
11 changed files with 415 additions and 348 deletions
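In short (a reading of the diff below, not wording from the original commit message): heap_t now keeps separate availability masks (av_mapped, av_unmapped) and free-list arrays (mapped[32], unmapped[32]) so descriptors for page-backed and address-space-only regions are managed independently; find_small_md is split into find_unmapped_md and find_mapped_md, frees go through free_unmapped_md/free_mapped_md, and the assembly callers switch from free_page to the fastcall @core_free@4.

Both heaps keep the same segregated-fit lookup: list i holds free blocks of i+1 granules (4 KiB for the small heap, 4 MiB for the large one), list 31 collects everything bigger, and a 32-bit mask marks which lists are non-empty. A minimal C sketch of that lookup (pick_list and GRANULARITY are illustrative names, not kernel symbols):

#include <stddef.h>
#include <stdint.h>

#define GRANULARITY 12          /* small heap: 4 KiB units; the large heap uses 22 */

/* Return the index of the first non-empty free list able to satisfy
 * 'size' (a multiple of the granule), or 32 if none can. List i holds
 * blocks of i+1 granules; list 31 collects everything larger. */
static unsigned pick_list(uint32_t avail_mask, size_t size)
{
    unsigned idx = (size >> GRANULARITY) - 1;
    if (idx > 31)
        idx = 31;

    uint32_t mask = avail_mask & (~0u << idx);  /* same as av & (-1<<idx0) */
    if (!mask)
        return 32;                              /* no list is large enough */

    return __builtin_ctz(mask);                 /* lowest set bit, like _bsf */
}

For list 31 the kernel additionally walks the list, since the blocks collected there are not all the same size.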

View File

@@ -103,7 +103,6 @@ kernel_export:
     dd szPciWrite32 , pci_write32
     dd szAllocPage  , _alloc_page   ;stdcall
-    dd szFreePage   , free_page
     dd szMapPage    , map_page      ;stdcall
     dd szMapSpace   , map_space
     dd szMapIoMem   , map_io_mem    ;stdcall

View File

@@ -6,6 +6,8 @@
 #include <mm.h>
 #include <slab.h>
 
+#define page_tabs 0xDF800000
+
 typedef struct
 {
     link_t link;
@@ -20,12 +22,15 @@ typedef struct
 #define MD_USED  2
 
 typedef struct {
-    SPINLOCK_DECLARE(lock);   /**< this lock protects everything below */
-    u32_t  availmask;
-    link_t free[32];
+    u32_t  av_mapped;
+    u32_t  av_unmapped;
+    link_t mapped[32];
+    link_t unmapped[32];
     link_t used;
+    SPINLOCK_DECLARE(lock);   /**< this lock protects everything below */
 }heap_t;
@@ -37,19 +42,24 @@ heap_t lheap;
 heap_t sheap;
 
-static inline void _set_lmask(count_t idx)
-{ asm volatile ("bts %0, _lheap"::"r"(idx):"cc"); }
+static inline void _set_lavu(count_t idx)
+{ asm volatile ("bts %0, _lheap+4"::"r"(idx):"cc"); }
 
-static inline void _reset_lmask(count_t idx)
-{ asm volatile ("btr %0, _lheap"::"r"(idx):"cc"); }
+static inline void _reset_lavu(count_t idx)
+{ asm volatile ("btr %0, _lheap+4"::"r"(idx):"cc"); }
 
-static inline void _set_smask(count_t idx)
+static inline void _set_savm(count_t idx)
 { asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); }
 
-static inline void _reset_smask(count_t idx)
+static inline void _reset_savm(count_t idx)
 { asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); }
 
+static inline void _set_savu(count_t idx)
+{ asm volatile ("bts %0, _sheap+4"::"r"(idx):"cc"); }
+
+static inline void _reset_savu(count_t idx)
+{ asm volatile ("btr %0, _sheap+4"::"r"(idx):"cc"); }
+
 int __fastcall init_heap(addr_t base, size_t size)
 {
@@ -63,8 +73,11 @@ int __fastcall init_heap(addr_t base, size_t size)
     for (i = 0; i < 32; i++)
     {
-        list_initialize(&lheap.free[i]);
-        list_initialize(&sheap.free[i]);
+        list_initialize(&lheap.mapped[i]);
+        list_initialize(&lheap.unmapped[i]);
+        list_initialize(&sheap.mapped[i]);
+        list_initialize(&sheap.unmapped[i]);
     };
 
     list_initialize(&lheap.used);
@ -81,11 +94,11 @@ int __fastcall init_heap(addr_t base, size_t size)
md->parent = NULL; md->parent = NULL;
md->state = MD_FREE; md->state = MD_FREE;
list_prepend(&md->link, &lheap.free[31]); list_prepend(&md->link, &lheap.unmapped[31]);
lheap.availmask = 0x80000000; lheap.av_mapped = 0x00000000;
sheap.availmask = 0x00000000; lheap.av_unmapped = 0x80000000;
sheap.av_mapped = 0x00000000;
// phm_slab = slab_cache_create(sizeof(phismem_t), 32,NULL,NULL,SLAB_CACHE_MAGDEFERRED); sheap.av_unmapped = 0x00000000;
return 1; return 1;
}; };
@@ -100,14 +113,14 @@ md_t* __fastcall find_large_md(size_t size)
     ASSERT((size & 0x3FFFFF) == 0);
 
     idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31;
-    mask = lheap.availmask & ( -1<<idx0 );
+    mask = lheap.av_unmapped & ( -1<<idx0 );
 
     if(mask)
     {
         if(idx0 == 31)
         {
-            md_t *tmp = (md_t*)lheap.free[31].next;
-            while((link_t*)tmp != &lheap.free[31])
+            md_t *tmp = (md_t*)lheap.unmapped[31].next;
+            while((link_t*)tmp != &lheap.unmapped[31])
             {
                 if(tmp->size >= size)
                 {
@@ -123,9 +136,9 @@ md_t* __fastcall find_large_md(size_t size)
         {
             idx0 = _bsf(mask);
-            ASSERT( !list_empty(&lheap.free[idx0]))
-            md = (md_t*)lheap.free[idx0].next;
+            ASSERT( !list_empty(&lheap.unmapped[idx0]))
+            md = (md_t*)lheap.unmapped[idx0].next;
         };
     }
     else
@@ -134,8 +147,8 @@ md_t* __fastcall find_large_md(size_t size)
     ASSERT(md->state == MD_FREE);
 
     list_remove((link_t*)md);
-    if(list_empty(&lheap.free[idx0]))
-        _reset_lmask(idx0);
+    if(list_empty(&lheap.unmapped[idx0]))
+        _reset_lavu(idx0);
 
     if(md->size > size)
     {
@@ -147,6 +160,7 @@ md_t* __fastcall find_large_md(size_t size)
         new_md->base = md->base;
         new_md->size = size;
+        new_md->parent = NULL;
         new_md->state = MD_USED;
 
         md->base+= size;
@@ -154,8 +168,8 @@ md_t* __fastcall find_large_md(size_t size)
         idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31;
 
-        list_prepend(&md->link, &lheap.free[idx1]);
-        _set_lmask(idx1);
+        list_prepend(&md->link, &lheap.unmapped[idx1]);
+        _set_lavu(idx1);
 
         return new_md;
     };
@@ -164,7 +178,7 @@ md_t* __fastcall find_large_md(size_t size)
     return md;
 }
 
-md_t* __fastcall find_small_md(size_t size)
+md_t* __fastcall find_unmapped_md(size_t size)
 {
     eflags_t efl;
@@ -178,18 +192,18 @@ md_t* __fastcall find_small_md(size_t size)
     efl = safe_cli();
 
     idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
-    mask = sheap.availmask & ( -1<<idx0 );
+    mask = sheap.av_unmapped & ( -1<<idx0 );
 
-    DBG("smask %x size %x idx0 %x mask %x\n",sheap.availmask, size, idx0, mask);
+    DBG("smask %x size %x idx0 %x mask %x\n",sheap.av_unmapped, size, idx0, mask);
 
     if(mask)
     {
         if(idx0 == 31)
         {
-            ASSERT( !list_empty(&sheap.free[31]));
-            md_t *tmp = (md_t*)sheap.free[31].next;
-            while((link_t*)tmp != &sheap.free[31])
+            ASSERT( !list_empty(&sheap.unmapped[31]));
+            md_t *tmp = (md_t*)sheap.unmapped[31].next;
+            while((link_t*)tmp != &sheap.unmapped[31])
             {
                 if(tmp->size >= size)
                 {
@@ -203,9 +217,9 @@ md_t* __fastcall find_small_md(size_t size)
         {
             idx0 = _bsf(mask);
-            ASSERT( !list_empty(&sheap.free[idx0]));
-            md = (md_t*)sheap.free[idx0].next;
+            ASSERT( !list_empty(&sheap.unmapped[idx0]));
+            md = (md_t*)sheap.unmapped[idx0].next;
         }
     };
@@ -214,10 +228,11 @@ md_t* __fastcall find_small_md(size_t size)
         DBG("remove md %x\n", md);
         ASSERT(md->state==MD_FREE);
+        ASSERT(md->parent != NULL);
 
         list_remove((link_t*)md);
-        if(list_empty(&sheap.free[idx0]))
-            _reset_smask(idx0);
+        if(list_empty(&sheap.unmapped[idx0]))
+            _reset_savu(idx0);
     }
     else
     {
@@ -232,6 +247,11 @@ md_t* __fastcall find_small_md(size_t size)
             return NULL;
         };
 
+        ASSERT(lmd->size != 0);
+        ASSERT(lmd->base != 0);
+        ASSERT((lmd->base & 0x3FFFFF) == 0);
+        ASSERT(lmd->parent == NULL);
+
         md = (md_t*)slab_alloc(md_slab,0);    /* FIXME check */
 
         link_initialize(&md->link);
@@ -264,16 +284,16 @@ md_t* __fastcall find_small_md(size_t size)
     DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1);
 
     if( idx1 < 31)
-        list_prepend(&md->link, &sheap.free[idx1]);
+        list_prepend(&md->link, &sheap.unmapped[idx1]);
     else
     {
-        if( list_empty(&sheap.free[31]))
-            list_prepend(&md->link, &sheap.free[31]);
+        if( list_empty(&sheap.unmapped[31]))
+            list_prepend(&md->link, &sheap.unmapped[31]);
         else
         {
-            md_t *tmp = (md_t*)sheap.free[31].next;
-            while((link_t*)tmp != &sheap.free[31])
+            md_t *tmp = (md_t*)sheap.unmapped[31].next;
+            while((link_t*)tmp != &sheap.unmapped[31])
             {
                 if(md->base < tmp->base)
                     break;
@@ -283,7 +303,7 @@ md_t* __fastcall find_small_md(size_t size)
         };
     };
-    _set_smask(idx1);
+    _set_savu(idx1);
 
     safe_sti(efl);
@@ -297,13 +317,167 @@ md_t* __fastcall find_small_md(size_t size)
     return md;
 }
 
-void __fastcall free_small_md(md_t *md)
+md_t* __fastcall find_mapped_md(size_t size)
+{
+    eflags_t efl;
+
+    md_t *md = NULL;
+
+    count_t idx0;
+    u32_t mask;
+
+    ASSERT((size & 0xFFF) == 0);
+
+    efl = safe_cli();
+
+    idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
+    mask = sheap.av_mapped & ( -1<<idx0 );
+
+    DBG("small av_mapped %x size %x idx0 %x mask %x\n",sheap.av_mapped, size,
+         idx0, mask);
+
+    if(mask)
+    {
+        if(idx0 == 31)
+        {
+            ASSERT( !list_empty(&sheap.mapped[31]));
+
+            md_t *tmp = (md_t*)sheap.mapped[31].next;
+            while((link_t*)tmp != &sheap.mapped[31])
+            {
+                if(tmp->size >= size)
+                {
+                    md = tmp;
+                    break;
+                };
+                tmp = (md_t*)tmp->link.next;
+            };
+        }
+        else
+        {
+            idx0 = _bsf(mask);
+
+            ASSERT( !list_empty(&sheap.mapped[idx0]));
+
+            md = (md_t*)sheap.mapped[idx0].next;
+        }
+    };
+
+    if(md)
+    {
+        DBG("remove md %x\n", md);
+        ASSERT(md->state==MD_FREE);
+
+        list_remove((link_t*)md);
+        if(list_empty(&sheap.mapped[idx0]))
+            _reset_savm(idx0);
+    }
+    else
+    {
+        md_t *lmd;
+        addr_t frame;
+        addr_t *pte;
+        int i;
+
+        lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF);
+
+        DBG("get large md %x\n", lmd);
+
+        if( !lmd)
+        {
+            safe_sti(efl);
+            return NULL;
+        };
+
+        ASSERT(lmd->size != 0);
+        ASSERT(lmd->base != 0);
+        ASSERT((lmd->base & 0x3FFFFF) == 0);
+        ASSERT(lmd->parent == NULL);
+
+        frame = core_alloc(10);                      /* FIXME check */
+
+        lmd->parent = (void*)frame;
+
+        pte = &((addr_t*)page_tabs)[lmd->base>>12];  /* FIXME remove */
+
+        for(i = 0; i < 1024; i++)
+        {
+            *pte++ = frame;
+            frame+= 4096;
+        }
+
+        md = (md_t*)slab_alloc(md_slab,0);           /* FIXME check */
+
+        link_initialize(&md->link);
+        list_initialize(&md->adj);
+        md->base = lmd->base;
+        md->size = lmd->size;
+        md->parent = lmd;
+        md->state = MD_USED;
+    };
+
+    if(md->size > size)
+    {
+        count_t idx1;
+        md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */
+
+        link_initialize(&new_md->link);
+        list_insert(&new_md->adj, &md->adj);
+
+        new_md->base = md->base;
+        new_md->size = size;
+        new_md->parent = md->parent;
+
+        md->base+= size;
+        md->size-= size;
+        md->state = MD_FREE;
+
+        idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
+
+        DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1);
+
+        if( idx1 < 31)
+            list_prepend(&md->link, &sheap.mapped[idx1]);
+        else
+        {
+            if( list_empty(&sheap.mapped[31]))
+                list_prepend(&md->link, &sheap.mapped[31]);
+            else
+            {
+                md_t *tmp = (md_t*)sheap.mapped[31].next;
+                while((link_t*)tmp != &sheap.mapped[31])
+                {
+                    if(md->base < tmp->base)
+                        break;
+                    tmp = (md_t*)tmp->link.next;
+                }
+                list_insert(&md->link, &tmp->link);
+            };
+        };
+        _set_savm(idx1);
+
+        md = new_md;
+    };
+
+    md->state = MD_USED;
+
+    safe_sti(efl);
+
+    return md;
+}
+
+void __fastcall free_unmapped_md(md_t *md)
 {
     eflags_t efl ;
     md_t *fd;
     md_t *bk;
     count_t idx;
 
+    ASSERT(md->parent != NULL);
+
     efl = safe_cli();
     spinlock_lock(&sheap.lock);
@@ -317,8 +491,8 @@ void __fastcall free_small_md(md_t *md)
     idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;
 
     list_remove((link_t*)fd);
-    if(list_empty(&sheap.free[idx]))
-        _reset_smask(idx);
+    if(list_empty(&sheap.unmapped[idx]))
+        _reset_savu(idx);
 
     md->size+= fd->size;
     md->adj.next = fd->adj.next;
@@ -330,8 +504,8 @@ void __fastcall free_small_md(md_t *md)
     idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;
 
     list_remove((link_t*)bk);
-    if(list_empty(&sheap.free[idx]))
-        _reset_smask(idx);
+    if(list_empty(&sheap.unmapped[idx]))
+        _reset_savu(idx);
 
     bk->size+= md->size;
     bk->adj.next = md->adj.next;
@@ -345,19 +519,19 @@ void __fastcall free_small_md(md_t *md)
     idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
 
-    _set_smask(idx);
+    _set_savu(idx);
 
     if( idx < 31)
-        list_prepend(&md->link, &sheap.free[idx]);
+        list_prepend(&md->link, &sheap.unmapped[idx]);
     else
     {
-        if( list_empty(&sheap.free[31]))
-            list_prepend(&md->link, &sheap.free[31]);
+        if( list_empty(&sheap.unmapped[31]))
+            list_prepend(&md->link, &sheap.unmapped[31]);
         else
         {
-            md_t *tmp = (md_t*)sheap.free[31].next;
-            while((link_t*)tmp != &sheap.free[31])
+            md_t *tmp = (md_t*)sheap.unmapped[31].next;
+            while((link_t*)tmp != &sheap.unmapped[31])
             {
                 if(md->base < tmp->base)
                     break;
@@ -371,59 +545,82 @@ void __fastcall free_small_md(md_t *md)
     };
 
-#define page_tabs 0xDF800000
-
-/*
-phismem_t* __fastcall phis_alloc(count_t count)
+void __fastcall free_mapped_md(md_t *md)
 {
-    phismem_t *phm;
-    count_t tmp;
-    phm = (phismem_t*)slab_alloc(phm_slab, 0);
-
-    phm->count = count;
-    tmp = count;
-    while(tmp)
-    {
-        u32_t order;
-
-        asm volatile ("bsr %0, %1":"=&r"(order):"r"(tmp):"cc");
-        asm volatile ("btr %0, %1" :"=r"(tmp):"r"(order):"cc");
-
-        phm->frames[order] = core_alloc(order);
-    };
-
-    return phm;
-}
-
-void map_phm(addr_t base, phismem_t *phm, u32_t mapflags)
-{
-    count_t count;
-    addr_t *pte;
-
-    count = phm->count;
-    pte = &((addr_t*)page_tabs)[base>>12];
-
-    while(count)
-    {
-        u32_t order;
-        addr_t frame;
-        count_t size;
-
-        asm volatile ("bsr %0, %1":"=&r"(order):"r"(count):"cc");
-        asm volatile ("btr %0, %1" :"=r"(count):"r"(order):"cc");
-
-        frame = phm->frames[order] | mapflags;
-        size = (1 << order);
-        while(size--)
-        {
-            *pte++ = frame;
-            frame+= 4096;
-        }
-    }
-};
-*/
+    eflags_t efl ;
+    md_t *fd;
+    md_t *bk;
+    count_t idx;
+
+    ASSERT(md->parent != NULL);
+    ASSERT( ((md_t*)(md->parent))->parent != NULL);
+
+    efl = safe_cli();
+    spinlock_lock(&sheap.lock);
+
+    if( !list_empty(&md->adj))
+    {
+        bk = (md_t*)md->adj.prev;
+        fd = (md_t*)md->adj.next;
+
+        if(fd->state == MD_FREE)
+        {
+            idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;
+
+            list_remove((link_t*)fd);
+            if(list_empty(&sheap.mapped[idx]))
+                _reset_savm(idx);
+
+            md->size+= fd->size;
+            md->adj.next = fd->adj.next;
+            md->adj.next->prev = (link_t*)md;
+            slab_free(md_slab, fd);
+        };
+
+        if(bk->state == MD_FREE)
+        {
+            idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;
+
+            list_remove((link_t*)bk);
+            if(list_empty(&sheap.mapped[idx]))
+                _reset_savm(idx);
+
+            bk->size+= md->size;
+            bk->adj.next = md->adj.next;
+            bk->adj.next->prev = (link_t*)bk;
+            slab_free(md_slab, md);
+            md = fd;
+        };
+    };
+
+    md->state = MD_FREE;
+
+    idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
+
+    _set_savm(idx);
+
+    if( idx < 31)
+        list_prepend(&md->link, &sheap.mapped[idx]);
+    else
+    {
+        if( list_empty(&sheap.mapped[31]))
+            list_prepend(&md->link, &sheap.mapped[31]);
+        else
+        {
+            md_t *tmp = (md_t*)sheap.mapped[31].next;
+
+            while((link_t*)tmp != &sheap.mapped[31])
+            {
+                if(md->base < tmp->base)
+                    break;
+                tmp = (md_t*)tmp->link.next;
+            }
+            list_insert(&md->link, &tmp->link);
+        };
+    };
+    spinlock_unlock(&sheap.lock);
+    safe_sti(efl);
+};
 
 void * __fastcall mem_alloc(size_t size, u32_t flags)
 {
@@ -437,36 +634,41 @@ void * __fastcall mem_alloc(size_t size, u32_t flags)
     size = (size+4095)&~4095;
 
-    md = find_small_md(size);
-
-    if( md )
-    {
-        ASSERT(md->state == MD_USED);
-
-        if( flags & PG_MAP )
-        {
-            count_t tmp = size >> 12;
-            addr_t *pte = &((addr_t*)page_tabs)[md->base>>12];
-
-            while(tmp)
-            {
-                u32_t  order;
-                addr_t frame;
-                size_t size;
-
-                asm volatile ("bsr %1, %0":"=&r"(order):"r"(tmp):"cc");
-                asm volatile ("btr %1, %0" :"=r"(tmp):"r"(order):"cc");
-
-                frame = core_alloc(order) | flags;   /* FIXME check */
-
-                size = (1 << order);
-                while(size--)
-                {
-                    *pte++ = frame;
-                    frame+= 4096;
-                };
-            };
-        };
+    if( flags & PG_MAP )
+    {
+        md = find_mapped_md(size);
+
+        if( !md )
+            return NULL;
+
+        md_t *lmd = (md_t*)md->parent;
+
+        ASSERT( lmd != NULL);
+        ASSERT( lmd->parent != NULL);
+
+        addr_t frame = (md->base - lmd->base + (addr_t)lmd->parent)|
+                       (flags & 0xFFF);
+        DBG("frame %x\n", frame);
+        ASSERT(frame != 0);
+
+        count_t tmp = size >> 12;
+        addr_t *pte = &((addr_t*)page_tabs)[md->base>>12];
+
+        while(tmp--)
+        {
+            *pte++ = frame;
+            frame+= 4096;
+        };
+    }
+    else
+        md = find_unmapped_md(size);
+
+    if( !md )
+        return NULL;
+
+    ASSERT(md->parent != NULL);
+    ASSERT(md->state == MD_USED);
 
     efl = safe_cli();
     spinlock_lock(&sheap.lock);
@@ -492,8 +694,6 @@ void * __fastcall mem_alloc(size_t size, u32_t flags)
     DBG("allocate: %x size %x\n\n",md->base, size);
     return (void*)md->base;
-    };
-
-    return NULL;
 };
 
 void __fastcall mem_free(void *mem)
 {
@@ -524,10 +724,20 @@ void __fastcall mem_free(void *mem)
     if( md )
     {
+        md_t *lmd;
+
         DBG("\tmd: %x base: %x size: %x\n",md, md->base, md->size);
         ASSERT(md->state == MD_USED);
 
+        list_remove((link_t*)md);
+
+        lmd = (md_t*)md->parent;
+
+        ASSERT(lmd != 0);
+
+        if(lmd->parent != 0)
+        {
         count_t tmp = md->size >> 12;
         addr_t *pte = &((addr_t*)page_tabs)[md->base>>12];
@@ -536,17 +746,17 @@ void __fastcall mem_free(void *mem)
             *pte++ = 0;
             asm volatile (
                 "invlpg (%0)"
-                :
-                :"r" (mem) );
+                ::"r" (mem) );
             mem+= 4096;
         };
 
-        list_remove((link_t*)md);
-        free_small_md( md );
+            free_mapped_md( md );
+        }
+        else
+            free_unmapped_md( md );
     }
     else
+    {
         DBG("\tERROR: invalid base address: %x\n", mem);
+    };
 
     safe_sti(efl);
 };
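For orientation, a hypothetical caller of the reworked allocator (mem_alloc/mem_free per the diff above; the header and the PG_MAP flag constant are assumed from the surrounding kernel sources):

/* Hypothetical usage sketch; assumes the kernel's mm.h declarations. */
static void example(void)
{
    /* PG_MAP: back the region with physical frames and map it now;
     * without PG_MAP only kernel address space is reserved. */
    void *buf = mem_alloc(16 * 4096, PG_MAP);
    if (buf != NULL)
    {
        /* ... use the 64 KiB buffer ... */
        mem_free(buf);    /* unmaps the pages and frees the descriptor */
    }
}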

View File

@@ -149,30 +149,31 @@ proc user_free stdcall, base:dword
            test al, DONT_FREE_BLOCK
            jnz .cantfree
 
+           push edi
+
            and eax, not 4095
-           mov ecx, eax
+           mov edi, eax
            or al, FREE_BLOCK
            mov [page_tabs+(esi-1)*4], eax
-           sub ecx, 4096
-           mov ebx, ecx
-           shr ecx, 12
+           sub edi, 4096
+           mov ebx, edi
+           shr edi, 12
            jz .released
 .release:
-           xor eax, eax
-           xchg eax, [page_tabs+esi*4]
-           test al, 1
+           xor ecx, ecx
+           xchg ecx, [page_tabs+esi*4]
+           test cl, 1
            jz @F
-           call free_page
+           call @core_free@4
            mov eax, esi
            shl eax, 12
            invlpg [eax]
 @@:
            inc esi
-           dec ecx
+           dec edi
            jnz .release
 .released:
-           push edi
 
            mov edx, [current_slot]
            mov esi, dword [edx+APPDATA.heap_base]
            mov edi, dword [edx+APPDATA.heap_top]
@@ -276,20 +277,28 @@ user_realloc:
            cmp edx, ebx
            jb .realloc_add
 ; release part of allocated memory
+           push ecx
 .loop:
            cmp edx, ebx
            jz .release_done
            dec edx
-           xor eax, eax
-           xchg eax, [page_tabs+edx*4]
+           xor ecx, ecx
+           xchg ecx, [page_tabs+edx*4]
            test al, 1
            jz .loop
-           call free_page
+           push edx
+           call @core_free@4
+           pop edx
            mov eax, edx
            shl eax, 12
            invlpg [eax]
            jmp .loop
 .release_done:
+           pop ecx
            sub ebx, ecx
            cmp ebx, 1
            jnz .nofreeall

View File

@@ -29,12 +29,6 @@ map_space:   ;not implemented
            ret
 
-align 4
-free_page:
-           ret
-
 proc map_io_mem stdcall, base:dword, size:dword, flags:dword
 
            push edi
@@ -246,18 +240,16 @@ proc new_mem_resize stdcall, new_size:dword
            shr edi, 12
            shr esi, 12
 @@:
-           mov eax, [app_page_tabs+edi*4]
-           test eax, 1
+           mov ecx, [app_page_tabs+edi*4]
+           test ecx, 1
            jz .next
            mov dword [app_page_tabs+edi*4], 2
            mov ebx, edi
            shl ebx, 12
-           push eax
            invlpg [ebx]
-           pop eax
-           call free_page
+           call @core_free@4
 .next:
            add edi, 1
            cmp edi, esi
            jb @B

View File

@@ -549,7 +549,7 @@ addr_t __fastcall zone_alloc(zone_t *zone, u32_t order)
     return (v << FRAME_WIDTH);
 }
 
-addr_t __fastcall core_alloc(u32_t order)   //export
+addr_t __fastcall core_alloc(u32_t order)
 {
     eflags_t efl;
     pfn_t v;
@@ -559,11 +559,14 @@ addr_t __fastcall core_alloc(u32_t order)   //export
     v = zone_frame_alloc(&z_core, order);
     spinlock_unlock(&z_core.lock);
     safe_sti(efl);
-    DBG("core alloc: %x, size %x\n", v << FRAME_WIDTH, (1<<order)<<12);
+
+    DBG("core alloc: %x, size %x remain %d\n", v << FRAME_WIDTH,
+        ((1<<order)<<12), z_core.free_count);
+
     return (v << FRAME_WIDTH);
 };
 
-void __fastcall core_free(addr_t frame)   //export
+void __fastcall core_free(addr_t frame)
 {
     eflags_t efl;
@@ -572,6 +575,9 @@ void __fastcall core_free(addr_t frame)   //export
     zone_free(&z_core, frame>>12);
     spinlock_unlock(&z_core.lock);
     safe_sti(efl);
+
+    DBG("core free %x remain %d\n", frame, z_core.free_count);
+
 }
 
 addr_t alloc_page()   //obsolete
@@ -587,7 +593,7 @@ addr_t alloc_page()   //obsolete
     spinlock_unlock(&z_core.lock);
     safe_sti(efl);
 
-    DBG("alloc_page: %x\n", v << FRAME_WIDTH);
+    DBG("alloc_page: %x remain %d\n", v << FRAME_WIDTH, z_core.free_count);
 
     restore_edx(edx);
     return (v << FRAME_WIDTH);
@@ -605,7 +611,8 @@ void __fastcall zone_free(zone_t *zone, pfn_t frame_idx)
     ASSERT(frame->refcount);
 
-    if (!--frame->refcount) {
+    if (!--frame->refcount)
+    {
         buddy_system_free(zone, &frame->buddy_link);
 
         /* Update zone information. */

View File

@@ -500,7 +500,7 @@ term9:
            mov eax, [.slot]
            shl eax, 8
            mov eax,[SLOT_BASE+eax+APPDATA.dir_table]
-;          stdcall destroy_app_space, eax
+           stdcall destroy_app_space, eax
 
            mov esi, [.slot]
            cmp [fpu_owner],esi   ; if user fpu last -> fpu user = 1
@@ -629,23 +629,23 @@ term9:
            shl edi, 8
            add edi,SLOT_BASE
 
-           mov eax,[edi+APPDATA.pl0_stack]
-           sub eax, OS_BASE
-           call free_page
+           mov ecx,[edi+APPDATA.pl0_stack]
+           sub ecx, OS_BASE
+           call @core_free@4
 
-           mov eax,[edi+APPDATA.cur_dir]
-           sub eax, OS_BASE
-           call free_page
+           mov ecx,[edi+APPDATA.cur_dir]
+           sub ecx, OS_BASE
+           call @core_free@4
 
-           mov eax, [edi+APPDATA.io_map]
-           cmp eax, (tss._io_map_0-OS_BASE+PG_MAP)
+           mov ecx, [edi+APPDATA.io_map]
+           cmp ecx, (tss._io_map_0-OS_BASE+PG_MAP)
            je @F
-           call free_page
+           call @core_free@4
 @@:
-           mov eax, [edi+APPDATA.io_map+4]
-           cmp eax, (tss._io_map_1-OS_BASE+PG_MAP)
+           mov ecx, [edi+APPDATA.io_map+4]
+           cmp ecx, (tss._io_map_1-OS_BASE+PG_MAP)
            je @F
-           call free_page
+           call @core_free@4
 @@:
            mov eax, 0x20202020
            stosd

View File

@@ -200,8 +200,6 @@ proc fs_execute
            mov eax,[hdr_mem]
            mov [ebx+APPDATA.mem_size],eax
 
-if GREEDY_KERNEL
-else
            mov ecx, [hdr_mem]
            mov edi, [file_size]
            add edi, 4095
@@ -213,11 +211,9 @@ proc fs_execute
            cld
            rep stosb
 @@:
-end if
-
-; release only virtual space, not phisical memory
-
-           stdcall free_kernel_space, [file_base]
+           mov ecx, [file_base]
+           call @mem_free@4
 
            lea eax, [hdr_cmdline]
            lea ebx, [cmdline]
            lea ecx, [filename]
@@ -375,14 +371,6 @@ proc create_app_space stdcall, app_size:dword,img_base:dword,img_size:dword
            shr ecx, 12
            mov [img_pages], ecx
 
-;           if GREEDY_KERNEL
-;           lea eax, [ecx+ebx+2]     ;only image size
-;           else
-;           lea eax, [eax+ebx+2]     ;all requested memory
-;           end if
-;           cmp eax, [pg_data.pages_free]
-;           ja .fail
-
            call _alloc_page
            test eax, eax
            mov [dir_addr], eax
@@ -434,41 +422,24 @@ proc create_app_space stdcall, app_size:dword,img_base:dword,img_size:dword
            xor eax, eax
            rep stosd
 
-           mov ecx, [img_pages]
-           mov ebx, PG_UW
-           mov esi, [img_base]
-           shr esi, 10
-           add esi, page_tabs
-           xor edx, edx
-           mov edi, page_tabs
-.remap:
-           lodsd
-           or eax, ebx      ; force user level r/w access
-           stosd
-           add edx, 0x1000
-           dec [app_pages]
-           dec ecx
-           jnz .remap
-
            mov ecx, [app_pages]
-           test ecx, ecx
-           jz .done
+           xor ebx, ebx
 
-if GREEDY_KERNEL
-           mov eax, 0x02
-           rep stosd
-else
 .alloc:
-           call _alloc_page
+           xor ecx, ecx
+           call @core_alloc@4
            test eax, eax
            jz .fail
-           stdcall map_page,edx,eax,dword PG_UW
-           add edx, 0x1000
+           stdcall map_page,ebx,eax,dword PG_UW
+           add ebx, 0x1000
            dec [app_pages]
            jnz .alloc
-end if
 
-           mov ecx, [img_size]
-           mov esi, [img_base]
-           xor edi, edi
-           rep movsb
+; FIXME remap md
 
 .done:
            dec [pg_data.pg_mutex]
@@ -495,24 +466,26 @@ set_cr3:
 
 align 4
 proc destroy_page_table stdcall, pg_tab:dword
 
+           push ebx
            push esi
 
            mov esi, [pg_tab]
 
-           mov ecx, 1024
+           mov ebx, 1024
 .free:
-           mov eax, [esi]
-           test eax, 1
+           mov ecx, [esi]
+           test ecx, 1
            jz .next
-           test eax, 1 shl 9
+           test ecx, 1 shl 9
            jnz .next                      ;skip shared pages
-           call free_page
+           call @core_free@4
 .next:
            add esi, 4
-           dec ecx
+           dec ebx
            jnz .free
 
            pop esi
+           pop ebx
            ret
 endp
@@ -563,15 +536,15 @@ proc destroy_app_space stdcall, pg_dir:dword
            stdcall destroy_page_table, eax
 
-           mov eax, [esi]
-           call free_page
+           mov ecx, [esi]
+           call @core_free@4
 .next:
            add esi, 4
            dec edi
            jnz .destroy
 
-           mov eax, [pg_dir]
-           call free_page
+           mov ecx, [pg_dir]
+           call @core_free@4
 .exit:
            dec [pg_data.pg_mutex]
            ret
@@ -744,7 +717,6 @@ proc read_process_memory
            mov eax, [slot]
            shl eax,8
            mov ebx, [offset]
-;          add ebx, new_app_base
            push ecx
            stdcall map_memEx, [proc_mem_map],\
                    [SLOT_BASE+eax+0xB8],\

View File

@@ -1,123 +0,0 @@
-typedef unsigned char u8_t;
-typedef unsigned short int u16_t;
-typedef unsigned int u32_t;
-typedef unsigned long long u64_t;
-
-static inline u8_t inb(u16_t port)
-{
-    u8_t val;
-
-    if(port < 0x100)
-        asm volatile ("in %b0, %w1 \n" : "=a" (val) : "dN" (port) );
-    else
-        asm volatile ("in %b0, %w1 \n" : "=a" (val) : "d" (port) );
-    return val;
-}
-
-static inline outb(u16_t port, u8_t val)
-{
-    if (port < 0x100)             /* GCC can optimize this if constant */
-        asm volatile ("out %w0, %b1" : :"dN"(port), "a"(val));
-    else
-        asm volatile ("out %w0, %b1" : :"d"(port), "a"(val));
-}
-
-/* Convert the integer D to a string and save the string in BUF. If
-   BASE is equal to 'd', interpret that D is decimal, and if BASE is
-   equal to 'x', interpret that D is hexadecimal. */
-static void itoa (char *buf, int base, int d)
-{
-    char *p = buf;
-    char *p1, *p2;
-    unsigned long ud = d;
-    int divisor = 10;
-
-    /* If %d is specified and D is minus, put `-' in the head. */
-    if (base == 'd' && d < 0)
-    {
-        *p++ = '-';
-        buf++;
-        ud = -d;
-    }
-    else if (base == 'x')
-        divisor = 16;
-
-    /* Divide UD by DIVISOR until UD == 0. */
-    do
-    {
-        int remainder = ud % divisor;
-        *p++ = (remainder < 10) ? remainder + '0' : remainder + 'a' - 10;
-    }
-    while (ud /= divisor);
-
-    /* Terminate BUF. */
-    *p = 0;
-
-    /* Reverse BUF. */
-    p1 = buf;
-    p2 = p - 1;
-    while (p1 < p2)
-    {
-        char tmp = *p1;
-        *p1 = *p2;
-        *p2 = tmp;
-        p1++;
-        p2--;
-    }
-}
-
-void putc(int c)
-{
-    while (!(inb(0x3f8+5) & 0x60));
-    outb(0x3f8,c);
-    if (c == '\n')
-        putc('\r');
-}
-
-void _printf (const char *format, ...)
-{
-    char **arg = (char **) &format;
-    int c;
-    char buf[20];
-
-    arg++;
-
-    while ((c = *format++) != 0)
-    {
-        if (c != '%')
-            putc(c);
-        else
-        {
-            char *p;
-
-            c = *format++;
-            switch (c)
-            {
-                case 'd':
-                case 'u':
-                case 'x':
-                    itoa (buf, c, *((int *) arg++));
-                    p = buf;
-                    goto string;
-                    break;
-
-                case 's':
-                    p = *arg++;
-                    if (! p)
-                        p = "(null)";
-
-                string:
-                    while (*p)
-                        putc(*p++);
-                    break;
-
-                default:
-                    putc(*((int *) arg++));
-                    break;
-            }
-        }
-    }
-}

View File

@@ -30,6 +30,13 @@ extern void panic_printf(char *fmt, ...) __attribute__((noreturn));
 # define DBG(format,...)
 
+# define PANIC(expr) \
+      if (!(expr)) { \
+         panic_printf("Kernel panic in %s() at %s:%u: " \
+                      "assertion failed (%s)",__func__ ,__FILE__,__LINE__, \
+                      #expr); \
+      };
+
 #endif
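A hypothetical call site for the PANIC macro added above; on failure it formats the function, file, and line through panic_printf (md_slab and slab_alloc as used elsewhere in this diff):

md_t *md = (md_t*)slab_alloc(md_slab, 0);
PANIC(md != NULL);   /* halts the kernel if the slab allocation failed */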

View File

@@ -71,4 +71,4 @@ void frame_free(pfn_t frame);
 void __fastcall frame_set_parent(pfn_t pfn, void *data);
 void* __fastcall frame_get_parent(pfn_t pfn);
 
-void* __fastcall heap_alloc(size_t size, u32_t flags) ;
+void* __fastcall mem_alloc(size_t size, u32_t flags) ;

View File

@@ -146,7 +146,6 @@ extrn @core_free@4
 extrn @init_heap@8
 extrn @find_large_md@4
-extrn @find_small_md@4
 extrn @phis_alloc@4
 
 extrn @mem_alloc@8
@@ -2397,11 +2396,6 @@ draw_background_temp:
            add ecx, 0xFFF
            shr ecx, 12
 .z:
-           mov eax, [page_tabs+ebx*4]
-           test al, 1
-           jz @f
-           call free_page
-@@:
            mov eax, [page_tabs+esi*4]
            or al, PG_UW
            mov [page_tabs+ebx*4], eax