1) slab allocator

2) new kernel heap

git-svn-id: svn://kolibrios.org@859 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
Sergey Semyonov (Serge) 2008-09-10 13:04:24 +00:00
parent 67aca9ac45
commit 2b91637d4d
18 changed files with 1148 additions and 758 deletions

View File

@ -69,7 +69,7 @@ __start:
; ENABLE PAGING
mov ecx, 32
mov ecx, 64
mov edi, _sys_pdbr+(OS_BASE shr 20)+(0x100000000-OS_BASE)
mov eax, PG_LARGE+PG_SW
@@:

View File

@ -544,8 +544,6 @@ struc MEM_STATE
struc PG_DATA
{ .mem_amount dd ?
.vesa_mem dd ?
.pages_count dd ?
.pages_free dd ?
.pages_faults dd ?
.pagemap_size dd ?
.kernel_pages dd ?

View File

@ -0,0 +1,295 @@
#include <types.h>
#include <core.h>
#include <spinlock.h>
#include <link.h>
#include <mm.h>
#include <slab.h>
/* Memory descriptor: one contiguous range of kernel address space. */
typedef struct
{
link_t link;      /* entry in a heap_t size-bucketed free list */
link_t adj;       /* entry in the address-ordered adjacency list */
addr_t base;      /* start address of the range */
size_t size;      /* range size in bytes */
void* parent;     /* for small-heap entries: the large md backing them; else NULL */
u32_t reserved;   /* NOTE(review): never read in the visible code — purpose unconfirmed */
}md_t;
/* Per-heap state: 32 free lists bucketed by size, plus a bitmask with
 * bit i set exactly when list[i] is non-empty. */
typedef struct {
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */
u32_t availmask;        /* bit i set <=> list[i] has entries */
link_t list[32];        /* free md_t lists, bucketed by size */
}heap_t;

slab_cache_t *md_slab;   /* allocator for md_t descriptors */
slab_cache_t *phm_slab;  /* allocator for phismem_t (creation still commented out in init_heap) */

heap_t lheap;            /* large heap: 4MiB granularity */
heap_t sheap;            /* small heap: 4KiB (page) granularity */
/* Set/clear bit `idx` of lheap.availmask / sheap.availmask directly in
 * memory (Intel-syntax BTS/BTR on the exported `_lheap`/`_sheap` symbols).
 * NOTE(review): this addresses offset 0 of heap_t, i.e. it assumes
 * SPINLOCK_DECLARE(lock) expands to no storage in this configuration so
 * that availmask is the first field — verify against the spinlock macro. */
static inline void _set_lmask(count_t idx)
{ asm volatile ("bts DWORD PTR [_lheap], %0"::"r"(idx):"cc"); }

static inline void _reset_lmask(count_t idx)
{ asm volatile ("btr DWORD PTR [_lheap], %0"::"r"(idx):"cc"); }

static inline void _set_smask(count_t idx)
{ asm volatile ("bts DWORD PTR [_sheap], %0"::"r"(idx):"cc"); }

static inline void _reset_smask(count_t idx)
{ asm volatile ("btr DWORD PTR [_sheap], %0"::"r"(idx):"cc"); }
/**
 * Initialize the large (4MiB-granular) and small (page-granular) heaps.
 *
 * @param base  start of the managed address range; must be 4MiB aligned
 * @param size  length of the range in bytes; must be 4MiB aligned
 * @return 1 on success, 0 if the first descriptor cannot be allocated
 */
int __fastcall init_heap(addr_t base, size_t size)
{
    md_t *md;
    u32_t i;

    ASSERT(base != 0);
    ASSERT(size != 0);
    /* Precedence fix: `base & 0x3FFFFF == 0` parsed as
     * `base & (0x3FFFFF == 0)` == 0, so the old asserts checked nothing. */
    ASSERT((base & 0x3FFFFF) == 0);
    ASSERT((size & 0x3FFFFF) == 0);

    for (i = 0; i < 32; i++)
    {
        list_initialize(&lheap.list[i]);
        list_initialize(&sheap.list[i]);
    };

    md_slab = slab_cache_create(sizeof(md_t), 32,NULL,NULL,SLAB_CACHE_MAGDEFERRED);

    md = (md_t*)slab_alloc(md_slab,0);
    if (!md)                              /* was unchecked: NULL deref on OOM */
        return 0;

    list_initialize(&md->adj);
    md->base = base;
    md->size = size;
    md->parent = NULL;
    md->reserved = 0;

    /* The whole range starts as one block in the last large bucket. */
    list_prepend(&md->link, &lheap.list[31]);
    lheap.availmask = 0x80000000;
    sheap.availmask = 0x00000000;

    // phm_slab = slab_cache_create(sizeof(phismem_t), 32,NULL,NULL,SLAB_CACHE_MAGDEFERRED);

    return 1;
};
/**
 * Take a descriptor of exactly `size` bytes from the large heap,
 * splitting a bigger block if necessary.
 *
 * @param size  requested size; must be a multiple of 4MiB
 * @return the descriptor, or NULL if nothing big enough is free
 *
 * Locking/interrupt discipline is the caller's responsibility.
 */
md_t* __fastcall find_large_md(size_t size)
{
    md_t *md = NULL;
    count_t idx0;
    u32_t mask;

    /* Precedence fix: was `size & 0x3FFFFF == 0`, which always passed. */
    ASSERT((size & 0x3FFFFF) == 0);

    idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31;
    /* Unsigned all-ones: left-shifting -1 (signed) is undefined behavior. */
    mask = lheap.availmask & (0xFFFFFFFF << idx0);

    if(mask)
    {
        idx0 = _bsf(mask);
        ASSERT( !list_empty(&lheap.list[idx0]));
        md = (md_t*)lheap.list[idx0].next;
    }
    else
        return NULL;

    list_remove((link_t*)md);
    if(list_empty(&lheap.list[idx0]))
        _reset_lmask(idx0);

    if(md->size > size)
    {
        /* Split: hand out the low part, requeue the remainder. */
        count_t idx1;
        md_t *new_md = (md_t*)slab_alloc(md_slab,0);

        link_initialize(&new_md->link);
        list_insert(&md->adj, &new_md->adj);

        new_md->base = md->base;
        new_md->size = size;
        new_md->parent = md->parent;   /* was left uninitialized */
        new_md->reserved = 0;          /* was left uninitialized */

        md->base += size;
        md->size -= size;

        idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31;
        list_prepend(&md->link, &lheap.list[idx1]);
        _set_lmask(idx1);

        return new_md;
    }
    return md;
}
/**
 * Take a page-granular descriptor of `size` bytes from the small heap,
 * refilling the small heap from the large heap when it is empty.
 *
 * @param size  requested size; must be page aligned
 * @return the descriptor, or NULL on failure
 *
 * Runs with interrupts disabled for the critical section.
 */
md_t* __fastcall find_small_md(size_t size)
{
    eflags_t efl;
    md_t *md = NULL;
    count_t idx0;
    u32_t mask;

    /* Precedence fix: was `size & 0xFFF == 0`, which always passed. */
    ASSERT((size & 0xFFF) == 0);

    efl = safe_cli();

    idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
    mask = sheap.availmask & (0xFFFFFFFF << idx0);  /* avoid `-1<<n` UB */

    if(mask)
    {
        idx0 = _bsf(mask);
        ASSERT( !list_empty(&sheap.list[idx0]));
        md = (md_t*)sheap.list[idx0].next;
        list_remove((link_t*)md);
        if(list_empty(&sheap.list[idx0]))
            _reset_smask(idx0);
    }
    else
    {
        /* Nothing queued: pull a 4MiB-rounded block from the large heap. */
        md_t *lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF);
        if( !lmd)
        {
            safe_sti(efl);
            return NULL;
        };

        md = (md_t*)slab_alloc(md_slab,0);
        if (!md)   /* was unchecked (note: lmd is not returned — known leak on OOM) */
        {
            safe_sti(efl);
            return NULL;
        }
        link_initialize(&md->link);
        list_initialize(&md->adj);
        md->base = lmd->base;
        md->size = lmd->size;
        md->parent = lmd;
        md->reserved = 0;
    };

    if(md->size > size)
    {
        count_t idx1;
        md_t *new_md = (md_t*)slab_alloc(md_slab,0);

        link_initialize(&new_md->link);
        list_insert(&md->adj, &new_md->adj);

        new_md->base = md->base;
        new_md->size = size;
        new_md->parent = md->parent;
        new_md->reserved = 0;

        md->base += size;
        md->size -= size;

        /* Bug fix: the remainder was binned with the LARGE-heap index
         * (md->size >> 22); the small heap is keyed by pages (>> 12). */
        idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
        list_prepend(&md->link, &sheap.list[idx1]);
        _set_smask(idx1);

        safe_sti(efl);
        return new_md;
    }
    safe_sti(efl);
    return md;
}
/**
 * Allocate physical frames backing `count` pages.
 * The count is decomposed into powers of two: for each set bit `order`
 * of `count`, one run of 2^order frames is stored in phm->frames[order].
 *
 * @return the filled descriptor, or NULL if it cannot be allocated
 */
phismem_t* __fastcall phis_alloc(count_t count)
{
    phismem_t *phm;
    count_t tmp;

    phm = (phismem_t*)slab_alloc(phm_slab, 0);
    if (!phm)                      /* was unchecked: NULL deref below */
        return NULL;

    phm->count = count;
    tmp = count;
    while(tmp)
    {
        u32_t order;
        asm volatile ("bsr %0, %1":"=&r"(order):"r"(tmp):"cc");
        /* Constraint fix: BTR reads AND writes tmp, so it needs "+r";
         * the old "=r" (output-only) let it clear a bit in garbage. */
        asm volatile ("btr %0, %1" :"+r"(tmp):"r"(order):"cc");
        phm->frames[order] = core_alloc(order);
    };
    return phm;
}
#define page_tabs 0xDF800000

/**
 * Map the frame runs of `phm` at virtual address `base` by writing page
 * table entries directly into the self-mapped window at page_tabs.
 *
 * @param base      page-aligned virtual start address
 * @param phm       physical memory descriptor filled by phis_alloc()
 * @param mapflags  PTE attribute bits or'ed into every frame address
 */
void map_phm(addr_t base, phismem_t *phm, u32_t mapflags)
{
    count_t count;
    addr_t *pte;

    count = phm->count;
    pte = &((addr_t*)page_tabs)[base>>12];

    while(count)
    {
        u32_t order;
        addr_t frame;
        count_t size;

        asm volatile ("bsr %0, %1":"=&r"(order):"r"(count):"cc");
        /* Constraint fix: BTR reads AND writes count — "+r", not "=r". */
        asm volatile ("btr %0, %1" :"+r"(count):"r"(order):"cc");

        frame = phm->frames[order] | mapflags;
        size = (1 << order);
        while(size--)
        {
            *pte++ = frame;
            frame+= 4096;            /* consecutive 4KiB frames of the run */
        }
    }
};
/* Allocate `size` bytes (rounded up to whole pages) of mapped kernel
 * memory: reserve address space from the small heap, back it with
 * physical frames, and map them with PTE attributes `flags`.
 * Returns the virtual base address or NULL when no address space is left.
 * NOTE(review): the phis_alloc()/core_alloc() results are not checked —
 * on physical OOM this faults instead of returning NULL, and the
 * reserved md leaks in that case. */
void* __fastcall mem_alloc(size_t size, u32_t flags)
{
md_t *md;
phismem_t *phm;
size = (size+4095)&~4095;   /* round up to page granularity */
md = find_small_md(size);
if( md )
{
phm = phis_alloc(size>>12);
map_phm(md->base, phm, flags);
return (void*)md->base;
}
return NULL;
};
/* Reserve `size` bytes (rounded up to whole pages) of kernel address
 * space without mapping any physical memory behind it.
 * Returns the virtual base address, or NULL when the heap is exhausted. */
void* __stdcall alloc_kernel_space(size_t size); //__asm__("alloc_kernel_space");

void* __stdcall alloc_kernel_space(size_t size)
{
    md_t *md = find_small_md((size + 4095) & ~4095);
    return md ? (void*)md->base : NULL;
}
//void* __stdcall kernel_alloc(size_t size)
//{
//
// return NULL;
//}
//*/

View File

@ -38,506 +38,10 @@ block_base equ MEM_BLOCK.base
block_size equ MEM_BLOCK.size
block_flags equ MEM_BLOCK.flags
; calc_index op — convert a byte size in `op` into a free-list bucket
; index: (pages - 1), clamped to 63 (the last bucket collects all
; larger blocks).
macro calc_index op
{ shr op, 12
dec op
cmp op, 63
jna @f
mov op, 63
@@:
}
; remove_from_list op — unlink descriptor `op` from its doubly linked
; list, tolerating NULL neighbours.
; clobbers: edx = old forward link, ecx = old back link
; (remove_from_free relies on edx being preserved afterwards).
macro remove_from_list op
{ mov edx, [op+list_fd]
mov ecx, [op+list_bk]
test edx, edx
jz @f
mov [edx+list_bk], ecx
@@:
test ecx, ecx
jz @f
mov [ecx+list_fd], edx
@@:
mov [op+list_fd],0
mov [op+list_bk],0
}
; remove_from_free op — unlink `op` from its free bucket and repair the
; bucket state: if `op` was the bucket head, promote its old forward
; link (edx, left there by remove_from_list); clear the bucket's bit in
; mem_block_mask when the bucket becomes empty.
macro remove_from_free op
{
remove_from_list op
mov eax, [op+block_size]
calc_index eax
cmp [mem_block_list+eax*4], op
jne @f
mov [mem_block_list+eax*4], edx
@@:
cmp [mem_block_list+eax*4], 0
jne @f
btr [mem_block_mask], eax
@@:
}
; remove_from_used op — unlink `op` from the used list. Unlike
; remove_from_list, neighbours are assumed non-NULL: the used list is
; circular with the mem_used sentinel, so fd/bk are always valid.
macro remove_from_used op
{
mov edx, [op+list_fd]
mov ecx, [op+list_bk]
mov [edx+list_bk], ecx
mov [ecx+list_fd], edx
mov [op+list_fd], 0
mov [op+list_bk], 0
}
align 4
; Build the kernel heap: clear the 64 bucket heads, mark all 4096
; descriptor-bitmap bits free, carve the descriptor array out of boot
; memory, and seed bucket 63 with one FREE_BLOCK spanning
; HEAP_BASE..page_tabs.
init_kernel_heap:
mov ecx, 64/4
mov edi, mem_block_list
xor eax, eax
cld
rep stosd                        ; mem_block_list[0..63] = 0
mov ecx, 512/4
mov edi, mem_block_map
not eax
rep stosd                        ; bitmap: every descriptor free
mov ecx, 128*1024
fastcall _balloc                 ; 4096 descriptors * 32 bytes each
mov [mem_block_start], mem_block_map
mov [mem_block_end], mem_block_map+512
mov [mem_block_arr], eax
mov ebx, mem_used.fd-MEM_LIST_OFFSET
mov [mem_used.fd], ebx           ; used list: empty circular with sentinel
mov [mem_used.bk], ebx
xor edx, edx
mov [eax+block_next], edx        ; descriptor 0 = the whole heap
mov [eax+block_prev], edx
mov [eax+list_fd], edx
mov [eax+list_bk], edx
mov [eax+block_base], HEAP_BASE
mov [eax+block_size], page_tabs-HEAP_BASE
mov [eax+block_flags], FREE_BLOCK
mov [heap_size], page_tabs-HEAP_BASE
mov [heap_free], page_tabs-HEAP_BASE
mov [mem_block_mask], edx
mov [mem_block_mask+4],0x80000000 ; only bucket 63 populated
mov [mem_block_list+63*4], eax
mov byte [mem_block_map], 0xFE   ; descriptor 0 now in use
and [heap_mutex], 0
mov [heap_blocks], 4096
mov [free_blocks], 4096
ret
; param
; eax= required size
;
; retval
; edi= memory block descriptor (0 on failure)
; ebx= descriptor index
;
; Scan the 64-bit bucket mask (two dwords) for the first non-empty
; bucket at or above the index for `eax` bytes, then walk that bucket's
; list for a block of sufficient size.
align 4
get_small_block:
mov ecx, eax
shr ecx, 12                      ; size -> pages
dec ecx
cmp ecx, 63
jle .get_index
mov ecx, 63                      ; clamp to last bucket
.get_index:
lea esi, [mem_block_mask]
xor ebx, ebx
or edx, -1
cmp ecx, 32
jb .bit_test
sub ecx, 32                      ; index lives in the high mask dword
add ebx, 32
add esi, 4
.bit_test:
shl edx, cl                      ; keep only buckets >= requested index
and edx, [esi]
.find:
bsf edi, edx
jz .high_mask                    ; this dword empty -> try the next one
add ebx, edi
mov edi, [mem_block_list+ebx*4]
.check_size:
cmp eax, [edi+block_size]
ja .next
ret
.high_mask:
add esi, 4
cmp esi, mem_block_mask+8
jae .err                         ; both mask dwords exhausted
add ebx, 32
mov edx, [esi]
jmp .find
.next:
mov edi, [edi+list_fd]
test edi, edi
jnz .check_size
.err:
xor edi, edi
ret
; param
; eax= required size
;
; retval
; edi= memory block descriptor (0 on failure)
; ebx= descriptor index
align 4
get_large_block:
mov edx, -1
mov ecx, eax
shr ecx, 22                      ; size -> 4MiB units
dec ecx
cmp ecx, 31
jle .get_index
mov ecx, 31                      ; clamp to last bucket
.get_index:
shl edx, cl                      ; keep only buckets >= requested index
and edx, [large_block_mask]
.find:
bsf edi, edx
jz .fail                         ; FIX: empty mask left EDI undefined and
                                 ; dereferenced garbage (get_small_block
                                 ; already had this guard)
mov ebx, edi
mov edi, [large_block_list+edi*4]
.check_size:
cmp eax, [edi+block_size]
ja .next
ret
.next:
mov edi, [edi+list_fd]
test edi, edi
jnz .check_size
.fail:
xor edi, edi
ret
align 4
; Take one descriptor from the bitmap-managed array: scan dwords from
; mem_block_start for a set bit, claim it with BTR, and convert
; (dword offset, bit) into an address in mem_block_arr (32 bytes/entry).
; retval: eax = descriptor address, or 0 when the array is exhausted.
; NOTE(review): `found` has no leading dot, so it is a global label —
; it will clash if another `found` is ever defined.
alloc_mem_block:
mov ebx, [mem_block_start]
mov ecx, [mem_block_end]
.l1:
bsf eax,[ebx];
jnz found
add ebx,4
cmp ebx, ecx
jb .l1
xor eax,eax
ret
found:
btr [ebx], eax
mov [mem_block_start],ebx        ; next search starts at this dword
sub ebx, mem_block_map
lea eax,[eax+ebx*8]              ; bit + byte_offset*8 = descriptor index
shl eax, 5                       ; * 32 bytes per descriptor
add eax, [mem_block_arr]
dec [free_blocks]
ret
align 4
; Return descriptor `eax` to the array: clear its fields, set its bit in
; the bitmap, and lower mem_block_start so the next allocation scan
; begins no later than this slot.
; NOTE(review): offset 20 is deliberately left untouched (commented out)
; and the trailing `.err:` label is unreachable — confirm both are
; intentional.
free_mem_block:
mov dword [eax], 0
mov dword [eax+4], 0
mov dword [eax+8], 0
mov dword [eax+12], 0
mov dword [eax+16], 0
; mov dword [eax+20], 0
mov dword [eax+24], 0
mov dword [eax+28], 0
sub eax, [mem_block_arr]
shr eax, 5                       ; address -> descriptor index
mov ebx, mem_block_map
bts [ebx], eax                   ; mark free in the bitmap
inc [free_blocks]
shr eax, 3
and eax, not 3                   ; dword that holds this bit
add eax, ebx
cmp [mem_block_start], eax
ja @f
ret
@@:
mov [mem_block_start], eax
ret
.err:
xor eax, eax
ret
align 4
proc alloc_kernel_space stdcall, size:dword
local block_ind:DWORD
push ebx
push esi
push edi
mov eax, [size]
add eax, 4095
and eax, not 4095
mov [size], eax
mov ebx, heap_mutex
call wait_mutex ;ebx
cmp eax, [heap_free]
ja .error
call get_small_block ; eax
test edi, edi
jz .error
cmp [edi+block_flags], FREE_BLOCK
jne .error
mov [block_ind], ebx ;index of allocated block
mov eax, [edi+block_size]
cmp eax, [size]
je .m_eq_size
call alloc_mem_block
test eax, eax
jz .error
mov esi, eax ;esi - splitted block
mov [esi+block_next], edi
mov eax, [edi+block_prev]
mov [esi+block_prev], eax
mov [edi+block_prev], esi
mov [esi+list_fd], 0
mov [esi+list_bk], 0
test eax, eax
jz @f
mov [eax+block_next], esi
@@:
mov ebx, [edi+block_base]
mov [esi+block_base], ebx
mov edx, [size]
mov [esi+block_size], edx
add [edi+block_base], edx
sub [edi+block_size], edx
mov eax, [edi+block_size]
shr eax, 12
sub eax, 1
cmp eax, 63
jna @f
mov eax, 63
@@:
cmp eax, [block_ind]
je .m_eq_ind
remove_from_list edi
mov ecx, [block_ind]
mov [mem_block_list+ecx*4], edx
test edx, edx
jnz @f
btr [mem_block_mask], ecx
@@:
mov edx, [mem_block_list+eax*4]
mov [edi+list_fd], edx
test edx, edx
jz @f
mov [edx+list_bk], edi
@@:
mov [mem_block_list+eax*4], edi
bts [mem_block_mask], eax
.m_eq_ind:
mov ecx, mem_used.fd-MEM_LIST_OFFSET
mov edx, [ecx+list_fd]
mov [esi+list_fd], edx
mov [esi+list_bk], ecx
mov [ecx+list_fd], esi
mov [edx+list_bk], esi
mov [esi+block_flags], USED_BLOCK
mov eax, [esi+block_base]
mov ebx, [size]
sub [heap_free], ebx
and [heap_mutex], 0
pop edi
pop esi
pop ebx
ret
.m_eq_size:
remove_from_list edi
mov [mem_block_list+ebx*4], edx
and edx, edx
jnz @f
btr [mem_block_mask], ebx
@@:
mov ecx, mem_used.fd-MEM_LIST_OFFSET
mov edx, [ecx+list_fd]
mov [edi+list_fd], edx
mov [edi+list_bk], ecx
mov [ecx+list_fd], edi
mov [edx+list_bk], edi
mov [edi+block_flags], USED_BLOCK
mov eax, [edi+block_base]
mov ebx, [size]
sub [heap_free], ebx
and [heap_mutex], 0
pop edi
pop esi
pop ebx
ret
.error:
xor eax, eax
mov [heap_mutex], eax
pop edi
pop esi
pop ebx
ret
endp
align 4
proc free_kernel_space stdcall uses ebx ecx edx esi edi, base:dword
push ebx
push esi
push edi
mov ebx, heap_mutex
call wait_mutex ;ebx
mov eax, [base]
mov esi, [mem_used.fd]
@@:
cmp esi, mem_used.fd-MEM_LIST_OFFSET
je .fail
cmp [esi+block_base], eax
je .found
mov esi, [esi+list_fd]
jmp @b
.found:
cmp [esi+block_flags], USED_BLOCK
jne .fail
mov eax, [esi+block_size]
add [heap_free], eax
mov edi, [esi+block_next]
test edi, edi
jz .prev
cmp [edi+block_flags], FREE_BLOCK
jne .prev
remove_from_free edi
mov edx, [edi+block_next]
mov [esi+block_next], edx
test edx, edx
jz @f
mov [edx+block_prev], esi
@@:
mov ecx, [edi+block_size]
add [esi+block_size], ecx
mov eax, edi
call free_mem_block
.prev:
mov edi, [esi+block_prev]
test edi, edi
jz .insert
cmp [edi+block_flags], FREE_BLOCK
jne .insert
remove_from_used esi
mov edx, [esi+block_next]
mov [edi+block_next], edx
test edx, edx
jz @f
mov [edx+block_prev], edi
@@:
mov eax, esi
call free_mem_block
mov ecx, [edi+block_size]
mov eax, [esi+block_size]
add eax, ecx
mov [edi+block_size], eax
calc_index eax
calc_index ecx
cmp eax, ecx
je .m_eq
push ecx
remove_from_list edi
pop ecx
cmp [mem_block_list+ecx*4], edi
jne @f
mov [mem_block_list+ecx*4], edx
@@:
cmp [mem_block_list+ecx*4], 0
jne @f
btr [mem_block_mask], ecx
@@:
mov esi, [mem_block_list+eax*4]
mov [mem_block_list+eax*4], edi
mov [edi+list_fd], esi
test esi, esi
jz @f
mov [esi+list_bk], edi
@@:
bts [mem_block_mask], eax
.m_eq:
xor eax, eax
mov [heap_mutex], eax
dec eax
pop edi
pop esi
pop ebx
ret
.insert:
remove_from_used esi
mov eax, [esi+block_size]
calc_index eax
mov edi, [mem_block_list+eax*4]
mov [mem_block_list+eax*4], esi
mov [esi+list_fd], edi
test edi, edi
jz @f
mov [edi+list_bk], esi
@@:
bts [mem_block_mask], eax
mov [esi+block_flags],FREE_BLOCK
xor eax, eax
mov [heap_mutex], eax
dec eax
pop edi
pop esi
pop ebx
ret
.fail:
xor eax, eax
mov [heap_mutex], eax
pop edi
pop esi
pop ebx
ret
endp
@ -555,8 +59,9 @@ proc kernel_alloc stdcall, size:dword
add eax, 4095
and eax, not 4095;
mov [size], eax
and eax, eax
test eax, eax
jz .err
mov ebx, eax
shr ebx, 12
mov [pages_count], ebx
@ -617,172 +122,11 @@ endp
align 4
proc kernel_free stdcall, base:dword
push ebx esi
mov ebx, heap_mutex
call wait_mutex ;ebx
mov eax, [base]
mov esi, [mem_used.fd]
@@:
cmp esi, mem_used.fd-MEM_LIST_OFFSET
je .fail
cmp [esi+block_base], eax
je .found
mov esi, [esi+list_fd]
jmp @b
.found:
cmp [esi+block_flags], USED_BLOCK
jne .fail
and [heap_mutex], 0
push ecx
mov ecx, [esi+block_size];
shr ecx, 12
call release_pages ;eax, ecx
pop ecx
stdcall free_kernel_space, [base]
pop esi ebx
ret
.fail:
and [heap_mutex], 0
pop esi ebx
ret
endp
align 4
proc alloc_large stdcall, size:dword
local block_ind:DWORD
push ebx
push esi
push edi
mov eax, [size]
add eax, 0x3FFFFF
and eax, not 0x3FFFFF
mov [size], eax
; mov ebx, heap_mutex
; call wait_mutex ;ebx
; cmp eax, [heap_free]
; ja .error
call get_large_block ; eax
test edi, edi
jz .error
cmp [edi+block_flags], FREE_BLOCK
jne .error
mov [block_ind], ebx ;index of allocated block
mov eax, [edi+block_size]
cmp eax, [size]
je .m_eq_size
call alloc_mem_block
and eax, eax
jz .error
mov esi, eax ;esi - splitted block
mov [esi+block_next], edi
mov eax, [edi+block_prev]
mov [esi+block_prev], eax
mov [edi+block_prev], esi
mov [esi+list_fd], 0
mov [esi+list_bk], 0
test eax, eax
jz @f
mov [eax+block_next], esi
@@:
mov ebx, [edi+block_base]
mov [esi+block_base], ebx
mov edx, [size]
mov [esi+block_size], edx
add [edi+block_base], edx
sub [edi+block_size], edx
mov eax, [edi+block_size]
shr eax, 22
dec eax
cmp eax, 31
jna @f
mov eax, 31
@@:
cmp eax, [block_ind]
je .m_eq_ind
remove_from_list edi
mov ecx, [block_ind]
mov [large_block_list+ecx*4], edx
test edx, edx
jnz @f
btr [large_block_mask], ecx
@@:
mov edx, [large_block_list+eax*4]
mov [edi+list_fd], edx
test edx, edx
jz @f
mov [edx+list_bk], edi
@@:
mov [large_block_list+eax*4], edi
bts [large_block_mask], eax
.m_eq_ind:
mov ecx, mem_used.fd-MEM_LIST_OFFSET
mov edx, [ecx+list_fd]
mov [esi+list_fd], edx
mov [esi+list_bk], ecx
mov [ecx+list_fd], esi
mov [edx+list_bk], esi
mov [esi+block_flags], USED_BLOCK
mov eax, [esi+block_base]
mov ebx, [size]
sub [heap_free], ebx
and [heap_mutex], 0
pop edi
pop esi
pop ebx
ret
.m_eq_size:
remove_from_list edi
mov [large_block_list+ebx*4], edx
and edx, edx
jnz @f
btr [large_block_mask], ebx
@@:
mov ecx, mem_used.fd-MEM_LIST_OFFSET
mov edx, [ecx+list_fd]
mov [edi+list_fd], edx
mov [edi+list_bk], ecx
mov [ecx+list_fd], edi
mov [edx+list_bk], edi
mov [edi+block_flags], USED_BLOCK
mov eax, [edi+block_base]
mov ebx, [size]
sub [heap_free], ebx
and [heap_mutex], 0
pop edi
pop esi
pop ebx
ret
.error:
xor eax, eax
mov [heap_mutex], eax
pop edi
pop esi
pop ebx
ret
endp
restore block_next
restore block_prev

View File

@ -174,7 +174,7 @@ release_pages:
add esi, 4
dec ecx
jnz @B
mov [pg_data.pages_free], ebp
; mov [pg_data.pages_free], ebp
and [pg_data.pg_mutex],0
popad
ret
@ -901,12 +901,13 @@ sysfn_meminfo:
cmp ebx, OS_BASE
jae .fail
mov eax, [pg_data.pages_count]
mov eax, [_mem_amount]
shr eax, 12
mov [ebx], eax
shl eax, 12
mov [esp+36], eax
mov ecx, [pg_data.pages_free]
mov [ebx+4], ecx
call _get_free_mem
mov [ebx+4], eax
mov edx, [pg_data.pages_faults]
mov [ebx+8], edx
mov esi, [heap_size]

View File

@ -30,13 +30,12 @@ static void __fastcall buddy_system_free(zone_t *z, link_t *block);
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx);
static addr_t __fastcall zone_alloc(zone_t *zone, u32_t order);
void __fastcall zone_free(zone_t *zone, pfn_t frame_idx);
size_t buddy_conf_size(int max_order);
static inline void frame_initialize(frame_t *frame);
static inline u32_t fnzb(u32_t arg);
void init_mm();
@ -211,16 +210,6 @@ static inline void frame_initialize(frame_t *frame)
frame->buddy_order = 0;
}
static inline count_t fnzb(u32_t arg)
{
int n;
asm volatile ("xor %0, %0 \n\t"
"bsr %0, %1"
:"=r" (n)
:"r"(arg)
);
return n;
}
static link_t *buddy_find_block(zone_t *zone, link_t *child,
u32_t order)
@ -502,6 +491,7 @@ static __fastcall pfn_t zone_frame_alloc(zone_t *zone, u32_t order)
/* get frame address */
v = make_frame_index(zone, frame);
return v;
}
@ -518,6 +508,17 @@ void __fastcall frame_set_parent(pfn_t pfn, void *data)
spinlock_unlock(&z_core.lock);
}
/* Return the per-frame `parent` pointer (previously stored with
 * frame_set_parent) for physical frame `pfn`, under the core-zone lock. */
void* __fastcall frame_get_parent(pfn_t pfn)
{
// zone_t *zone = find_zone_and_lock(pfn, &hint);
void *res;
spinlock_lock(&z_core.lock);
res = zone_get_frame(&z_core, pfn)->parent;
spinlock_unlock(&z_core.lock);
return res;
}
static inline int to_order(count_t arg)
{
@ -548,7 +549,7 @@ addr_t __fastcall zone_alloc(zone_t *zone, u32_t order)
return (v << FRAME_WIDTH);
}
addr_t core_alloc(u32_t order) //__cdecl __dllexport
addr_t __fastcall core_alloc(u32_t order) //export
{
eflags_t efl;
pfn_t v;
@ -562,7 +563,18 @@ addr_t core_alloc(u32_t order) //__cdecl __dllexport
return (v << FRAME_WIDTH);
};
addr_t alloc_page() //obsolete
/* Free a physical block previously returned by core_alloc.
 * `frame` is a physical address; >>12 converts it to a frame number.
 * Interrupts are disabled around the zone lock to allow use from
 * interrupt context. */
void __fastcall core_free(addr_t frame) //export
{
eflags_t efl;
efl = safe_cli();
spinlock_lock(&z_core.lock);
zone_free(&z_core, frame>>12);
spinlock_unlock(&z_core.lock);
safe_sti(efl);
}
addr_t alloc_page() //obsolete
{
eflags_t efl;
u32_t edx;
@ -619,17 +631,6 @@ void __fastcall zone_free(zone_t *zone, pfn_t frame_idx)
}
}
void core_free(addr_t frame) //export
{
eflags_t efl;
efl = safe_cli();
spinlock_lock(&z_core.lock);
zone_free(&z_core, frame>>12);
spinlock_unlock(&z_core.lock);
safe_sti(efl);
}
void frame_free(addr_t frame) //export
{
eflags_t efl;
@ -642,3 +643,8 @@ void frame_free(addr_t frame) //export
safe_sti(efl);
}
/* Number of free physical frames in the core zone (frames, not bytes;
 * read without the zone lock — value is advisory). */
count_t get_free_mem()
{
return z_core.free_count;
}

View File

@ -213,10 +213,10 @@ do_change_task:
; set thread io map
mov ecx, [ebx+APPDATA.io_map]
mov edx, [ebx+APPDATA.io_map+4]
mov dword [page_tabs+((tss._io_map_0 and -4096) shr 10)], ecx
mov dword [page_tabs+((tss._io_map_1 and -4096) shr 10)], edx
; mov ecx, [ebx+APPDATA.io_map]
; mov edx, [ebx+APPDATA.io_map+4]
; mov dword [page_tabs+((tss._io_map_0 and -4096) shr 10)], ecx
; mov dword [page_tabs+((tss._io_map_1 and -4096) shr 10)], edx
mov eax, [ebx+APPDATA.dir_table]
cmp eax, [esi+APPDATA.dir_table]

View File

@ -0,0 +1,471 @@
#include <types.h>
#include <core.h>
#include <spinlock.h>
#include <link.h>
#include <mm.h>
#include <slab.h>
extern zone_t z_core;
static LIST_INITIALIZE(slab_cache_list);
static slab_cache_t *slab_cache;
static slab_cache_t slab_cache_cache;
static slab_t *slab_create();
static slab_cache_t * slab_cache_alloc();
void slab_free(slab_cache_t *cache, void *obj);
/**
* Allocate frames for slab space and initialize
*
*/
/**
 * Allocate frames for a new slab of `cache`, register the slab as the
 * parent of every frame, and thread the object freelist through it.
 *
 * @return the new slab, or NULL on frame / control-structure failure
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    unsigned int i;
    u32_t p;
    addr_t frame;

    /* Fix: PA2KA(0) is non-zero (it adds OS_BASE), so the old
     * `if (!data)` test after the conversion could never detect an
     * allocation failure. Test the physical address instead
     * (assumes core_alloc returns 0 on failure — TODO confirm). */
    frame = core_alloc(cache->order);
    if (!frame) {
        return NULL;
    }
    data = (void*)PA2KA(frame);

    slab = (slab_t*)slab_create();
    if (!slab) {
        core_free(KA2PA(data));
        return NULL;
    }

    /* Every frame of the slab points back at its slab_t (for obj2slab). */
    for (i = 0; i < ((u32_t) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = cache;

    /* Freelist: each free slot stores the address of the next one. */
    for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++)
    {
        *(addr_t *)p = p+cache->size;
        p = p+cache->size;
    };

    atomic_inc(&cache->allocated_slabs);
    return slab;
}
/**
 * Take new object from slab or create new if needed
 *
 * Pops the head of a partial slab's freelist (growing the cache with
 * slab_space_alloc when no partial slab exists) and requeues the slab
 * on the full or partial list depending on what remains.
 *
 * @return Object address or null
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
slab_t *slab;
void *obj;
spinlock_lock(&cache->slablock);
if (list_empty(&cache->partial_slabs)) {
/* Allow recursion and reclaiming
* - this should work, as the slab control structures
* are small and do not need to allocate with anything
* other than frame_alloc when they are allocating,
* that's why we should get recursion at most 1-level deep
*/
slab = slab_space_alloc(cache, flags);
if (!slab)
{
spinlock_unlock(&cache->slablock);
return NULL;
}
} else {
slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
list_remove(&slab->link);
}
/* Pop the freelist head; each free slot stores the next slot's address. */
obj = slab->nextavail;
slab->nextavail = *(void**)obj;
slab->available--;
if (!slab->available)
list_prepend(&slab->link, &cache->full_slabs);
else
list_prepend(&slab->link, &cache->partial_slabs);
spinlock_unlock(&cache->slablock);
// if (cache->constructor && cache->constructor(obj, flags)) {
/* Bad, bad, construction failed */
// slab_obj_destroy(cache, obj, slab);
// return NULL;
// }
return obj;
}
/** Map object to slab structure: the slab_t was registered as the
 * parent of the object's physical frame by slab_space_alloc(). */
static slab_t * obj2slab(void *obj)
{
return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}
/** Allocate new object from cache.
 * The magazine (per-CPU cache) layer is currently disabled; this goes
 * straight to slab_obj_create(). Interrupts are disabled around the
 * allocation to avoid deadlocks with interrupt handlers taking the
 * same slab lock. Returns NULL on out-of-memory. */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
eflags_t efl;
void *result = NULL;
/* Disable interrupts to avoid deadlocks with interrupt handlers */
efl = safe_cli();
// if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
// result = magazine_obj_get(cache);
// }
// if (!result)
result = slab_obj_create(cache, flags);
safe_sti(efl);
// if (result)
// atomic_inc(&cache->allocated_objs);
return result;
}
/**************************************/
/* Slab cache functions */
/** Return the number of objects that fit in one slab of `cache`. */
static unsigned int comp_objects(slab_cache_t *cache)
{
    unsigned int space = PAGE_SIZE << cache->order;

    /* An on-slab control structure steals room from the objects. */
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        space -= sizeof(slab_t);

    return space / cache->size;
}
/** Return the number of bytes wasted per slab with the cache's
 * current geometry (slack left after packing whole objects). */
static unsigned int badness(slab_cache_t *cache)
{
    unsigned int ssize = PAGE_SIZE << cache->order;

    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);

    return ssize - comp_objects(cache) * cache->size;
}
/** Initialize allocated memory as a slab cache.
 * Computes the smallest slab order whose wasted space passes the
 * SLAB_MAX_BADNESS test and links the cache into slab_cache_list.
 * NOTE(review): `align`, `constructor` and `destructor` are currently
 * ignored (their uses are commented out below). */
static void
_slab_cache_create(slab_cache_t *cache,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags)
{
int pages;
// ipl_t ipl;
// memsetb((uintptr_t)cache, sizeof(*cache), 0);
// cache->name = name;
//if (align < sizeof(unative_t))
// align = sizeof(unative_t);
// size = ALIGN_UP(size, align);
cache->size = size;
// cache->constructor = constructor;
// cache->destructor = destructor;
cache->flags = flags;
list_initialize(&cache->full_slabs);
list_initialize(&cache->partial_slabs);
list_initialize(&cache->magazines);
// spinlock_initialize(&cache->slablock, "slab_lock");
// spinlock_initialize(&cache->maglock, "slab_maglock");
// if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
// make_magcache(cache);
/* Compute slab sizes, object counts in slabs etc. */
/* Minimum slab order */
pages = SIZE2FRAMES(cache->size);
/* We need the 2^order >= pages */
if (pages == 1)
cache->order = 0;
else
cache->order = fnzb(pages-1)+1;
/* Grow the slab until the wasted tail is acceptable. */
while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
cache->order += 1;
}
cache->objects = comp_objects(cache);
/* Add cache to cache list */
// ipl = interrupts_disable();
// spinlock_lock(&slab_cache_lock);
list_append(&cache->link, &slab_cache_list);
// spinlock_unlock(&slab_cache_lock);
// interrupts_restore(ipl);
}
/** Create slab cache.
 * Allocates the control block from the bootstrap cache and initializes
 * it with _slab_cache_create().
 * @return the new cache, or NULL if its control block cannot be allocated
 */
slab_cache_t * slab_cache_create(
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags)
{
    slab_cache_t *cache;

    cache = (slab_cache_t*)slab_cache_alloc();
    if (!cache)   /* was unchecked: _slab_cache_create would deref NULL */
        return NULL;

    _slab_cache_create(cache, size, align, constructor, destructor, flags);
    return cache;
}
/**
 * Deallocate space associated with slab
 *
 * Frees the slab's frames and, when the control structure lives
 * outside the slab, returns the slab_t to the dedicated slab cache.
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
frame_free(KA2PA(slab->start));
if (! (cache->flags & SLAB_CACHE_SLINSIDE))
slab_free(slab_cache, slab);
// atomic_dec(&cache->allocated_slabs);
return 1 << cache->order;
}
/**
 * Return object to slab and call a destructor
 *
 * Pushes the object back on its slab's freelist and requeues the slab:
 * a fully free slab is released back to the frame allocator, a
 * previously-full slab moves to the partial list.
 *
 * @param slab If the caller knows directly slab of the object, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
slab_t *slab)
{
int freed = 0;
if (!slab)
slab = obj2slab(obj);
// ASSERT(slab->cache == cache);
// if (cache->destructor)
// freed = cache->destructor(obj);
// spinlock_lock(&cache->slablock);
// ASSERT(slab->available < cache->objects);
/* Push the object onto the slab's freelist. */
*(void**)obj = slab->nextavail;
slab->nextavail = obj;
slab->available++;
/* Move it to correct list */
if (slab->available == cache->objects) {
/* Free associated memory */
list_remove(&slab->link);
// spinlock_unlock(&cache->slablock);
return freed + slab_space_free(cache, slab);
} else if (slab->available == 1) {
/* It was in full, move to partial */
list_remove(&slab->link);
list_prepend(&slab->link, &cache->partial_slabs);
}
// spinlock_unlock(&cache->slablock);
return freed;
}
/** Return object to cache, use slab if known.
 * The magazine layer is disabled: the object always goes straight back
 * to its slab via slab_obj_destroy(). */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
// ipl_t ipl;
// ipl = interrupts_disable();
/* NOTE(review): the disabled condition originally ended in a backslash
 * inside a `//` comment, which spliced the following source line into
 * the comment — it worked only by accident. Preserved here in a form
 * that cannot leak back into the code:
 *   if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
 *       || magazine_obj_put(cache, obj)) { ... }
 */
slab_obj_destroy(cache, obj, slab);
// }
// interrupts_restore(ipl);
// atomic_dec(&cache->allocated_objs);
}
/** Return slab object to cache (slab unknown — looked up via the
 * object's frame parent). */
void slab_free(slab_cache_t *cache, void *obj)
{
_slab_free(cache, obj, NULL);
}
static slab_t *slab_create()
{
slab_t *slab;
void *obj;
u32_t p;
// spinlock_lock(&cache->slablock);
if (list_empty(&slab_cache->partial_slabs)) {
/* Allow recursion and reclaiming
* - this should work, as the slab control structures
* are small and do not need to allocate with anything
* other than frame_alloc when they are allocating,
* that's why we should get recursion at most 1-level deep
*/
// spinlock_unlock(&cache->slablock);
// slab = slab_create();
void *data;
unsigned int i;
data = (void*)PA2KA(core_alloc(0));
if (!data) {
return NULL;
}
slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));
/* Fill in slab structures */
frame_set_parent(ADDR2PFN(KA2PA(data)), slab);
slab->start = data;
slab->available = slab_cache->objects;
slab->nextavail = (void*)data;
slab->cache = slab_cache;
for (i = 0,p = (u32_t)slab->start;i < slab_cache->objects; i++)
{
*(int *)p = p+slab_cache->size;
p = p+slab_cache->size;
};
atomic_inc(&slab_cache->allocated_slabs);
// spinlock_lock(&cache->slablock);
} else {
slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
list_remove(&slab->link);
}
obj = slab->nextavail;
slab->nextavail = *((void**)obj);
slab->available--;
if (!slab->available)
list_prepend(&slab->link, &slab_cache->full_slabs);
else
list_prepend(&slab->link, &slab_cache->partial_slabs);
// spinlock_unlock(&cache->slablock);
return (slab_t*)obj;
}
static slab_cache_t * slab_cache_alloc()
{
slab_t *slab;
void *obj;
u32_t *p;
if (list_empty(&slab_cache_cache.partial_slabs)) {
/* Allow recursion and reclaiming
* - this should work, as the slab control structures
* are small and do not need to allocate with anything
* other than frame_alloc when they are allocating,
* that's why we should get recursion at most 1-level deep
*/
// spinlock_unlock(&cache->slablock);
// slab = slab_create();
void *data;
unsigned int i;
data = (void*)(PA2KA(core_alloc(0)));
if (!data) {
return NULL;
}
slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));
/* Fill in slab structures */
frame_set_parent(ADDR2PFN(KA2PA(data)), slab);
slab->start = data;
slab->available = slab_cache_cache.objects;
slab->nextavail = (void*)data;
slab->cache = &slab_cache_cache;
for (i = 0,p = (u32_t*)slab->start;i < slab_cache_cache.objects; i++)
{
*p = (u32_t)p+slab_cache_cache.size;
p = (u32_t*)((u32_t)p+slab_cache_cache.size);
};
atomic_inc(&slab_cache_cache.allocated_slabs);
// spinlock_lock(&cache->slablock);
} else {
slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
list_remove(&slab->link);
}
obj = slab->nextavail;
slab->nextavail = *((void**)obj);
slab->available--;
if (!slab->available)
list_prepend(&slab->link, &slab_cache_cache.full_slabs);
else
list_prepend(&slab->link, &slab_cache_cache.partial_slabs);
// spinlock_unlock(&cache->slablock);
return (slab_cache_t*)obj;
}
/* Bootstrap the slab subsystem: initialize the statically allocated
 * cache-of-caches (control data kept on-slab so it needs no external
 * allocation), then create the cache for external slab_t structures. */
void slab_cache_init(void)
{
_slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
sizeof(void *), NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
/* Initialize external slab cache */
slab_cache = slab_cache_create(sizeof(slab_t),
0, NULL, NULL,SLAB_CACHE_MAGDEFERRED);
};

View File

@ -468,7 +468,7 @@ term9:
mov eax, [.slot]
shl eax, 8
mov eax,[SLOT_BASE+eax+APPDATA.dir_table]
stdcall destroy_app_space, eax
; stdcall destroy_app_space, eax
mov esi, [.slot]
cmp [fpu_owner],esi ; if user fpu last -> fpu user = 1

View File

@ -1074,7 +1074,7 @@ proc set_app_params stdcall,slot:dword, params:dword,\
mov eax, [esi+0x08] ;app_eip
mov [ebx+REG_EIP], eax ;app_entry
mov [ebx+REG_CS], dword app_code
mov [ebx+REG_EFLAGS], dword EFL_IOPL1+EFL_IF
mov [ebx+REG_EFLAGS], dword EFL_IOPL3+EFL_IF
mov eax, [esi+0x0C] ;app_esp
mov [ebx+REG_APP_ESP], eax ;app_stack

View File

@ -0,0 +1,123 @@
typedef unsigned char u8_t;
typedef unsigned short int u16_t;
typedef unsigned int u32_t;
typedef unsigned long long u64_t;
/* Read one byte from I/O port `port` (Intel-syntax inline asm).
 * The "dN" constraint lets GCC encode ports below 0x100 as an
 * immediate (IN AL, imm8); larger port numbers must go through DX. */
static inline u8_t inb(u16_t port)
{
u8_t val;
if(port < 0x100)
asm volatile ("in %b0, %w1 \n" : "=a" (val) : "dN" (port) );
else
asm volatile ("in %b0, %w1 \n" : "=a" (val) : "d" (port) );
return val;
}
/* Write byte `val` to I/O port `port` (Intel-syntax inline asm).
 * Fix: the return type was omitted — implicit int is invalid since C99,
 * and the function never returned a value anyway. */
static inline void outb(u16_t port, u8_t val)
{
    if (port < 0x100)  /* GCC can optimize this if constant */
        asm volatile ("out %w0, %b1" : :"dN"(port), "a"(val));
    else
        asm volatile ("out %w0, %b1" : :"d"(port), "a"(val));
}
/* Convert the integer D to a string and save the string in BUF. If
   BASE is equal to 'd', interpret that D is decimal, and if BASE is
   equal to 'x', interpret that D is hexadecimal. */
static void itoa (char *buf, int base, int d)
{
    char *p = buf;
    char *p1, *p2;
    unsigned long ud = d;
    int divisor = 10;

    /* If %d is specified and D is minus, put `-' in the head. */
    if (base == 'd' && d < 0)
    {
        *p++ = '-';
        buf++;
        /* Negate in unsigned arithmetic: `-d` is undefined behavior
         * when d == INT_MIN (signed overflow). */
        ud = -(unsigned long)d;
    }
    else if (base == 'x')
        divisor = 16;

    /* Divide UD by DIVISOR until UD == 0. */
    do
    {
        int remainder = ud % divisor;
        *p++ = (remainder < 10) ? remainder + '0' : remainder + 'a' - 10;
    }
    while (ud /= divisor);

    /* Terminate BUF. */
    *p = 0;

    /* Reverse BUF (digits were produced least-significant first). */
    p1 = buf;
    p2 = p - 1;
    while (p1 < p2)
    {
        char tmp = *p1;
        *p1 = *p2;
        *p2 = tmp;
        p1++;
        p2--;
    }
}
/* Blocking write of one byte to COM1 (0x3f8); echoes '\r' after '\n'.
 * NOTE(review): the Line Status Register poll masks 0x60 (THR empty |
 * transmitter idle) and proceeds when EITHER bit is set — confirm 0x20
 * (THRE) alone was not intended. Also note the emitted order is
 * "\n\r", not the conventional "\r\n" — verify receivers accept it. */
void putc(int c)
{
while (!(inb(0x3f8+5) & 0x60));
outb(0x3f8,c);
if (c == '\n')
putc('\r');
}
/* Minimal printf over the serial port: supports %d, %u, %x and %s;
 * any other specifier prints the argument as a single character.
 * NOTE(review): arguments are fetched by walking the stack through
 * `&format` — this relies on the i386 cdecl frame layout and is
 * undefined behavior in portable C; <stdarg.h> (or __builtin_va_*)
 * would be the robust replacement. Also note %u hands base 'u' to
 * itoa, which then formats as signed decimal — confirm acceptable. */
void _printf (const char *format, ...)
{
char **arg = (char **) &format;
int c;
char buf[20];
arg++;   /* skip `format` itself; *arg is now the first vararg */
while ((c = *format++) != 0)
{
if (c != '%')
putc(c);
else
{
char *p;
c = *format++;
switch (c)
{
case 'd':
case 'u':
case 'x':
itoa (buf, c, *((int *) arg++));
p = buf;
goto string;
break;
case 's':
p = *arg++;
if (! p)
p = "(null)";
string:
while (*p)
putc(*p++);
break;
default:
putc(*((int *) arg++));
break;
}
}
}
}

View File

@ -9,7 +9,8 @@ $Revision$
pusha
mov eax,[pg_data.pages_free]
call _get_free_mem
; 1/32
shr eax,5
; round off up to 8 pages

View File

@ -46,3 +46,43 @@ static inline void safe_sti(eflags_t efl)
: : "r" (efl)
);
}
/* Return the bit index of the most significant set bit of ARG
   (find-last-set).  Intel-syntax asm (-masm=intel): the XOR pre-zeroes
   the result register because BSR leaves its destination undefined
   when the source is zero, so fnzb(0) returns 0 here.
   NOTE(review): fnzb(0) and fnzb(1) are therefore both 0 — callers
   must not rely on the result to distinguish a zero argument. */
static inline count_t fnzb(u32_t arg)
{
    count_t n;
    asm volatile ("xor %0, %0 \n\t"
                  "bsr %0, %1"
                  :"=&r" (n)
                  :"r"(arg)
                  );
    return n;
}
/* Return the bit index of the least significant set bit of ARG
   (find-first-set).  Intel-syntax asm (-masm=intel): the XOR pre-zeroes
   the result register because BSF leaves its destination undefined
   when the source is zero, so _bsf(0) returns 0 here — same caveat as
   fnzb(0). */
static inline count_t _bsf(u32_t arg)
{
    count_t n;
    asm volatile ("xor %0, %0 \n\t"
                  "bsf %0, %1"
                  :"=&r" (n)
                  :"r"(arg)
                  );
    return n;
}
/* Set bit VAL in the dword pointed to by DATA.
   Fix: the original passed the pointer itself as an input with a "g"
   constraint, so "bts %0, %1" operated on the register holding the
   POINTER VALUE rather than on the pointed-to memory — the bit in
   *data was never modified.  Using a "+m"(*data) read-write memory
   operand makes BTS address the dword in memory, as intended (compare
   _set_lmask/_set_smask in heap.c, which use an explicit memory
   operand). */
static inline void _bts(u32_t *data, count_t val)
{
    asm volatile ("bts %0, %1 \n\t"
                  :"+m"(*data)
                  :"r"(val)
                  :"cc"
                  );
}
/* Clear bit VAL in the dword pointed to by DATA.
   Fix: same defect as _bts — the original's "g"(data) input constraint
   made BTR operate on the register holding the pointer value, leaving
   *data untouched.  The "+m"(*data) read-write memory operand makes
   BTR address the dword in memory. */
static inline void _btr(u32_t *data, count_t val)
{
    asm volatile ("btr %0, %1 \n\t"
                  :"+m"(*data)
                  :"r"(val)
                  :"cc"
                  );
}

View File

@ -5,6 +5,9 @@ typedef struct link
struct link *next;
}link_t;
#define LIST_INITIALIZE(name) \
link_t name = { .prev = &name, .next = &name }
#define list_get_instance(link, type, member) \
((type *)(((u8_t *)(link)) - ((u8_t *)&(((type *)NULL)->member))))
@ -48,3 +51,10 @@ static inline void list_prepend(link_t *link, link_t *head)
head->next = link;
}
/* Insert link NEW immediately before OLD in a circular doubly-linked
   list.
   Fix: the original declared the function with no return type
   (implicit int), invalid since C99; it returns nothing, so it is now
   explicitly void. */
static inline void list_insert(link_t *old, link_t *new)
{
    new->prev = old->prev;
    new->next = old;
    new->prev->next = new;
    old->prev = new;
}

View File

@ -22,9 +22,15 @@ typedef struct {
int flags;
} zone_t;
typedef struct
{
count_t count;
addr_t frames[18];
}phismem_t;
# define PA2KA(x) (((u32_t) (x)) + OS_BASE)
# define KA2PA(x) (((u32_t) (x)) - OS_BASE)
# define PA2KA(x) (((addr_t) (x)) + OS_BASE)
# define KA2PA(x) (((addr_t) (x)) - OS_BASE)
#define PAGE_SIZE 4096
#define FRAME_WIDTH 12
@ -50,10 +56,13 @@ static inline pfn_t ADDR2PFN(addr_t addr)
void init_mm();
pfn_t core_alloc(u32_t order);
addr_t __fastcall core_alloc(u32_t order);
void __fastcall core_free(addr_t frame);
pfn_t alloc_page() __attribute__ ((deprecated));
pfn_t __stdcall alloc_pages(count_t count) __asm__ ("_alloc_pages") __attribute__ ((deprecated));
void core_free(pfn_t frame);
void frame_free(pfn_t frame);
void __fastcall frame_set_parent(pfn_t pfn, void *data);
void* __fastcall frame_get_parent(pfn_t pfn);

View File

@ -0,0 +1,80 @@
/** Fixed-size cache of constructed objects, linked into a per-cache
    magazine list via @link. */
typedef struct {
    link_t link;
    count_t busy;  /**< Count of full slots in magazine */
    count_t size;  /**< Number of slots in magazine */
    void *objs[];  /**< Slots in magazine (flexible array member) */
} slab_magazine_t;
/** Per-CPU pair of magazines (current/last) for a slab cache;
    protected by @lock.  NOTE(review): assumed per-CPU from the
    mag_cache field's "CPU cache" comment in slab_cache_t — confirm
    against slab.c. */
typedef struct {
    slab_magazine_t *current;
    slab_magazine_t *last;
    SPINLOCK_DECLARE(lock);
} slab_mag_cache_t;
/** Descriptor of one slab allocator cache: configuration, statistics,
    slab lists and magazine lists for objects of a single size. */
typedef struct {
    link_t link;
    /* Configuration */
    /** Size of slab position - align_up(sizeof(obj)) */
    size_t size;
    // int (*constructor)(void *obj, int kmflag);
    // int (*destructor)(void *obj);
    /** Flags changing behaviour of cache */
    int flags;
    /* Computed values */
    u32_t order;           /**< Order of frames to be allocated */
    unsigned int objects;  /**< Number of objects that fit in */
    /* Statistics */
    atomic_t allocated_slabs;
    atomic_t allocated_objs;
    atomic_t cached_objs;
    /** How many magazines in magazines list */
    atomic_t magazine_counter;
    /* Slabs */
    link_t full_slabs;     /**< List of full slabs */
    link_t partial_slabs;  /**< List of partial slabs */
    SPINLOCK_DECLARE(slablock);
    /* Magazines */
    link_t magazines;      /**< List of full magazines */
    SPINLOCK_DECLARE(maglock);
    /** CPU cache */
    slab_mag_cache_t *mag_cache;
} slab_cache_t;
/** Control structure of a single slab (one run of frames carved into
    objects of the parent cache's size). */
typedef struct {
    link_t link;          /**< List of full/partial slabs. */
    slab_cache_t *cache;  /**< Pointer to parent cache. */
    count_t available;    /**< Count of available items in this slab. */
    void *start;          /**< Start address of first item. */
    void *nextavail;      /**< Next available item.  NOTE(review): the
                               original comment said "index", but the
                               field is a pointer — presumably a free
                               pointer/link; confirm against slab.c. */
} slab_t;
#define SLAB_INSIDE_SIZE (4096 >> 3)
/** Maximum wasted space we allow for cache */
#define SLAB_MAX_BADNESS(cache) (((size_t) PAGE_SIZE << (cache)->order) >> 2)
/** Do not use per-cpu cache */
#define SLAB_CACHE_NOMAGAZINE 0x1
/** Have control structure inside SLAB */
#define SLAB_CACHE_SLINSIDE 0x2
/** We add magazine cache later, if we have this flag */
#define SLAB_CACHE_MAGDEFERRED (0x4 | SLAB_CACHE_NOMAGAZINE)
slab_cache_t * slab_cache_create(
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags);
void* __fastcall slab_alloc(slab_cache_t *cache, int flags);

View File

@ -123,6 +123,12 @@ public @balloc@4
public __setvars
public _rd_base
public _rd_fat
public _rd_fat_end
public _rd_root
public _rd_root_end
extrn _16bit_start
extrn _16bit_end
@ -131,15 +137,22 @@ extrn _enter_bootscreen
extrn _init
extrn _init_mm
public _rd_base
public _rd_fat
public _rd_fat_end
public _rd_root
public _rd_root_end
extrn @init_heap@8
extrn @find_large_md@4
extrn @find_small_md@4
extrn @phis_alloc@4
extrn @mem_alloc@8
extrn _alloc_kernel_space@4
extrn _slab_cache_init
extrn _alloc_pages
extrn _alloc_page
extrn _get_free_mem
alloc_kernel_space equ _alloc_kernel_space@4
extrn _bx_from_load
section '.flat' code readable align 4096
@ -349,11 +362,15 @@ _high_code:
; MEMORY MODEL
call init_kernel_heap ; FIXME initialize heap after pager
call _init_mm
mov [pg_data.pg_mutex], 0
call _slab_cache_init
mov ecx, 0x80000000
mov edx, 0x40000000
call @init_heap@8
mov esi, _16bit_start
mov ecx, _16bit_end
shr ecx, 2
@ -361,8 +378,6 @@ _high_code:
cld
rep movsd
xchg bx, bx
jmp far 0x60:_enter_bootscreen;
align 4
@ -379,8 +394,9 @@ __setvars:
; SAVE & CLEAR 0-0xffff
cld
xor esi, esi
mov edi,0x1F0000
mov edi,BOOT_VAR
mov ecx,0x10000 / 4
rep movsd
xor edi, edi
@ -388,13 +404,22 @@ __setvars:
mov ecx,0x10000 / 4
rep stosd
mov edi, SLOT_BASE
mov ecx,0x10000 / 4
rep stosd
mov dword [_sys_pdbr], eax
mov dword [_sys_pdbr+4], eax
mov eax, cr3
mov cr3, eax
; SAVE REAL MODE VARIABLES
mov ax, [BOOT_VAR + 0x9031]
mov [IDEContrRegsBaseAddr], ax
; --------------- APM ---------------------
; init selectors
mov ebx, [BOOT_VAR +0x9040] ; offset of APM entry point
mov ebx, [BOOT_VAR +0x9040] ; offset of APM entry point
movzx eax, word [BOOT_VAR+0x9050] ; real-mode segment base address of
; protected-mode 32-bit code segment
movzx ecx, word [BOOT_VAR+0x9052] ; real-mode segment base address of
@ -788,7 +813,6 @@ include 'detect/disks.inc'
; SET MOUSE
;call detect_devices
stdcall load_driver, szPS2MDriver
; stdcall load_driver, szCOM_MDriver
@ -829,10 +853,10 @@ include 'detect/disks.inc'
add esi, 0x1000
stdcall map_page,esi,(tss._io_map_1-OS_BASE), PG_MAP
stdcall map_page,tss._io_map_0,\
(tss._io_map_0-OS_BASE), PG_MAP
stdcall map_page,tss._io_map_1,\
(tss._io_map_1-OS_BASE), PG_MAP
; stdcall map_page,tss._io_map_0,\
; (tss._io_map_0-OS_BASE), PG_MAP
; stdcall map_page,tss._io_map_1,\
; (tss._io_map_1-OS_BASE), PG_MAP
; mov ax,[OS_BASE+0x10000+bx_from_load]
; cmp ax,'r1' ; if not rused ram disk - load network configuration from files {SPraid.simba}
@ -842,17 +866,6 @@ include 'detect/disks.inc'
; LOAD FIRST APPLICATION
cmp byte [BOOT_VAR+0x9030],1
jne no_load_vrr_m
mov ebp, vrr_m
call fs_execute_from_sysdir
cmp eax,2 ; if vrr_m app found (PID=2)
je first_app_found
no_load_vrr_m:
mov ebp, firstapp
call fs_execute_from_sysdir
@ -1125,13 +1138,13 @@ endg
set_variables:
mov ecx,0x100 ; flush port 0x60
mov ecx,0x100 ; flush port 0x60
.fl60: in al,0x60
loop .fl60
mov [MOUSE_BUFF_COUNT],byte 0 ; mouse buffer
mov [KEY_COUNT],byte 0 ; keyboard buffer
mov [BTN_COUNT],byte 0 ; button buffer
; mov [MOUSE_X],dword 100*65536+100 ; mouse x/y
mov [MOUSE_BUFF_COUNT],byte 0 ; mouse buffer
mov [KEY_COUNT],byte 0 ; keyboard buffer
mov [BTN_COUNT],byte 0 ; button buffer
; mov [MOUSE_X],dword 100*65536+100 ; mouse x/y
push eax
mov ax,[BOOT_VAR+0x900c]
@ -1142,11 +1155,11 @@ set_variables:
mov [MOUSE_X],eax
pop eax
mov [BTN_ADDR],dword BUTTON_INFO ; address of button list
mov [BTN_ADDR],dword BUTTON_INFO ; address of button list
;!! IP 04.02.2005:
mov [next_usage_update], 100
mov byte [DONT_SWITCH], 0 ; change task if possible
mov byte [DONT_SWITCH], 0 ; change task if possible
ret
@ -1893,18 +1906,6 @@ sys_midi:
ret
detect_devices:
;!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
;include 'detect/commouse.inc'
;include 'detect/ps2mouse.inc'
;include 'detect/dev_fd.inc'
;include 'detect/dev_hdcd.inc'
;include 'detect/sear_par.inc'
;!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ret
sys_end:
mov eax,[TASK_BASE]
@ -2170,7 +2171,7 @@ sysfn_mouse_acceleration: ; 18.19 = set/get mouse features
ret
sysfn_getfreemem:
mov eax, [pg_data.pages_free]
call _get_free_mem
shl eax, 2
mov [esp+32],eax
ret

View File

@ -4,23 +4,31 @@ FASM = fasm.exe
INCLUDE = include/
DEFS = -DUSE_SMP
DEFS = -DUSE_SMP -DCONFIG_DEBUG
CFLAGS = -c -O2 -DCONFIG_DEBUG -I $(INCLUDE) -fomit-frame-pointer -fno-builtin-printf -masm=intel
CFLAGS = -c -O2 -I $(INCLUDE) -fomit-frame-pointer -fno-builtin-printf -masm=intel
LDFLAGS = -shared -s -Map kernel.map --image-base 0x100000 --file-alignment 32
KERNEL_SRC:= \
kernel.asm \
core/memory.inc \
core/heap.inc \
core/taskman.inc \
core/sys32.inc \
core/dll.inc
KERNEL_SRC:= \
kernel.asm \
PE_SRC:= \
init.c \
mm.c \
spinlock.c \
slab.c \
heap.c \
spinlock.c \
boot/boot.asm \
boot/start.asm
KERNEL_OBJS = $(patsubst %.s, bin/%.obj, $(patsubst %.asm, bin/%.obj,\
$(patsubst %.c, bin/%.obj, $(KERNEL_SRC))))
PE_OBJS = $(patsubst %.s, bin/%.obj, $(patsubst %.asm, bin/%.obj,\
$(patsubst %.c, bin/%.obj, $(PE_SRC))))
all: kernel.gz
@ -28,8 +36,8 @@ all: kernel.gz
kernel.gz :kernel.mnt
7z a -tgzip kernel.gz kernel.mnt
kernel.mnt: $(KERNEL_OBJS) Makefile ld.x
ld $(LDFLAGS) -T ld.x -o $@ $(KERNEL_OBJS)
kernel.mnt: kernel.obj $(PE_OBJS) Makefile ld.x
ld $(LDFLAGS) -T ld.x -o $@ kernel.obj $(PE_OBJS)
bin/%.obj : core/%.c Makefile
$(CC) $(CFLAGS) -o $@ $<
@ -37,6 +45,9 @@ bin/%.obj : core/%.c Makefile
bin/%.obj: %.asm
$(FASM) $< $@
kernel.obj: $(KERNEL_SRC)
$(FASM) kernel.asm
all: $(SUBDIRS)
.PHONY: all