diff --git a/kernel/branches/kolibri_pe/core/exports.inc b/kernel/branches/kolibri_pe/core/exports.inc
index 439fed19ab..c9981027e3 100644
--- a/kernel/branches/kolibri_pe/core/exports.inc
+++ b/kernel/branches/kolibri_pe/core/exports.inc
@@ -103,7 +103,6 @@ kernel_export:
   dd szPciWrite32      , pci_write32
 
   dd szAllocPage       , _alloc_page           ;stdcall
-  dd szFreePage        , free_page
   dd szMapPage         , map_page              ;stdcall
   dd szMapSpace        , map_space
   dd szMapIoMem        , map_io_mem            ;stdcall
diff --git a/kernel/branches/kolibri_pe/core/heap.c b/kernel/branches/kolibri_pe/core/heap.c
index c306d68628..9a7118965d 100644
--- a/kernel/branches/kolibri_pe/core/heap.c
+++ b/kernel/branches/kolibri_pe/core/heap.c
@@ -6,13 +6,15 @@
 #include <mm.h>
 #include <slab.h>
 
+#define page_tabs 0xDF800000
+
 typedef struct
 {
    link_t link;
    link_t adj;
    addr_t base;
    size_t size;
-   void*  parent;
+    void   *parent;
    u32_t  state;
 }md_t;
 
@@ -20,12 +22,15 @@ typedef struct
 #define   MD_USED    2
 
 typedef struct {
-    SPINLOCK_DECLARE(lock);   /**< this lock protects everything below */
+    u32_t  av_mapped;
+    u32_t  av_unmapped;
 
-    u32_t  availmask;
-    link_t free[32];
+    link_t mapped[32];
+    link_t unmapped[32];
 
     link_t used;
+
+    SPINLOCK_DECLARE(lock);   /**< this lock protects everything below */
 }heap_t;
 
 
@@ -37,19 +42,24 @@ heap_t        lheap;
 heap_t        sheap;
 
 
+static inline void _set_lavu(count_t idx)
+{ asm volatile ("bts %0, _lheap+4"::"r"(idx):"cc"); }
 
-static inline void _set_lmask(count_t idx)
-{ asm volatile ("bts %0, _lheap"::"r"(idx):"cc"); }
+static inline void _reset_lavu(count_t idx)
+{ asm volatile ("btr %0, _lheap+4"::"r"(idx):"cc"); }
 
-static inline void _reset_lmask(count_t idx)
-{ asm volatile ("btr %0, _lheap"::"r"(idx):"cc"); }
-
-static inline void _set_smask(count_t idx)
+static inline void _set_savm(count_t idx)
 { asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); }
 
-static inline void _reset_smask(count_t idx)
+static inline void _reset_savm(count_t idx)
 { asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); }
 
+static inline void _set_savu(count_t idx)
+{ asm volatile ("bts %0, _sheap+4"::"r"(idx):"cc"); }
+
+static inline void _reset_savu(count_t idx)
+{ asm volatile ("btr %0, _sheap+4"::"r"(idx):"cc"); }
+
 
 int __fastcall init_heap(addr_t base, size_t size)
 {
@@ -63,8 +73,11 @@ int __fastcall init_heap(addr_t base, size_t size)
 
    for (i = 0; i < 32; i++)
    {
-     list_initialize(&lheap.free[i]);
-     list_initialize(&sheap.free[i]);
+        list_initialize(&lheap.mapped[i]);
+        list_initialize(&lheap.unmapped[i]);
+
+        list_initialize(&sheap.mapped[i]);
+        list_initialize(&sheap.unmapped[i]);
    };
 
    list_initialize(&lheap.used);
@@ -81,11 +94,11 @@ int __fastcall init_heap(addr_t base, size_t size)
    md->parent = NULL;
    md->state = MD_FREE;
 
-   list_prepend(&md->link, &lheap.free[31]);
-   lheap.availmask = 0x80000000;
-   sheap.availmask = 0x00000000;
-
-  // phm_slab = slab_cache_create(sizeof(phismem_t), 32,NULL,NULL,SLAB_CACHE_MAGDEFERRED);
+    list_prepend(&md->link, &lheap.unmapped[31]);
+    lheap.av_mapped    = 0x00000000;
+    lheap.av_unmapped  = 0x80000000;
+    sheap.av_mapped    = 0x00000000;
+    sheap.av_unmapped  = 0x00000000;
 
    return 1;
 };
@@ -100,14 +113,14 @@ md_t* __fastcall find_large_md(size_t size)
    ASSERT((size & 0x3FFFFF) == 0);
 
    idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31;
-   mask = lheap.availmask & ( -1<<idx0 );
+    mask = lheap.av_unmapped & ( -1<<idx0 );
 
    if(mask)
    {
      if(idx0 == 31)
      {
-        md_t *tmp = (md_t*)lheap.free[31].next;
-        while((link_t*)tmp != &lheap.free[31])
+            md_t *tmp = (md_t*)lheap.unmapped[31].next;
+            while((link_t*)tmp != &lheap.unmapped[31])
         {
           if(tmp->size >= size)
           {
@@ -123,9 +136,9 @@ md_t* __fastcall find_large_md(size_t size)
      {
        idx0 = _bsf(mask);
 
-       ASSERT( !list_empty(&lheap.free[idx0]))
+            ASSERT( !list_empty(&lheap.unmapped[idx0]))
 
-       md = (md_t*)lheap.free[idx0].next;
+            md = (md_t*)lheap.unmapped[idx0].next;
      };
    }
    else
@@ -134,8 +147,8 @@ md_t* __fastcall find_large_md(size_t size)
    ASSERT(md->state == MD_FREE);
 
    list_remove((link_t*)md);
-   if(list_empty(&lheap.free[idx0]))
-     _reset_lmask(idx0);
+    if(list_empty(&lheap.unmapped[idx0]))
+        _reset_lavu(idx0);
 
    if(md->size > size)
    {
@@ -147,6 +160,7 @@ md_t* __fastcall find_large_md(size_t size)
 
      new_md->base = md->base;
      new_md->size = size;
+        new_md->parent = NULL;
      new_md->state = MD_USED;
 
      md->base+= size;
@@ -154,8 +168,8 @@ md_t* __fastcall find_large_md(size_t size)
 
      idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31;
 
-     list_prepend(&md->link, &lheap.free[idx1]);
-     _set_lmask(idx1);
+        list_prepend(&md->link, &lheap.unmapped[idx1]);
+        _set_lavu(idx1);
 
      return new_md;
    };
@@ -164,7 +178,7 @@ md_t* __fastcall find_large_md(size_t size)
    return md;
 }
 
-md_t* __fastcall find_small_md(size_t size)
+md_t* __fastcall find_unmapped_md(size_t size)
 {
     eflags_t efl;
 
@@ -178,18 +192,18 @@ md_t* __fastcall find_small_md(size_t size)
     efl = safe_cli();
 
     idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
-    mask = sheap.availmask & ( -1<<idx0 );
+    mask = sheap.av_unmapped & ( -1<<idx0 );
 
-    DBG("smask %x size %x idx0 %x mask %x\n",sheap.availmask, size, idx0, mask);
+    DBG("smask %x size %x idx0 %x mask %x\n",sheap.av_unmapped, size, idx0, mask);
 
     if(mask)
     {
         if(idx0 == 31)
         {
-            ASSERT( !list_empty(&sheap.free[31]));
+            ASSERT( !list_empty(&sheap.unmapped[31]));
 
-            md_t *tmp = (md_t*)sheap.free[31].next;
-            while((link_t*)tmp != &sheap.free[31])
+            md_t *tmp = (md_t*)sheap.unmapped[31].next;
+            while((link_t*)tmp != &sheap.unmapped[31])
             {
                 if(tmp->size >= size)
                 {
@@ -203,9 +217,9 @@ md_t* __fastcall find_small_md(size_t size)
         {
             idx0 = _bsf(mask);
 
-            ASSERT( !list_empty(&sheap.free[idx0]));
+            ASSERT( !list_empty(&sheap.unmapped[idx0]));
 
-            md = (md_t*)sheap.free[idx0].next;
+            md = (md_t*)sheap.unmapped[idx0].next;
         }
     };
 
@@ -214,10 +228,11 @@ md_t* __fastcall find_small_md(size_t size)
         DBG("remove md %x\n", md);
 
         ASSERT(md->state==MD_FREE);
+        ASSERT(md->parent != NULL);
 
         list_remove((link_t*)md);
-        if(list_empty(&sheap.free[idx0]))
-            _reset_smask(idx0);
+        if(list_empty(&sheap.unmapped[idx0]))
+            _reset_savu(idx0);
     }
     else
     {
@@ -232,6 +247,11 @@ md_t* __fastcall find_small_md(size_t size)
             return NULL;
         };
 
+        ASSERT(lmd->size != 0);
+        ASSERT(lmd->base != 0);
+        ASSERT((lmd->base & 0x3FFFFF) == 0);
+        ASSERT(lmd->parent == NULL);
+
         md = (md_t*)slab_alloc(md_slab,0);    /* FIXME check */
 
         link_initialize(&md->link);
@@ -264,16 +284,16 @@ md_t* __fastcall find_small_md(size_t size)
         DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1);
 
         if( idx1 < 31)
-          list_prepend(&md->link, &sheap.free[idx1]);
+          list_prepend(&md->link, &sheap.unmapped[idx1]);
         else
         {
-            if( list_empty(&sheap.free[31]))
-                list_prepend(&md->link, &sheap.free[31]);
+            if( list_empty(&sheap.unmapped[31]))
+                list_prepend(&md->link, &sheap.unmapped[31]);
             else
             {
-                md_t *tmp = (md_t*)sheap.free[31].next;
+                md_t *tmp = (md_t*)sheap.unmapped[31].next;
 
-                while((link_t*)tmp != &sheap.free[31])
+                while((link_t*)tmp != &sheap.unmapped[31])
                 {
                     if(md->base < tmp->base)
                         break;
@@ -283,7 +303,7 @@ md_t* __fastcall find_small_md(size_t size)
             };
         };
 
-        _set_smask(idx1);
+        _set_savu(idx1);
 
         safe_sti(efl);
 
@@ -297,13 +317,167 @@ md_t* __fastcall find_small_md(size_t size)
     return md;
 }
 
-void __fastcall free_small_md(md_t *md)
+md_t* __fastcall find_mapped_md(size_t size)
+{
+    eflags_t efl;
+
+    md_t *md = NULL;
+
+    count_t idx0;
+    u32_t mask;
+
+    ASSERT((size & 0xFFF) == 0);
+
+    efl = safe_cli();
+
+    idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
+    mask = sheap.av_mapped & ( -1<<idx0 );
+
+    DBG("small av_mapped %x size %x idx0 %x mask %x\n",sheap.av_mapped, size,
+         idx0, mask);
+
+    if(mask)
+    {
+        if(idx0 == 31)
+        {
+            ASSERT( !list_empty(&sheap.mapped[31]));
+
+            md_t *tmp = (md_t*)sheap.mapped[31].next;
+            while((link_t*)tmp != &sheap.mapped[31])
+            {
+                if(tmp->size >= size)
+                {
+                    md = tmp;
+                    break;
+                };
+                tmp = (md_t*)tmp->link.next;
+            };
+        }
+        else
+        {
+            idx0 = _bsf(mask);
+
+            ASSERT( !list_empty(&sheap.mapped[idx0]));
+
+            md = (md_t*)sheap.mapped[idx0].next;
+        }
+    };
+
+    if(md)
+    {
+        DBG("remove md %x\n", md);
+
+        ASSERT(md->state==MD_FREE);
+
+        list_remove((link_t*)md);
+        if(list_empty(&sheap.mapped[idx0]))
+            _reset_savm(idx0);
+    }
+    else
+    {
+        md_t    *lmd;
+        addr_t  frame;
+        addr_t  *pte;
+        int i;
+
+        lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF);
+
+        DBG("get large md %x\n", lmd);
+
+        if( !lmd)
+        {
+            safe_sti(efl);
+            return NULL;
+        };
+
+        ASSERT(lmd->size != 0);
+        ASSERT(lmd->base != 0);
+        ASSERT((lmd->base & 0x3FFFFF) == 0);
+        ASSERT(lmd->parent == NULL);
+
+        frame = core_alloc(10);                        /* FIXME check */
+
+        lmd->parent = (void*)frame;
+
+        pte = &((addr_t*)page_tabs)[lmd->base>>12];    /* FIXME remove */
+
+        for(i = 0; i<1024; i++)
+        {
+           *pte++ = frame;
+           frame+= 4096;
+        }
+
+        md = (md_t*)slab_alloc(md_slab,0);             /* FIXME check */
+
+        link_initialize(&md->link);
+        list_initialize(&md->adj);
+        md->base = lmd->base;
+        md->size = lmd->size;
+        md->parent  = lmd;
+        md->state = MD_USED;
+    };
+
+    if(md->size > size)
+    {
+        count_t idx1;
+        md_t *new_md = (md_t*)slab_alloc(md_slab,0);    /* FIXME check */
+
+        link_initialize(&new_md->link);
+        list_insert(&new_md->adj, &md->adj);
+
+        new_md->base = md->base;
+        new_md->size = size;
+        new_md->parent = md->parent;
+
+        md->base+= size;
+        md->size-= size;
+        md->state = MD_FREE;
+
+        idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
+
+        DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1);
+
+        if( idx1 < 31)
+          list_prepend(&md->link, &sheap.mapped[idx1]);
+        else
+        {
+            if( list_empty(&sheap.mapped[31]))
+                list_prepend(&md->link, &sheap.mapped[31]);
+            else
+            {
+                md_t *tmp = (md_t*)sheap.mapped[31].next;
+
+                while((link_t*)tmp != &sheap.mapped[31])
+                {
+                    if(md->base < tmp->base)
+                        break;
+                    tmp = (md_t*)tmp->link.next;
+                }
+                list_insert(&md->link, &tmp->link);
+            };
+        };
+
+        _set_savm(idx1);
+
+        md = new_md;
+    };
+
+    md->state = MD_USED;
+
+    safe_sti(efl);
+
+    return md;
+}
+
+void __fastcall free_unmapped_md(md_t *md)
 {
     eflags_t  efl ;
     md_t     *fd;
     md_t     *bk;
     count_t   idx;
 
+    ASSERT(md->parent != NULL);
+
     efl = safe_cli();
     spinlock_lock(&sheap.lock);
 
@@ -317,8 +491,8 @@ void __fastcall free_small_md(md_t *md)
             idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;
 
             list_remove((link_t*)fd);
-            if(list_empty(&sheap.free[idx]))
-                _reset_smask(idx);
+            if(list_empty(&sheap.unmapped[idx]))
+                _reset_savu(idx);
 
             md->size+= fd->size;
             md->adj.next = fd->adj.next;
@@ -330,8 +504,8 @@ void __fastcall free_small_md(md_t *md)
             idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;
 
             list_remove((link_t*)bk);
-            if(list_empty(&sheap.free[idx]))
-                _reset_smask(idx);
+            if(list_empty(&sheap.unmapped[idx]))
+                _reset_savu(idx);
 
             bk->size+= md->size;
             bk->adj.next = md->adj.next;
@@ -345,19 +519,19 @@ void __fastcall free_small_md(md_t *md)
 
     idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
 
-    _set_smask(idx);
+    _set_savu(idx);
 
     if( idx < 31)
-        list_prepend(&md->link, &sheap.free[idx]);
+        list_prepend(&md->link, &sheap.unmapped[idx]);
     else
     {
-        if( list_empty(&sheap.free[31]))
-            list_prepend(&md->link, &sheap.free[31]);
+        if( list_empty(&sheap.unmapped[31]))
+            list_prepend(&md->link, &sheap.unmapped[31]);
         else
         {
-            md_t *tmp = (md_t*)sheap.free[31].next;
+            md_t *tmp = (md_t*)sheap.unmapped[31].next;
 
-            while((link_t*)tmp != &sheap.free[31])
+            while((link_t*)tmp != &sheap.unmapped[31])
             {
                 if(md->base < tmp->base)
                     break;
@@ -371,59 +545,82 @@ void __fastcall free_small_md(md_t *md)
 
 };
 
-
-#define page_tabs 0xDF800000
-
-/*
-phismem_t* __fastcall phis_alloc(count_t count)
+void __fastcall free_mapped_md(md_t *md)
 {
-   phismem_t *phm;
-   count_t tmp;
-   phm = (phismem_t*)slab_alloc(phm_slab, 0);
+    eflags_t  efl ;
+    md_t     *fd;
+    md_t     *bk;
+    count_t   idx;
 
-   phm->count = count;
-   tmp = count;
-   while(tmp)
+    ASSERT(md->parent != NULL);
+    ASSERT( ((md_t*)(md->parent))->parent != NULL);
+
+    efl = safe_cli();
+    spinlock_lock(&sheap.lock);
+
+    if( !list_empty(&md->adj))
+    {
+        bk = (md_t*)md->adj.prev;
+        fd = (md_t*)md->adj.next;
+
+        if(fd->state == MD_FREE)
    {
-      u32_t order;
+            idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;
 
-      asm volatile ("bsr %0, %1":"=&r"(order):"r"(tmp):"cc");
-      asm volatile ("btr %0, %1" :"=r"(tmp):"r"(order):"cc");
+            list_remove((link_t*)fd);
+            if(list_empty(&sheap.mapped[idx]))
+                _reset_savm(idx);
 
-      phm->frames[order] = core_alloc(order);
+            md->size+= fd->size;
+            md->adj.next = fd->adj.next;
+            md->adj.next->prev = (link_t*)md;
+            slab_free(md_slab, fd);
+        };
+        if(bk->state == MD_FREE)
+        {
+            idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;
 
+            list_remove((link_t*)bk);
+            if(list_empty(&sheap.mapped[idx]))
+                _reset_savm(idx);
+
+            bk->size+= md->size;
+            bk->adj.next = md->adj.next;
+            bk->adj.next->prev = (link_t*)bk;
+            slab_free(md_slab, md);
+            md = bk;
+        };
    };
 
-   return phm;
-}
+    md->state = MD_FREE;
 
-void map_phm(addr_t base, phismem_t *phm, u32_t mapflags)
-{
-   count_t count;
-   addr_t  *pte;
+    idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
 
-   count = phm->count;
-   pte = &((addr_t*)page_tabs)[base>>12];
+    _set_savm(idx);
 
-   while(count)
+    if( idx < 31)
+        list_prepend(&md->link, &sheap.mapped[idx]);
+    else
+    {
+        if( list_empty(&sheap.mapped[31]))
+            list_prepend(&md->link, &sheap.mapped[31]);
+        else
    {
-     u32_t order;
-     addr_t frame;
-     count_t size;
+            md_t *tmp = (md_t*)sheap.mapped[31].next;
 
-     asm volatile ("bsr %0, %1":"=&r"(order):"r"(count):"cc");
-     asm volatile ("btr %0, %1" :"=r"(count):"r"(order):"cc");
-
-     frame = phm->frames[order] | mapflags;
-     size = (1 << order);
-     while(size--)
+            while((link_t*)tmp != &sheap.mapped[31])
      {
-       *pte++ = frame;
-       frame+= 4096;
-     }
+                if(md->base < tmp->base)
+                    break;
+                tmp = (md_t*)tmp->link.next;
    }
+            list_insert(&md->link, &tmp->link);
+        };
+    };
+    spinlock_unlock(&sheap.lock);
+    safe_sti(efl);
 };
-*/
+
 
 void * __fastcall mem_alloc(size_t size, u32_t flags)
 {
@@ -437,36 +634,41 @@ void * __fastcall mem_alloc(size_t size, u32_t flags)
 
     size = (size+4095)&~4095;
 
-    md = find_small_md(size);
-
-    if( md )
+    if( flags & PG_MAP )
     {
-        ASSERT(md->state == MD_USED);
+        md = find_mapped_md(size);
 
-        if( flags & PG_MAP )
-        {
-            count_t tmp = size >> 12;
-            addr_t  *pte = &((addr_t*)page_tabs)[md->base>>12];
+        if( !md )
+            return NULL;
 
-            while(tmp)
-            {
-                u32_t  order;
-                addr_t frame;
-                size_t size;
+        md_t *lmd = (md_t*)md->parent;
 
-                asm volatile ("bsr %1, %0":"=&r"(order):"r"(tmp):"cc");
-                asm volatile ("btr %1, %0" :"=r"(tmp):"r"(order):"cc");
+        ASSERT( lmd != NULL);
+        ASSERT( lmd->parent != NULL);
 
-                frame = core_alloc(order) | flags;         /* FIXME check */
+        addr_t  frame  = (md->base - lmd->base + (addr_t)lmd->parent)|
+                         (flags & 0xFFF);
+        DBG("frame %x\n", frame);
+        ASSERT(frame != 0);
 
-                size = (1 << order);
-                while(size--)
+        count_t  tmp = size >> 12;
+        addr_t  *pte = &((addr_t*)page_tabs)[md->base>>12];
+
+        while(tmp--)
                 {
                     *pte++ = frame;
                     frame+= 4096;
                 };
-            };
-        };
+    }
+    else
+        md = find_unmapped_md(size);
+
+    if( !md )
+        return NULL;
+
+    ASSERT(md->parent != NULL);
+    ASSERT(md->state == MD_USED);
+
 
         efl = safe_cli();
         spinlock_lock(&sheap.lock);
@@ -491,8 +693,6 @@ void * __fastcall mem_alloc(size_t size, u32_t flags)
 
         DBG("allocate: %x size %x\n\n",md->base, size);
         return (void*)md->base;
-    };
-    return NULL;
 };
 
 void __fastcall mem_free(void *mem)
@@ -524,10 +724,20 @@ void __fastcall mem_free(void *mem)
 
     if( md )
     {
+        md_t *lmd;
+
         DBG("\tmd: %x base: %x size: %x\n",md, md->base, md->size);
 
         ASSERT(md->state == MD_USED);
 
+        list_remove((link_t*)md);
+
+        lmd = (md_t*)md->parent;
+
+        ASSERT(lmd != 0);
+
+        if(lmd->parent != 0)
+        {
         count_t tmp  = md->size >> 12;
         addr_t  *pte = &((addr_t*)page_tabs)[md->base>>12];
 
@@ -536,17 +746,17 @@ void __fastcall mem_free(void *mem)
             *pte++ = 0;
             asm volatile (
                 "invlpg (%0)"
-                :
-                :"r" (mem) );
+                    ::"r" (mem) );
             mem+= 4096;
         };
-        list_remove((link_t*)md);
-        free_small_md( md );
+
+            free_mapped_md( md );
+        }
+        else
+            free_unmapped_md( md );
     }
     else
-    {
         DBG("\tERROR: invalid base address: %x\n", mem);
-    };
 
     safe_sti(efl);
 };
diff --git a/kernel/branches/kolibri_pe/core/heap.inc b/kernel/branches/kolibri_pe/core/heap.inc
index 71b6827b04..fb3ff6a43b 100644
--- a/kernel/branches/kolibri_pe/core/heap.inc
+++ b/kernel/branches/kolibri_pe/core/heap.inc
@@ -149,30 +149,31 @@ proc user_free stdcall, base:dword
            test al, DONT_FREE_BLOCK
            jnz .cantfree
 
+           push edi
+
            and eax, not 4095
-           mov ecx, eax
+           mov edi, eax
            or al, FREE_BLOCK
            mov [page_tabs+(esi-1)*4], eax
-           sub ecx, 4096
-           mov ebx, ecx
-           shr ecx, 12
+           sub edi, 4096
+           mov ebx, edi
+           shr edi, 12
            jz .released
 .release:
-           xor eax, eax
-           xchg eax, [page_tabs+esi*4]
-           test al, 1
+           xor ecx, ecx
+           xchg ecx, [page_tabs+esi*4]
+           test cl, 1
            jz @F
-           call free_page
+
+           call @core_free@4
            mov eax, esi
            shl eax, 12
            invlpg [eax]
 @@:
            inc esi
-           dec ecx
+           dec edi
            jnz .release
 .released:
-           push edi
-
            mov edx, [current_slot]
            mov esi, dword [edx+APPDATA.heap_base]
            mov edi, dword [edx+APPDATA.heap_top]
@@ -276,20 +277,28 @@ user_realloc:
         cmp     edx, ebx
         jb      .realloc_add
 ; release part of allocated memory
+
+        push ecx
 .loop:
         cmp     edx, ebx
         jz      .release_done
         dec     edx
-        xor     eax, eax
-        xchg    eax, [page_tabs+edx*4]
+        xor     ecx, ecx
+        xchg    ecx, [page_tabs+edx*4]
-        test    al, 1
+        test    cl, 1
         jz      .loop
-        call    free_page
+
+        push edx
+        call    @core_free@4
+        pop edx
         mov     eax, edx
         shl     eax, 12
         invlpg  [eax]
         jmp     .loop
 .release_done:
+
+        pop ecx
+
         sub     ebx, ecx
         cmp     ebx, 1
         jnz     .nofreeall
diff --git a/kernel/branches/kolibri_pe/core/memory.inc b/kernel/branches/kolibri_pe/core/memory.inc
index 158d9a350a..41acb8e137 100644
--- a/kernel/branches/kolibri_pe/core/memory.inc
+++ b/kernel/branches/kolibri_pe/core/memory.inc
@@ -29,12 +29,6 @@ map_space:    ;not implemented
 
            ret
 
-
-align 4
-free_page:
-
-           ret
-
 proc map_io_mem stdcall, base:dword, size:dword, flags:dword
 
            push edi
@@ -246,18 +240,16 @@ proc new_mem_resize stdcall, new_size:dword
            shr edi, 12
            shr esi, 12
 @@:
-           mov eax, [app_page_tabs+edi*4]
-           test eax, 1
+           mov ecx, [app_page_tabs+edi*4]
+           test ecx, 1
            jz .next
            mov dword [app_page_tabs+edi*4], 2
            mov ebx, edi
            shl ebx, 12
-           push eax
            invlpg [ebx]
-           pop eax
-           call free_page
-
-.next:     add edi, 1
+           call @core_free@4
+.next:
+           add edi, 1
            cmp edi, esi
            jb @B
 
diff --git a/kernel/branches/kolibri_pe/core/mm.c b/kernel/branches/kolibri_pe/core/mm.c
index 1655794e69..fca1c019c1 100644
--- a/kernel/branches/kolibri_pe/core/mm.c
+++ b/kernel/branches/kolibri_pe/core/mm.c
@@ -549,7 +549,7 @@ addr_t __fastcall zone_alloc(zone_t *zone, u32_t order)
    return (v << FRAME_WIDTH);
 }
 
-addr_t  __fastcall core_alloc(u32_t order)        //export
+addr_t  __fastcall core_alloc(u32_t order)
 {
    eflags_t efl;
    pfn_t v;
@@ -559,11 +559,14 @@ addr_t  __fastcall core_alloc(u32_t order)        //export
        v = zone_frame_alloc(&z_core, order);
      spinlock_unlock(&z_core.lock);
    safe_sti(efl);
-   DBG("core alloc: %x, size %x\n", v << FRAME_WIDTH, (1<<order)<<12);
+
+   DBG("core alloc: %x, size %x   remain  %d\n", v << FRAME_WIDTH,
+        ((1<<order)<<12), z_core.free_count);
+
    return (v << FRAME_WIDTH);
 };
 
-void __fastcall core_free(addr_t frame)            //export
+void __fastcall core_free(addr_t frame)
 {
    eflags_t efl;
 
@@ -572,6 +575,9 @@ void __fastcall core_free(addr_t frame)            //export
        zone_free(&z_core, frame>>12);
      spinlock_unlock(&z_core.lock);
    safe_sti(efl);
+
+   DBG("core free %x  remain %d\n", frame, z_core.free_count);
+
 }
 
 addr_t alloc_page()                                //obsolete
@@ -587,7 +593,7 @@ addr_t alloc_page()                                //obsolete
      spinlock_unlock(&z_core.lock);
    safe_sti(efl);
 
-   DBG("alloc_page: %x\n", v << FRAME_WIDTH);
+   DBG("alloc_page: %x   remain  %d\n", v << FRAME_WIDTH, z_core.free_count);
 
    restore_edx(edx);
    return (v << FRAME_WIDTH);
@@ -605,7 +611,8 @@ void __fastcall zone_free(zone_t *zone, pfn_t frame_idx)
 
   ASSERT(frame->refcount);
 
-	if (!--frame->refcount) {
+    if (!--frame->refcount)
+    {
 		buddy_system_free(zone, &frame->buddy_link);
 
 		/* Update zone information. */
diff --git a/kernel/branches/kolibri_pe/core/sys32.inc b/kernel/branches/kolibri_pe/core/sys32.inc
index aacd603b5e..26b40eb2ea 100644
--- a/kernel/branches/kolibri_pe/core/sys32.inc
+++ b/kernel/branches/kolibri_pe/core/sys32.inc
@@ -500,7 +500,7 @@ term9:
 	   mov eax, [.slot]
 	   shl eax, 8
 	   mov eax,[SLOT_BASE+eax+APPDATA.dir_table]
-       ;    stdcall destroy_app_space, eax
+       stdcall destroy_app_space, eax
 
 	   mov esi, [.slot]
 	   cmp [fpu_owner],esi	 ; if user fpu last -> fpu user = 1
@@ -629,23 +629,23 @@ term9:
            shl edi, 8
 	   add edi,SLOT_BASE
 
-           mov eax,[edi+APPDATA.pl0_stack]
-           sub eax, OS_BASE
-           call free_page
+           mov ecx,[edi+APPDATA.pl0_stack]
+           sub ecx, OS_BASE
+           call @core_free@4
 
-           mov eax,[edi+APPDATA.cur_dir]
-           sub eax, OS_BASE
-           call free_page
+           mov ecx,[edi+APPDATA.cur_dir]
+           sub ecx, OS_BASE
+           call @core_free@4
 
-	   mov eax, [edi+APPDATA.io_map]
-	   cmp eax, (tss._io_map_0-OS_BASE+PG_MAP)
+           mov ecx, [edi+APPDATA.io_map]
+           cmp ecx, (tss._io_map_0-OS_BASE+PG_MAP)
 	   je @F
-	   call free_page
+           call @core_free@4
 @@:
-	   mov eax, [edi+APPDATA.io_map+4]
-	   cmp eax, (tss._io_map_1-OS_BASE+PG_MAP)
+           mov ecx, [edi+APPDATA.io_map+4]
+           cmp ecx, (tss._io_map_1-OS_BASE+PG_MAP)
 	   je @F
-	   call free_page
+           call @core_free@4
 @@:
 	   mov eax, 0x20202020
 	   stosd
diff --git a/kernel/branches/kolibri_pe/core/taskman.inc b/kernel/branches/kolibri_pe/core/taskman.inc
index 8f5b0c2d4b..74a8b4c046 100644
--- a/kernel/branches/kolibri_pe/core/taskman.inc
+++ b/kernel/branches/kolibri_pe/core/taskman.inc
@@ -200,8 +200,6 @@ proc fs_execute
            mov   eax,[hdr_mem]
            mov   [ebx+APPDATA.mem_size],eax
 
-if GREEDY_KERNEL
-else
            mov ecx, [hdr_mem]
            mov edi, [file_size]
            add edi, 4095
@@ -213,11 +211,9 @@ else
                cld
            rep stosb
 @@:
-end if
+           mov ecx, [file_base]
+           call @mem_free@4
 
-; release only virtual space, not phisical memory
-
-           stdcall free_kernel_space, [file_base]   ;
            lea eax, [hdr_cmdline]
            lea ebx, [cmdline]
            lea ecx, [filename]
@@ -375,14 +371,6 @@ proc create_app_space stdcall, app_size:dword,img_base:dword,img_size:dword
            shr ecx, 12
            mov [img_pages], ecx
 
-;     if GREEDY_KERNEL
-;           lea eax, [ecx+ebx+2]    ;only image size
-;     else
-;           lea eax, [eax+ebx+2]    ;all requested memory
-;     end if
-      ;     cmp eax, [pg_data.pages_free]
-      ;     ja .fail
-
            call _alloc_page
            test eax, eax
            mov [dir_addr], eax
@@ -434,41 +422,24 @@ proc create_app_space stdcall, app_size:dword,img_base:dword,img_size:dword
            xor eax, eax
            rep stosd
 
-           mov ecx, [img_pages]
-           mov ebx, PG_UW
-               mov esi, [img_base]
-           shr esi, 10
-           add esi, page_tabs
-           xor edx, edx
-           mov edi, page_tabs
-.remap:
-           lodsd
-           or eax, ebx      ; force user level r/w access
-           stosd
-           add edx, 0x1000
-           dec [app_pages]
-           dec ecx
-           jnz .remap
-
            mov ecx, [app_pages]
-           test ecx, ecx
-           jz .done
-
-if GREEDY_KERNEL
-           mov eax, 0x02
-           rep stosd
-else
-
+           xor ebx, ebx
 .alloc:
-           call _alloc_page
+           xor ecx, ecx
+           call @core_alloc@4
            test eax, eax
            jz .fail
 
-           stdcall map_page,edx,eax,dword PG_UW
-           add edx, 0x1000
+           stdcall map_page,ebx,eax,dword PG_UW
+           add ebx, 0x1000
            dec [app_pages]
            jnz .alloc
-end if
+
+           mov ecx, [img_size]                 ; FIXME remap md
+           mov esi, [img_base]
+           xor edi, edi
+
+           rep movsb
 
 .done:
            dec [pg_data.pg_mutex]
@@ -495,24 +466,26 @@ set_cr3:
 align 4
 proc destroy_page_table stdcall, pg_tab:dword
 
+           push ebx
            push esi
 
            mov esi, [pg_tab]
-           mov ecx, 1024
+           mov ebx, 1024
 .free:
-           mov eax, [esi]
-           test eax, 1
+           mov ecx, [esi]
+           test ecx, 1
            jz .next
 
-           test eax, 1 shl 9
+           test ecx, 1 shl 9
            jnz .next                      ;skip shared pages
 
-           call free_page
+           call @core_free@4
 .next:
            add esi, 4
-           dec ecx
+           dec ebx
            jnz .free
            pop esi
+           pop ebx
            ret
 endp
 
@@ -563,15 +536,15 @@ proc destroy_app_space stdcall, pg_dir:dword
 
            stdcall destroy_page_table, eax
 
-           mov eax, [esi]
-           call free_page
+           mov ecx, [esi]
+           call @core_free@4
 .next:
            add esi, 4
            dec edi
            jnz .destroy
 
-           mov eax, [pg_dir]
-           call free_page
+           mov ecx, [pg_dir]
+           call @core_free@4
 .exit:
            dec [pg_data.pg_mutex]
            ret
@@ -744,7 +717,6 @@ proc read_process_memory
 	   mov eax, [slot]
 	   shl	eax,8
 	   mov ebx, [offset]
-     ;      add ebx, new_app_base
 	   push ecx
 	   stdcall map_memEx, [proc_mem_map],\
 			      [SLOT_BASE+eax+0xB8],\
diff --git a/kernel/branches/kolibri_pe/core/test.c b/kernel/branches/kolibri_pe/core/test.c
deleted file mode 100644
index 6679945b1a..0000000000
--- a/kernel/branches/kolibri_pe/core/test.c
+++ /dev/null
@@ -1,123 +0,0 @@
-
-typedef  unsigned char        u8_t;
-typedef  unsigned short int   u16_t;
-typedef  unsigned int         u32_t;
-typedef  unsigned long long   u64_t;
-
-static inline u8_t inb(u16_t port)
-{
-  u8_t val;
-  if(port < 0x100)
-    asm volatile ("in %b0, %w1 \n" : "=a" (val) : "dN" (port) );
-  else
-    asm volatile ("in %b0, %w1 \n" : "=a" (val) : "d" (port) );
-	return val;
-}
-
-static inline outb(u16_t port, u8_t val)
-{
-  if (port < 0x100) /* GCC can optimize this if constant */
-    asm volatile ("out %w0, %b1" : :"dN"(port), "a"(val));
-  else
-    asm volatile ("out %w0, %b1" : :"d"(port), "a"(val));
-}
-
-
-/* Convert the integer D to a string and save the string in BUF. If
-   BASE is equal to 'd', interpret that D is decimal, and if BASE is
-   equal to 'x', interpret that D is hexadecimal.  */
-static void itoa (char *buf, int base, int d)
-{
-  char *p = buf;
-  char *p1, *p2;
-  unsigned long ud = d;
-  int divisor = 10;
-
-  /* If %d is specified and D is minus, put `-' in the head.  */
-  if (base == 'd' && d < 0)
-    {
-      *p++ = '-';
-      buf++;
-      ud = -d;
-    }
-  else if (base == 'x')
-    divisor = 16;
-
-  /* Divide UD by DIVISOR until UD == 0.  */
-  do
-    {
-      int remainder = ud % divisor;
-
-      *p++ = (remainder < 10) ? remainder + '0' : remainder + 'a' - 10;
-    }
-  while (ud /= divisor);
-
-  /* Terminate BUF.  */
-  *p = 0;
-
-  /* Reverse BUF.  */
-  p1 = buf;
-  p2 = p - 1;
-  while (p1 < p2)
-    {
-      char tmp = *p1;
-      *p1 = *p2;
-      *p2 = tmp;
-      p1++;
-      p2--;
-    }
-}
-
-void putc(int c)
-{
-    while (!(inb(0x3f8+5) & 0x60));
-    outb(0x3f8,c);
-    if (c == '\n')
-      putc('\r');
-}
-
-void _printf (const char *format, ...)
-{
-  char **arg = (char **) &format;
-  int c;
-  char buf[20];
-
-  arg++;
-
-  while ((c = *format++) != 0)
-  {
-    if (c != '%')
-      putc(c);
-    else
-    {
-      char *p;
-
-      c = *format++;
-      switch (c)
-      {
-        case 'd':
-        case 'u':
-        case 'x':
-          itoa (buf, c, *((int *) arg++));
-          p = buf;
-          goto string;
-          break;
-
-        case 's':
-          p = *arg++;
-          if (! p)
-            p = "(null)";
-
-  string:
-          while (*p)
-          putc(*p++);
-          break;
-
-        default:
-          putc(*((int *) arg++));
-          break;
-	    }
-    }
-  }
-}
-
diff --git a/kernel/branches/kolibri_pe/include/core.h b/kernel/branches/kolibri_pe/include/core.h
index 1826e7bf17..fa92a9d9fb 100644
--- a/kernel/branches/kolibri_pe/include/core.h
+++ b/kernel/branches/kolibri_pe/include/core.h
@@ -30,6 +30,13 @@ extern void panic_printf(char *fmt, ...) __attribute__((noreturn));
 
 # define DBG(format,...)
 
+# define PANIC(expr)   \
+      if (!(expr)) {   \
+         panic_printf("Kernel panic in %s() at %s:%u: " \
+                      "assertion failed (%s)",__func__ ,__FILE__,__LINE__, \
+                       #expr); \
+      };
+
 #endif
 
 
diff --git a/kernel/branches/kolibri_pe/include/mm.h b/kernel/branches/kolibri_pe/include/mm.h
index 69ca399525..ab42ac1de5 100644
--- a/kernel/branches/kolibri_pe/include/mm.h
+++ b/kernel/branches/kolibri_pe/include/mm.h
@@ -71,4 +71,4 @@ void frame_free(pfn_t frame);
 void __fastcall frame_set_parent(pfn_t pfn, void *data);
 void* __fastcall frame_get_parent(pfn_t pfn);
 
-void* __fastcall heap_alloc(size_t size, u32_t flags) ;
+void* __fastcall mem_alloc(size_t size, u32_t flags) ;
diff --git a/kernel/branches/kolibri_pe/kernel.asm b/kernel/branches/kolibri_pe/kernel.asm
index 63f4135d8f..e3ad11b008 100644
--- a/kernel/branches/kolibri_pe/kernel.asm
+++ b/kernel/branches/kolibri_pe/kernel.asm
@@ -146,7 +146,6 @@ extrn @core_free@4
 
 extrn @init_heap@8
 extrn @find_large_md@4
-extrn @find_small_md@4
 extrn @phis_alloc@4
 
 extrn @mem_alloc@8
@@ -2397,11 +2396,6 @@ draw_background_temp:
 	add	ecx, 0xFFF
 	shr	ecx, 12
 .z:
-	mov	eax, [page_tabs+ebx*4]
-	test	al, 1
-	jz	@f
-	call	free_page
-@@:
 	mov	eax, [page_tabs+esi*4]
 	or	al, PG_UW
 	mov	[page_tabs+ebx*4], eax