From 8304dbb5d47cff404dd37f5a5e412196dc39c035 Mon Sep 17 00:00:00 2001 From: "Sergey Semyonov (Serge)" Date: Wed, 15 Jan 2014 12:12:52 +0000 Subject: [PATCH] kolibri-process: add child thread's to doubly linked list git-svn-id: svn://kolibrios.org@4457 a494cfbc-eb01-0410-851d-a64ba20cac60 --- .../branches/kolibri-process/blkdev/disk.inc | 7 +- .../kolibri-process/blkdev/disk_cache.inc | 1397 +++++++++++++---- .../branches/kolibri-process/core/sys32.inc | 30 +- .../branches/kolibri-process/core/taskman.inc | 8 + kernel/branches/kolibri-process/kernel.asm | 2 + kernel/branches/kolibri-process/kernel32.inc | 13 +- kernel/branches/kolibri-process/macros.inc | 8 +- .../kolibri-process/network/socket.inc | 2 + .../kolibri-process/video/blitter.inc | 97 +- .../branches/kolibri-process/video/vesa20.inc | 7 - 10 files changed, 1182 insertions(+), 389 deletions(-) diff --git a/kernel/branches/kolibri-process/blkdev/disk.inc b/kernel/branches/kolibri-process/blkdev/disk.inc index e08cf28038..f9036a67c4 100644 --- a/kernel/branches/kolibri-process/blkdev/disk.inc +++ b/kernel/branches/kolibri-process/blkdev/disk.inc @@ -1,6 +1,6 @@ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; -;; Copyright (C) KolibriOS team 2011-2012. All rights reserved. ;; +;; Copyright (C) KolibriOS team 2011-2014. All rights reserved. ;; ;; Distributed under terms of the GNU General Public License ;; ;; ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -16,6 +16,7 @@ DISK_STATUS_GENERAL_ERROR = -1; if no other code is suitable DISK_STATUS_INVALID_CALL = 1 ; invalid input parameters DISK_STATUS_NO_MEDIA = 2 ; no media present DISK_STATUS_END_OF_MEDIA = 3 ; end of media while reading/writing data +DISK_STATUS_NO_MEMORY = 4 ; insufficient memory for driver operation ; Driver flags. Represent bits in DISK.DriverFlags. DISK_NO_INSERT_NOTIFICATION = 1 ; Media flags. Represent bits in DISKMEDIAINFO.Flags. @@ -101,8 +102,6 @@ ends ; there are two distinct caches for a disk, one for "system" data,and the other ; for "application" data. struct DISKCACHE - mutex MUTEX -; Lock to protect the cache. ; The following fields are inherited from data32.inc:cache_ideX. pointer dd ? data_size dd ? ; unused @@ -169,6 +168,8 @@ struct DISK ; Pointer to array of .NumPartitions pointers to PARTITION structures. cache_size dd ? ; inherited from cache_ideX_size + CacheLock MUTEX +; Lock to protect both caches. SysCache DISKCACHE AppCache DISKCACHE ; Two caches for the disk. diff --git a/kernel/branches/kolibri-process/blkdev/disk_cache.inc b/kernel/branches/kolibri-process/blkdev/disk_cache.inc index 1b862a0de4..7cd21da002 100644 --- a/kernel/branches/kolibri-process/blkdev/disk_cache.inc +++ b/kernel/branches/kolibri-process/blkdev/disk_cache.inc @@ -1,12 +1,495 @@ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; -;; Copyright (C) KolibriOS team 2011-2012. All rights reserved. ;; +;; Copyright (C) KolibriOS team 2011-2014. All rights reserved. ;; ;; Distributed under terms of the GNU General Public License ;; ;; ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; $Revision: 4133 $ +; Read/write functions try to do large operations, +; it is significantly faster than several small operations. +; This requires large buffers. +; We can't use input/output buffers directly - they can be controlled +; by user-mode application, so they can be modified between the operation +; and copying to/from cache, giving invalid data in cache. 
+; It is unclear how to use cache directly, currently cache items are +; allocated/freed sector-wise, so items for sequential sectors can be +; scattered over all the cache. +; So read/write functions allocate a temporary buffer which is +; 1) not greater than half of free memory and +; 2) not greater than the following constant. +CACHE_MAX_ALLOC_SIZE = 4 shl 20 + +; Legacy interface for filesystems fs_{read,write}32_{sys,app} +; gives only one sector for FS. However, per-sector reading is inefficient, +; so internally fs_read32_{sys,app} reads to the cache several sequential +; sectors, hoping that they will be useful. +; Total number of sectors is given by the following constant. +CACHE_LEGACY_READ_SIZE = 16 + +; This structure describes one item in the cache. +struct CACHE_ITEM +SectorLo dd ? ; low 32 bits of sector +SectorHi dd ? ; high 32 bits of sector +Status dd ? ; one of CACHE_ITEM_* +ends + +; Possible values for CACHE_ITEM_* +CACHE_ITEM_EMPTY = 0 +CACHE_ITEM_COPY = 1 +CACHE_ITEM_MODIFIED = 2 + +; Read several sequential sectors using cache #1. +; in: edx:eax = start sector, relative to start of partition +; in: ecx = number of sectors to read +; in: ebx -> buffer +; in: ebp -> PARTITION +; out: eax = error code, 0 = ok +; out: ecx = number of sectors that were read +fs_read64_sys: +; Save ebx, set ebx to SysCache and let the common part do its work. + push ebx + mov ebx, [ebp+PARTITION.Disk] + add ebx, DISK.SysCache + jmp fs_read64_common + +; Read several sequential sectors using cache #2. +; in: edx:eax = start sector, relative to start of partition +; in: ecx = number of sectors to read +; in: edi -> buffer +; in: ebp -> PARTITION +; out: eax = error code, 0 = ok +; out: ecx = number of sectors that were read +fs_read64_app: +; Save ebx, set ebx to AppCache and let the common part do its work. + push ebx + mov ebx, [ebp+PARTITION.Disk] + add ebx, DISK.AppCache + +; Common part of fs_read64_{app,sys}: +; read several sequential sectors using the given cache. +fs_read64_common: +; 1. Setup stack frame. + push esi edi ; save used registers to be stdcall + push 0 ; initialize .error_code + push ebx edx eax ecx ecx ; initialize stack variables +virtual at esp +.local_vars: +.num_sectors_orig dd ? +; Number of sectors that should be read. Used to generate output value of ecx. +.num_sectors dd ? +; Number of sectors that remain to be read. Decreases from .num_sectors_orig to 0. +.sector_lo dd ? ; low 32 bits of the current sector +.sector_hi dd ? ; high 32 bits of the current sector +.cache dd ? ; pointer to DISKCACHE +.error_code dd ? ; current status +.local_vars_size = $ - .local_vars +.saved_regs rd 2 +.buffer dd ? ; filled by fs_read64_{sys,app} +end virtual +; 2. Validate parameters against partition length: +; immediately return error if edx:eax are beyond partition end, +; decrease .num_sectors and .num_sectors_orig, if needed, +; so that the entire operation fits in the partition limits. + mov eax, dword [ebp+PARTITION.Length] + mov edx, dword [ebp+PARTITION.Length+4] + sub eax, [.sector_lo] + sbb edx, [.sector_hi] + jb .end_of_media + jnz .no_end_of_media + cmp ecx, eax + jbe .no_end_of_media +; If .num_sectors got decreased, set status to DISK_STATUS_END_OF_MEDIA; +; if all subsequent operations would be successful, this would become the final +; status, otherwise this would be rewritten by failed operation. + mov [.num_sectors], eax + mov [.num_sectors_orig], eax + mov [.error_code], DISK_STATUS_END_OF_MEDIA +.no_end_of_media: +; 3. 
If number of sectors to read is zero, either because zero-sectors operation +; was requested or because it got decreased to zero due to partition limits, +; just return the current status. + cmp [.num_sectors], 0 + jz .return +; 4. Shift sector from partition-relative to absolute. + mov eax, dword [ebp+PARTITION.FirstSector] + mov edx, dword [ebp+PARTITION.FirstSector+4] + add [.sector_lo], eax + adc [.sector_hi], edx +; 5. If the cache is disabled, pass the request directly to the driver. + mov edi, [.buffer] + cmp [ebx+DISKCACHE.pointer], 0 + jz .nocache +; 6. Look for sectors in the cache, sequentially from the beginning. +; Stop at the first sector that is not in the cache +; or when all sectors were read from the cache. +; 6a. Acquire the lock. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_lock +.lookup_in_cache_loop: +; 6b. For each sector, call the lookup function without adding to the cache. + mov eax, [.sector_lo] + mov edx, [.sector_hi] + call cache_lookup_read +; 6c. If it has failed, the sector is not in cache; +; release the lock and go to 7. + jc .not_found_in_cache +; The sector is found in cache. +; 6d. Copy data for the caller. +; Note that buffer in edi is advanced automatically. + mov esi, ecx + shl esi, 9 + add esi, [ebx+DISKCACHE.data] + mov ecx, 512/4 + rep movsd +; 6e. Advance the sector. + add [.sector_lo], 1 + adc [.sector_hi], 0 +; 6f. Decrement number of sectors left. +; If all sectors were read, release the lock and return. + dec [.num_sectors] + jnz .lookup_in_cache_loop +; Release the lock acquired at 6a. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_unlock +.return: + mov eax, [.error_code] + mov ecx, [.num_sectors_orig] + sub ecx, [.num_sectors] +.nothing: + add esp, .local_vars_size + pop edi esi ebx ; restore used registers to be stdcall + ret +.not_found_in_cache: +; Release the lock acquired at 6a. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_unlock +; The current sector is not present in the cache. +; Ask the driver to read all requested not-yet-read sectors, +; put results in the cache. +; Also, see the comment before the definition of CACHE_MAX_ALLOC_SIZE. +; 7. Allocate buffer for operations. +; Normally, create buffer that is sufficient for all remaining data. +; However, for extra-large requests make an upper limit: +; do not use more than half of the free memory +; or more than CACHE_MAX_ALLOC_SIZE bytes. + mov ebx, [pg_data.pages_free] + shr ebx, 1 + jz .nomemory + cmp ebx, CACHE_MAX_ALLOC_SIZE shr 12 + jbe @f + mov ebx, CACHE_MAX_ALLOC_SIZE shr 12 +@@: + shl ebx, 12 - 9 + cmp ebx, [.num_sectors] + jbe @f + mov ebx, [.num_sectors] +@@: + mov eax, ebx + shl eax, 9 + stdcall kernel_alloc, eax +; If failed, return the appropriate error code. + test eax, eax + jz .nomemory + mov esi, eax +; Split the request to chunks that fit in the allocated buffer. +.read_loop: +; 8. Get iteration size: either size of allocated buffer in sectors +; or number of sectors left, select what is smaller. + cmp ebx, [.num_sectors] + jbe @f + mov ebx, [.num_sectors] +@@: +; 9. Create second portion of local variables. +; Note that variables here and above are esp-relative; +; it means that all addresses should be corrected when esp is changing. + push ebx esi esi + push ebx +; In particular, num_sectors is now [.num_sectors+.local_vars2_size]. +virtual at esp +.local_vars2: +.current_num_sectors dd ? ; number of sectors that were read +.current_buffer dd ? 
+; pointer inside .allocated_buffer that points +; to the beginning of not-processed data +.allocated_buffer dd ? ; saved in safe place +.iteration_size dd ? ; saved in safe place +.local_vars2_size = $ - .local_vars2 +end virtual +; 10. Call the driver, reading the next chunk. + push esp ; numsectors + push [.sector_hi+.local_vars2_size+4] ; startsector + push [.sector_lo+.local_vars2_size+8] ; startsector + push esi ; buffer + mov esi, [ebp+PARTITION.Disk] + mov al, DISKFUNC.read + call disk_call_driver +; If failed, save error code. + test eax, eax + jz @f + mov [.error_code+.local_vars2_size], eax +@@: +; 11. Copy data for the caller. +; Note that buffer in edi is advanced automatically. + cmp [.current_num_sectors], 0 + jz .copy_done + mov ecx, [.current_num_sectors] + shl ecx, 9-2 + mov esi, [.allocated_buffer] + rep movsd +; 12. Copy data to the cache. +; 12a. Acquire the lock. + mov ebx, [.cache+.local_vars2_size] + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_lock +; 12b. Prepare for the loop: save edi and create a local variable that +; stores number of sectors to be copied. + push edi + push [.current_num_sectors+4] +.store_to_cache: +; 12c. For each sector, call the lookup function with adding to the cache, if not yet. + mov eax, [.sector_lo+.local_vars2_size+8] + mov edx, [.sector_hi+.local_vars2_size+8] + call cache_lookup_write + test eax, eax + jnz .cache_error +; 12d. For each sector, copy data, mark the item as not-modified copy of the disk, +; advance .current_buffer and .sector_hi:.sector_lo to the next sector. + mov [esi+CACHE_ITEM.Status], CACHE_ITEM_COPY + mov esi, [.current_buffer+8] + mov edi, ecx + shl edi, 9 + add edi, [ebx+DISKCACHE.data] + mov ecx, 512/4 + rep movsd + mov [.current_buffer+8], esi + add [.sector_lo+.local_vars2_size+8], 1 + adc [.sector_hi+.local_vars2_size+8], 0 +; 12e. Continue the loop 12c-12d until all sectors are read. + dec dword [esp] + jnz .store_to_cache +.cache_error: +; 12f. Restore after the loop: pop the local variable and restore edi. + pop ecx + pop edi +; 12g. Release the lock. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_unlock +.copy_done: +; 13. Remove portion of local variables created at step 9. + pop ecx + pop esi esi ebx +; 14. Continue iterations while number of sectors read by the driver +; is equal to number of sectors requested and there are additional sectors. + cmp ecx, ebx + jnz @f + sub [.num_sectors], ebx + jnz .read_loop +@@: +; 15. Free the buffer allocated at step 7 and return. + stdcall kernel_free, esi + jmp .return + +; Special branches: +.nomemory: +; memory allocation failed at step 7: return the corresponding error + mov [.error_code], DISK_STATUS_NO_MEMORY + jmp .return +.nocache: +; step 5, after correcting number of sectors to fit in partition limits +; and advancing partition-relative sector to absolute, +; sees that cache is disabled: pass corrected request to the driver + lea eax, [.num_sectors] + push eax ; numsectors + push [.sector_hi+4] ; startsector + push [.sector_lo+8] ; startsector + push edi ; buffer + mov esi, [ebp+PARTITION.Disk] + mov al, DISKFUNC.read + call disk_call_driver + test eax, eax + jnz @f + mov eax, [.error_code] +@@: + mov ecx, [.num_sectors] + jmp .nothing +.end_of_media: +; requested sector is beyond the partition end: return the corresponding error + mov [.error_code], DISK_STATUS_END_OF_MEDIA + jmp .return + +; Write several sequential sectors using cache #1. 
+; in: edx:eax = start sector +; in: ecx = number of sectors to write +; in: ebx -> buffer +; in: ebp -> PARTITION +; out: eax = error code, 0 = ok +; out: ecx = number of sectors that were written +fs_write64_sys: +; Save ebx, set ebx to SysCache and let the common part do its work. + push ebx + mov ebx, [ebp+PARTITION.Disk] + add ebx, DISK.SysCache + jmp fs_write64_common + +; Write several sequential sectors using cache #2. +; in: edx:eax = start sector +; in: ecx = number of sectors to write +; in: ebx -> buffer +; in: ebp -> PARTITION +; out: eax = error code, 0 = ok +; out: ecx = number of sectors that were written +fs_write64_app: +; Save ebx, set ebx to AppCache and let the common part do its work. + push ebx + mov ebx, [ebp+PARTITION.Disk] + add ebx, DISK.AppCache + +; Common part of fs_write64_{app,sys}: +; write several sequential sectors using the given cache. +fs_write64_common: +; 1. Setup stack frame. + push esi edi ; save used registers to be stdcall + push 0 ; initialize .error_code + push edx eax ecx ecx ; initialize stack variables + push [.buffer-4] ; copy [.buffer] to [.cur_buffer] + ; -4 is due to esp-relative addressing +virtual at esp +.local_vars: +.cur_buffer dd ? ; pointer to data that are currently copying +.num_sectors_orig dd ? +; Number of sectors that should be written. Used to generate output value of ecx. +.num_sectors dd ? +; Number of sectors that remain to be written. +.sector_lo dd ? ; low 32 bits of the current sector +.sector_hi dd ? ; high 32 bits of the current sector +.error_code dd ? ; current status +.local_vars_size = $ - .local_vars +.saved_regs rd 2 +.buffer dd ? ; filled by fs_write64_{sys,app} +end virtual +; 2. Validate parameters against partition length: +; immediately return error if edx:eax are beyond partition end, +; decrease .num_sectors and .num_sectors_orig, if needed, +; so that the entire operation fits in the partition limits. + mov eax, dword [ebp+PARTITION.Length] + mov edx, dword [ebp+PARTITION.Length+4] + sub eax, [.sector_lo] + sbb edx, [.sector_hi] + jb .end_of_media + jnz .no_end_of_media + cmp ecx, eax + jbe .no_end_of_media +; If .num_sectors got decreased, set status to DISK_STATUS_END_OF_MEDIA; +; if all subsequent operations would be successful, this would become the final +; status, otherwise this would be rewritten by failed operation. + mov [.num_sectors], eax + mov [.num_sectors_orig], eax + mov [.error_code], DISK_STATUS_END_OF_MEDIA +.no_end_of_media: +; 3. If number of sectors to write is zero, either because zero-sectors operation +; was requested or because it got decreased to zero due to partition limits, +; just return the current status. + cmp [.num_sectors], 0 + jz .return +; 4. Shift sector from partition-relative to absolute. + mov eax, dword [ebp+PARTITION.FirstSector] + mov edx, dword [ebp+PARTITION.FirstSector+4] + add [.sector_lo], eax + adc [.sector_hi], edx +; 5. If the cache is disabled, pass the request directly to the driver. + cmp [ebx+DISKCACHE.pointer], 0 + jz .nocache +; 6. Store sectors in the cache, sequentially from the beginning. +; 6a. Acquire the lock. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_lock +.lookup_in_cache_loop: +; 6b. For each sector, call the lookup function with adding to the cache, if not yet. + mov eax, [.sector_lo] + mov edx, [.sector_hi] + call cache_lookup_write + test eax, eax + jnz .cache_error +; 6c. For each sector, copy data, mark the item as modified and not saved, +; advance .current_buffer to the next sector. 
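; (At this point the registers follow the cache_lookup_write contract documented
;  later in this file: ecx = slot index, esi -> the slot's CACHE_ITEM. The data
;  area of a slot lives at DISKCACHE.data + slot*512, which is what the 'shl 9'
;  below computes; rep movsd leaves esi just past the copied sector, so storing
;  it back into .cur_buffer advances the source pointer for the next sector.)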
+ mov [esi+CACHE_ITEM.Status], CACHE_ITEM_MODIFIED + mov esi, [.cur_buffer] + mov edi, ecx + shl edi, 9 + add edi, [ebx+DISKCACHE.data] + mov ecx, 512/4 + rep movsd + mov [.cur_buffer], esi +; 6d. Remove the sector from the other cache. +; Normally it should not be there, but prefetching could put to the app cache +; data that normally should belong to the sys cache and vice versa. +; Note: this requires that both caches must be protected by the same lock. + mov eax, [.sector_lo] + mov edx, [.sector_hi] + push ebx + sub ebx, [ebp+PARTITION.Disk] + xor ebx, DISK.SysCache xor DISK.AppCache + add ebx, [ebp+PARTITION.Disk] + call cache_lookup_read + jc @f + mov [esi+CACHE_ITEM.Status], CACHE_ITEM_EMPTY +@@: + pop ebx +; 6e. Advance .sector_hi:.sector_lo to the next sector. + add [.sector_lo], 1 + adc [.sector_hi], 0 +; 6f. Continue the loop at 6b-6e until all sectors are processed. + dec [.num_sectors] + jnz .lookup_in_cache_loop +.unlock_return: +; 6g. Release the lock and return. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_unlock +.return: + mov eax, [.error_code] + mov ecx, [.num_sectors_orig] + sub ecx, [.num_sectors] +.nothing: + add esp, .local_vars_size + pop edi esi ebx + ret + +; Special branches: +.cache_error: +; error at flushing the cache while adding sector to the cache: +; return the error from the lookup function + mov [.error_code], eax + jmp .unlock_return +.end_of_media: +; requested sector is beyond the partition end: return the corresponding error + mov eax, DISK_STATUS_END_OF_MEDIA + xor ecx, ecx + jmp .nothing +.nocache: +; step 5, after correcting number of sectors to fit in partition limits +; and advancing partition-relative sector to absolute, +; sees that cache is disabled: pass corrected request to the driver + lea eax, [.num_sectors] + push eax ; numsectors + push [.sector_hi+4] ; startsector + push [.sector_lo+8] ; startsector + push [.buffer+12] ; buffer + mov esi, [ebp+PARTITION.Disk] + mov al, DISKFUNC.write + call disk_call_driver + mov ecx, [.num_sectors] + jmp .nothing + +; Legacy. Use fs_read64_sys instead. ; This function is intended to replace the old 'hd_read' function when ; [hdd_appl_data] = 0, so its input/output parameters are the same, except ; that it can't use the global variables 'hd_error' and 'hdd_appl_data'. @@ -14,12 +497,13 @@ $Revision: 4133 $ ; eax is relative to partition start ; out: eax = error code; 0 = ok fs_read32_sys: -; Save ecx, set ecx to SysCache and let the common part do its work. - push ecx - mov ecx, [ebp+PARTITION.Disk] - add ecx, DISK.SysCache +; Save ebx, set ebx to SysCache and let the common part do its work. + push ebx + mov ebx, [ebp+PARTITION.Disk] + add ebx, DISK.SysCache jmp fs_read32_common +; Legacy. Use fs_read64_app instead. ; This function is intended to replace the old 'hd_read' function when ; [hdd_appl_data] = 1, so its input/output parameters are the same, except ; that it can't use the global variables 'hd_error' and 'hdd_appl_data'. @@ -27,10 +511,10 @@ fs_read32_sys: ; eax is relative to partition start ; out: eax = error code; 0 = ok fs_read32_app: -; Save ecx, set ecx to AppCache and let the common part do its work. - push ecx - mov ecx, [ebp+PARTITION.Disk] - add ecx, DISK.AppCache +; Save ebx, set ebx to AppCache and let the common part do its work. + push ebx + mov ebx, [ebp+PARTITION.Disk] + add ebx, DISK.AppCache ; This label is the common part of fs_read32_sys and fs_read32_app. 
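; (On entry to the common part: eax = partition-relative sector number,
;  ebx -> the selected DISKCACHE, and the caller's 512-byte buffer pointer is
;  the ebx value saved on the stack by fs_read32_sys/fs_read32_app; the code
;  below reaches it as [esp+32] in the uncached path and as .buffer otherwise.)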
fs_read32_common: @@ -41,119 +525,215 @@ fs_read32_common: cmp dword [ebp+PARTITION.Length], eax ja @f mov eax, DISK_STATUS_END_OF_MEDIA - pop ecx + pop ebx ret @@: ; 2. Get the absolute sector on the disk. - push edx esi + push ecx edx esi edi xor edx, edx add eax, dword [ebp+PARTITION.FirstSector] adc edx, dword [ebp+PARTITION.FirstSector+4] ; 3. If there is no cache for this disk, just pass the request to the driver. - cmp [ecx+DISKCACHE.pointer], 0 + cmp [ebx+DISKCACHE.pointer], 0 jnz .scancache push 1 push esp ; numsectors push edx ; startsector push eax ; startsector - push ebx ; buffer + pushd [esp+32]; buffer mov esi, [ebp+PARTITION.Disk] mov al, DISKFUNC.read call disk_call_driver pop ecx - pop esi edx - pop ecx + pop edi esi edx ecx + pop ebx ret .scancache: -; 4. Scan the cache. - push edi ecx ; scan cache - push edx eax + push ebx edx eax virtual at esp +.local_vars: .sector_lo dd ? .sector_hi dd ? .cache dd ? +.local_vars_size = $ - .local_vars +.saved_regs rd 4 +.buffer dd ? end virtual -; The following code is inherited from hd_read. The differences are: -; all code is protected by the cache lock; instead of static calls -; to hd_read_dma/hd_read_pio/bd_read the dynamic call to DISKFUNC.read is used; -; sector is 64-bit, not 32-bit. +; 4. Scan for the requested sector in the cache. +; If found, copy the data and return. +; 4a. Acquire the lock. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock call mutex_lock +; 4b. Call the lookup function without adding to the cache. mov eax, [.sector_lo] mov edx, [.sector_hi] - mov esi, [ecx+DISKCACHE.pointer] - mov ecx, [ecx+DISKCACHE.sad_size] - add esi, 12 - - mov edi, 1 - -.hdreadcache: - - cmp dword [esi+8], 0 ; empty - je .nohdcache - - cmp [esi], eax ; correct sector - jne .nohdcache - cmp [esi+4], edx ; correct sector - je .yeshdcache - -.nohdcache: - - add esi, 12 - inc edi - dec ecx - jnz .hdreadcache - - mov esi, [.cache] - call find_empty_slot64 ; ret in edi + call cache_lookup_read +; If not found, go to 5. + jc .not_found_in_cache +.found_in_cache: +; 4c. Copy the data. + mov edi, [.buffer] + mov esi, ecx + shl esi, 9 + add esi, [ebx+DISKCACHE.data] + mov ecx, 512/4 + rep movsd +; 4d. Release the lock and return success. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_unlock +.return: + xor eax, eax +.return_eax: + add esp, .local_vars_size + pop edi esi edx ecx + pop ebx + ret +.not_found_in_cache: +; 5. Decide whether we need to prefetch further sectors. +; If so, advance to 6. If not, go to 13. +; Assume that devices < 3MB are floppies which are slow +; (ramdisk does not have a cache, so we don't even get here for ramdisk). +; This is a dirty hack, but the entire function is somewhat hacky. Use fs_read64*. + mov eax, [ebp+PARTITION.Disk] + cmp dword [eax+DISK.MediaInfo.Capacity+4], 0 + jnz @f + cmp dword [eax+DISK.MediaInfo.Capacity], 3 shl (20-9) + jb .floppy +@@: +; We want to prefetch CACHE_LEGACY_READ_SIZE sectors. +; 6. Release the lock acquired at step 4a. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_unlock +; 7. Allocate buffer for CACHE_LEGACY_READ_SIZE sectors. + stdcall kernel_alloc, CACHE_LEGACY_READ_SIZE shl 9 +; If failed, return the corresponding error code. test eax, eax - jnz .read_done + jz .nomemory +; 8. Create second portion of local variables. + push eax eax + push CACHE_LEGACY_READ_SIZE +virtual at esp +.local_vars2: +.num_sectors dd ? ; number of sectors left +.current_buffer dd ? 
; pointer to data that are currently copying +.allocated_buffer dd ? ; saved at safe place +.local_vars2_size = $ - .local_vars2 +end virtual +; 9. Call the driver to read CACHE_LEGACY_READ_SIZE sectors. + push esp ; numsectors + push [.sector_hi+.local_vars2_size+4] ; startsector + push [.sector_lo+.local_vars2_size+8] ; startsector + push eax ; buffer + mov esi, [ebp+PARTITION.Disk] + mov al, DISKFUNC.read + call disk_call_driver +; Note: we're ok if at least one sector is read, +; read error somewhere after that just limits data to be put in cache. + cmp [.num_sectors], 0 + jz .read_error +; 10. Copy data for the caller. + mov esi, [.allocated_buffer] + mov edi, [.buffer+.local_vars2_size] + mov ecx, 512/4 + rep movsd +; 11. Store all sectors that were successfully read to the cache. +; 11a. Acquire the lock. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_lock +.store_to_cache: +; 11b. For each sector, call the lookup function with adding to the cache, if not yet. + mov eax, [.sector_lo+.local_vars2_size] + mov edx, [.sector_hi+.local_vars2_size] + call cache_lookup_write + test eax, eax + jnz .cache_error +; 11c. For each sector, copy data, mark the item as not-modified copy of the disk, +; advance .current_buffer and .sector_hi:.sector_lo to the next sector. + mov [esi+CACHE_ITEM.Status], CACHE_ITEM_COPY + mov esi, [.current_buffer] + mov edi, ecx + shl edi, 9 + add edi, [ebx+DISKCACHE.data] + mov ecx, 512/4 + rep movsd + mov [.current_buffer], esi + add [.sector_lo+.local_vars2_size], 1 + adc [.sector_hi+.local_vars2_size], 0 +; 11d. Continue the loop at 11b-11c until all sectors are processed. + dec [.num_sectors] + jnz .store_to_cache +.cache_error: +; 11e. Release the lock. + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock + call mutex_unlock +.copy_done: +; 12. Remove portion of local variables created at step 8, +; free the buffer allocated at step 7 and return. + pop ecx ecx + stdcall kernel_free + jmp .return +.read_error: +; If no sectors were read, free the buffer allocated at step 7 +; and pass the error to the caller. + push eax + stdcall kernel_free, [.allocated_buffer+4] + pop eax + add esp, .local_vars2_size + jmp .return_eax +.nomemory: + mov eax, DISK_STATUS_NO_MEMORY + jmp .return_eax +.floppy: +; We don't want to prefetch anything, just read one sector. +; We are still holding the lock acquired at step 4a. +; 13. Call the lookup function adding sector to the cache. + call cache_lookup_write + test eax, eax + jnz .floppy_cache_error +; 14. Mark the item as empty for the case of read error. + mov [esi+CACHE_ITEM.Status], CACHE_ITEM_EMPTY + push ecx +; 15. Call the driver to read one sector. push 1 push esp push edx - push [.sector_lo+12] - mov ecx, [.cache+16] - mov eax, edi - shl eax, 9 - add eax, [ecx+DISKCACHE.data] - push eax + push [.sector_lo+16] + shl ecx, 9 + add ecx, [ebx+DISKCACHE.data] + push ecx mov esi, [ebp+PARTITION.Disk] mov al, DISKFUNC.read call disk_call_driver pop ecx dec ecx - jnz .read_done + jnz .floppy_read_error +; 16. Get the slot and pointer to the cache item, +; change the status to not-modified copy of the disk +; and go to 4c. 
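; (The slot index pushed at step 14 is popped back into ecx here. One CACHE_ITEM
;  is 12 bytes and 12 is not a valid addressing scale, so the item address
;  DISKCACHE.pointer + slot*sizeof.CACHE_ITEM is computed as (slot*3)*4 by the
;  lea/shl pair below. Slot numbering starts at 1 (cache_lookup_read begins its
;  scan at DISKCACHE.pointer + sizeof.CACHE_ITEM with ecx = 1), so item 0 is
;  effectively unused.)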
+ pop ecx + lea esi, [ecx*sizeof.CACHE_ITEM/4] + shl esi, 2 + add esi, [ebx+DISKCACHE.pointer] + mov [esi+CACHE_ITEM.Status], CACHE_ITEM_COPY + jmp .found_in_cache - mov ecx, [.cache] - lea eax, [edi*3] - mov esi, [ecx+DISKCACHE.pointer] - lea esi, [eax*4+esi] - - mov eax, [.sector_lo] - mov edx, [.sector_hi] - mov [esi], eax ; sector number - mov [esi+4], edx ; sector number - mov dword [esi+8], 1; hd read - mark as same as in hd - -.yeshdcache: - - mov esi, edi - mov ecx, [.cache] - shl esi, 9 - add esi, [ecx+DISKCACHE.data] - - mov edi, ebx - mov ecx, 512/4 - rep movsd ; move data - xor eax, eax ; successful read -.read_done: - mov ecx, [.cache] +; On error at steps 13-15, release the lock +; and pass the error to the caller. +.floppy_read_error: + pop ecx +.floppy_cache_error: + mov ecx, [ebp+PARTITION.Disk] + add ecx, DISK.CacheLock push eax call mutex_unlock pop eax - add esp, 12 - pop edi esi edx ecx - ret + jmp .return_eax ; This function is intended to replace the old 'hd_write' function when ; [hdd_appl_data] = 0, so its input/output parameters are the same, except @@ -162,11 +742,13 @@ end virtual ; eax is relative to partition start ; out: eax = error code; 0 = ok fs_write32_sys: -; Save ecx, set ecx to SysCache and let the common part do its work. - push ecx - mov ecx, [ebp+PARTITION.Disk] - add ecx, DISK.SysCache - jmp fs_write32_common +; Just call the advanced function. + push ecx edx + xor edx, edx + mov ecx, 1 + call fs_write64_sys + pop edx ecx + ret ; This function is intended to replace the old 'hd_write' function when ; [hdd_appl_data] = 1, so its input/output parameters are the same, except @@ -175,269 +757,413 @@ fs_write32_sys: ; eax is relative to partition start ; out: eax = error code; 0 = ok fs_write32_app: -; Save ecx, set ecx to AppCache and let the common part do its work. - push ecx - mov ecx, [ebp+PARTITION.Disk] - add ecx, DISK.AppCache - -; This label is the common part of fs_read32_sys and fs_read32_app. -fs_write32_common: -; 1. Check that the required sector is inside the partition. If no, return -; DISK_STATUS_END_OF_MEDIA. - cmp dword [ebp+PARTITION.Length+4], 0 - jnz @f - cmp dword [ebp+PARTITION.Length], eax - ja @f - mov eax, DISK_STATUS_END_OF_MEDIA - pop ecx - ret -@@: - push edx esi -; 2. Get the absolute sector on the disk. +; Just call the advanced function. + push ecx edx xor edx, edx - add eax, dword [ebp+PARTITION.FirstSector] - adc edx, dword [ebp+PARTITION.FirstSector+4] -; 3. If there is no cache for this disk, just pass request to the driver. - cmp [ecx+DISKCACHE.pointer], 0 - jnz .scancache - push 1 - push esp ; numsectors - push edx ; startsector - push eax ; startsector - push ebx ; buffer - mov esi, [ebp+PARTITION.Disk] - mov al, DISKFUNC.write - call disk_call_driver - pop ecx - pop esi edx - pop ecx + mov ecx, 1 + call fs_write64_app + pop edx ecx ret -.scancache: -; 4. Scan the cache. - push edi ecx ; scan cache + +; Lookup for the given sector in the given cache. +; If the sector is not present, return error. +; The caller must acquire the cache lock. 
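; Note: "the cache lock" is DISK.CacheLock, which is shared by the Sys and App
; caches of the disk; every caller in this file takes it before the lookup.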
+; in: edx:eax = sector +; in: ebx -> DISKCACHE structure +; out: CF set if sector is not in cache +; out: ecx = index in cache +; out: esi -> sector:status +proc cache_lookup_read + mov esi, [ebx+DISKCACHE.pointer] + add esi, sizeof.CACHE_ITEM + + mov ecx, 1 + +.hdreadcache: + + cmp [esi+CACHE_ITEM.Status], CACHE_ITEM_EMPTY + je .nohdcache + + cmp [esi+CACHE_ITEM.SectorLo], eax + jne .nohdcache + cmp [esi+CACHE_ITEM.SectorHi], edx + jne .nohdcache + clc + ret + +.nohdcache: + + add esi, sizeof.CACHE_ITEM + inc ecx + cmp ecx, [ebx+DISKCACHE.sad_size] + jbe .hdreadcache + stc + ret +endp + +; Lookup for the given sector in the given cache. +; If the sector is not present, allocate space for it, +; possibly flushing data. +; in: edx:eax = sector +; in: ebx -> DISKCACHE structure +; in: ebp -> PARTITION structure +; out: eax = error code +; out: ecx = index in cache +; out: esi -> sector:status +proc cache_lookup_write + call cache_lookup_read + jnc .return0 push edx eax -virtual at esp -.sector_lo dd ? -.sector_hi dd ? -.cache dd ? -end virtual -; The following code is inherited from hd_write. The differences are: -; all code is protected by the cache lock; -; sector is 64-bit, not 32-bit. - call mutex_lock - - ; check if the cache already has the sector and overwrite it - mov eax, [.sector_lo] - mov edx, [.sector_hi] - mov esi, [ecx+DISKCACHE.pointer] - mov ecx, [ecx+DISKCACHE.sad_size] - add esi, 12 - - mov edi, 1 - -.hdwritecache: - cmp dword [esi+8], 0 ; if cache slot is empty - je .not_in_cache_write - - cmp [esi], eax ; if the slot has the sector - jne .not_in_cache_write - cmp [esi+4], edx ; if the slot has the sector - je .yes_in_cache_write - -.not_in_cache_write: - - add esi, 12 - inc edi - dec ecx - jnz .hdwritecache - - ; sector not found in cache - ; write the block to a new location - - mov esi, [.cache] - call find_empty_slot64 ; ret in edi - test eax, eax - jne .hd_write_access_denied - - mov ecx, [.cache] - lea eax, [edi*3] - mov esi, [ecx+DISKCACHE.pointer] - lea esi, [eax*4+esi] - - mov eax, [.sector_lo] - mov edx, [.sector_hi] - mov [esi], eax ; sector number - mov [esi+4], edx ; sector number - -.yes_in_cache_write: - - mov dword [esi+8], 2 ; write - differs from hd - - shl edi, 9 - mov ecx, [.cache] - add edi, [ecx+DISKCACHE.data] - - mov esi, ebx - mov ecx, 512/4 - rep movsd ; move data - xor eax, eax ; success -.hd_write_access_denied: - mov ecx, [.cache] - push eax - call mutex_unlock - pop eax - add esp, 12 - pop edi esi edx ecx - ret - -; This internal function is called from fs_read32_* and fs_write32_*. It is the -; analogue of find_empty_slot for 64-bit sectors. -find_empty_slot64: ;----------------------------------------------------------- ; find empty or read slot, flush cache if next 12.5% is used by write -; output : edi = cache slot +; output : ecx = cache slot ;----------------------------------------------------------- +; Note: the code is essentially inherited, so probably +; no analysis of efficiency were done. +; However, it works. 
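; (Outline of the search below: starting just after DISKCACHE.search_start and
;  wrapping from the last slot back to slot 1, at most sad_size/8 items are
;  examined; the first one whose status is below CACHE_ITEM_MODIFIED, i.e. empty
;  or an unmodified copy, is taken. If every examined item is modified, the whole
;  cache is flushed with write_cache64 and the search restarts. The sector number
;  pushed by cache_lookup_write above is finally popped into the chosen item.)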
.search_again: - mov ecx, [esi+DISKCACHE.sad_size] - mov edi, [esi+DISKCACHE.search_start] - shr ecx, 3 + mov eax, [ebx+DISKCACHE.sad_size] + mov ecx, [ebx+DISKCACHE.search_start] + shr eax, 3 + lea esi, [ecx*sizeof.CACHE_ITEM/4] + shl esi, 2 + add esi, [ebx+DISKCACHE.pointer] .search_for_empty: - inc edi - cmp edi, [esi+DISKCACHE.sad_size] + inc ecx + add esi, sizeof.CACHE_ITEM + cmp ecx, [ebx+DISKCACHE.sad_size] jbe .inside_cache - mov edi, 1 + mov ecx, 1 + mov esi, [ebx+DISKCACHE.pointer] + add esi, sizeof.CACHE_ITEM .inside_cache: - lea eax, [edi*3] - shl eax, 2 - add eax, [esi+DISKCACHE.pointer] - cmp dword [eax+8], 2 + cmp [esi+CACHE_ITEM.Status], CACHE_ITEM_MODIFIED jb .found_slot ; it's empty or read - dec ecx + dec eax jnz .search_for_empty stdcall write_cache64, [ebp+PARTITION.Disk] ; no empty slots found, write all test eax, eax jne .found_slot_access_denied jmp .search_again ; and start again .found_slot: - mov [esi+DISKCACHE.search_start], edi + mov [ebx+DISKCACHE.search_start], ecx + popd [esi+CACHE_ITEM.SectorLo] + popd [esi+CACHE_ITEM.SectorHi] +.return0: xor eax, eax ; success -.found_slot_access_denied: ret +.found_slot_access_denied: + add esp, 8 + ret +endp -; This function is intended to replace the old 'write_cache' function. -proc write_cache64 uses ecx edx esi edi, disk:dword -locals -cache_chain_started dd 0 -cache_chain_size dd ? -cache_chain_pos dd ? -cache_chain_ptr dd ? -endl -saved_esi_pos = 16+12 ; size of local variables + size of registers before esi -; If there is no cache for this disk, nothing to do. - cmp [esi+DISKCACHE.pointer], 0 - jz .flush -;----------------------------------------------------------- -; write all changed sectors to disk -;----------------------------------------------------------- - - ; write difference ( 2 ) from cache to DISK - mov ecx, [esi+DISKCACHE.sad_size] - mov esi, [esi+DISKCACHE.pointer] - add esi, 12 - mov edi, 1 -.write_cache_more: - cmp dword [esi+8], 2 ; if cache slot is not different - jne .write_chain - mov dword [esi+8], 1 ; same as in hd - mov eax, [esi] - mov edx, [esi+4] ; edx:eax = sector to write -; Объединяем запись цепочки последовательных секторов в одно обращение к диску - cmp ecx, 1 - jz .nonext - cmp dword [esi+12+8], 2 - jnz .nonext - push eax edx +; Flush the given cache. +; The caller must acquire the cache lock. +; in: ebx -> DISKCACHE +; in: first argument in stdcall convention -> PARTITION +proc write_cache64 +; 1. Setup stack frame. + push esi edi ; save used registers to be stdcall + sub esp, .local_vars_size ; reserve space for local vars +virtual at esp +.local_vars: +.cache_end dd ? ; item past the end of the cache +.size_left dd ? ; items left to scan +.current_ptr dd ? ; pointer to the current item +; +; Write operations are coalesced in chains, +; one chain describes a sequential interval of sectors, +; they can be sequential or scattered in the cache. +.sequential dd ? +; boolean variable, 1 if the current chain is sequential in the cache, +; 0 if additional buffer is needed to perform the operation +.chain_start_pos dd ? ; slot of chain start item +.chain_start_ptr dd ? ; pointer to chain start item +.chain_size dd ? ; chain size (thanks, C.O.) +.iteration_size dd ? +; If the chain size is too large, split the operation to several iterations. +; This is size in sectors for one iterations. +.iteration_buffer dd ? ; temporary buffer for non-sequential chains +.local_vars_size = $ - .local_vars + rd 2 ; saved registers + dd ? ; return address +.disk dd ? ; first argument +end virtual +; 1. 
If there is no cache for this disk, nothing to do, just return zero. + cmp [ebx+DISKCACHE.pointer], 0 + jz .return0 +; 2. Prepare for the loop: initialize current pointer and .size_left, +; calculate .cache_end. + mov ecx, [ebx+DISKCACHE.sad_size] + mov [.size_left], ecx + lea ecx, [ecx*sizeof.CACHE_ITEM/4] + shl ecx, 2 + mov esi, [ebx+DISKCACHE.pointer] + add esi, sizeof.CACHE_ITEM + add ecx, esi + mov [.cache_end], ecx +; 3. Main loop: go over all items, go to 5 for every modified item. +.look: + cmp [esi+CACHE_ITEM.Status], CACHE_ITEM_MODIFIED + jz .begin_write +.look_next: + add esi, sizeof.CACHE_ITEM + dec [.size_left] + jnz .look +; 4. Return success. +.return0: + xor eax, eax +.return: + add esp, .local_vars_size + pop edi esi ; restore used registers to be stdcall + ret 4 ; return popping one argument +.begin_write: +; We have found a modified item. +; 5. Prepare for chain finding: save the current item, initialize chain variables. + mov [.current_ptr], esi +; Initialize chain as sequential zero-length starting at the current item. + mov [.chain_start_ptr], esi + mov eax, [ebx+DISKCACHE.sad_size] + sub eax, [.size_left] + inc eax + mov [.chain_start_pos], eax + mov [.chain_size], 0 + mov [.sequential], 1 +; 6. Expand the chain backward. +; Note: the main loop in step 2 looks for items sequentially, +; so the previous item is not modified. If the previous sector +; is present in the cache, it automatically makes the chain scattered. +; 6a. Calculate sector number: one before the sector for the current item. + mov eax, [esi+CACHE_ITEM.SectorLo] + mov edx, [esi+CACHE_ITEM.SectorHi] + sub eax, 1 + sbb edx, 0 +.find_chain_start: +; 6b. For each sector where the previous item does not expand the chain, +; call the lookup function without adding to the cache. + call cache_lookup_read +; 6c. If the sector is not found in cache or is not modified, stop expanding +; and advance to step 7. + jc .found_chain_start + cmp [esi+CACHE_ITEM.Status], CACHE_ITEM_MODIFIED + jnz .found_chain_start +; 6d. We have found a new block that expands the chain backwards. +; It makes the chain non-sequential. +; Normally, sectors come in sequential blocks, so try to look at previous items +; before returning to 6b; if there is a sequential block indeed, this saves some +; time instead of many full-fledged lookups. + mov [.sequential], 0 + mov [.chain_start_pos], ecx +.look_backward: +; 6e. For each sector, update chain start pos/ptr, decrement sector number, +; look at the previous item. + mov [.chain_start_ptr], esi + inc [.chain_size] + sub eax, 1 + sbb edx, 0 + sub esi, sizeof.CACHE_ITEM +; If the previous item exists... + cmp esi, [ebx+DISKCACHE.pointer] + jbe .find_chain_start +; ...describes the correct sector... + cmp [esi+CACHE_ITEM.SectorLo], eax + jnz .find_chain_start + cmp [esi+CACHE_ITEM.SectorHi], edx + jnz .find_chain_start +; ...and is modified... + cmp [esi+CACHE_ITEM.Status], CACHE_ITEM_MODIFIED + jnz .found_chain_start +; ...expand the chain one sector backwards and continue the loop at 6e. +; Otherwise, advance to step 7 if the previous item describes the correct sector +; but is not modified, and return to step 6b otherwise. + dec [.chain_start_pos] + jmp .look_backward +.found_chain_start: +; 7. Expand the chain forward. +; 7a. Prepare for the loop at 7b: +; set esi = pointer to current item, edx:eax = current sector. + mov esi, [.current_ptr] + mov eax, [esi+CACHE_ITEM.SectorLo] + mov edx, [esi+CACHE_ITEM.SectorHi] +.look_forward: +; 7b. First, look at the next item. 
If it describes the next sector: +; if it is modified, expand the chain with that sector and continue this step, +; if it is not modified, the chain is completed, so advance to step 8. + inc [.chain_size] add eax, 1 adc edx, 0 - cmp eax, [esi+12] - jnz @f - cmp edx, [esi+12+4] -@@: - pop edx eax - jnz .nonext - cmp [cache_chain_started], 1 - jz @f - mov [cache_chain_started], 1 - mov [cache_chain_size], 0 - mov [cache_chain_pos], edi - mov [cache_chain_ptr], esi -@@: - inc [cache_chain_size] - cmp [cache_chain_size], 16 - jnz .continue - jmp .write_chain -.nonext: - call .flush_cache_chain - test eax, eax - jnz .nothing - mov [cache_chain_size], 1 - mov [cache_chain_ptr], esi - call .write_cache_sector - test eax, eax - jnz .nothing - jmp .continue -.write_chain: - call .flush_cache_chain - test eax, eax - jnz .nothing -.continue: - add esi, 12 - inc edi - dec ecx - jnz .write_cache_more - call .flush_cache_chain - test eax, eax - jnz .nothing -.flush: - mov esi, [disk] - mov al, DISKFUNC.flush - call disk_call_driver -.nothing: - ret - -.flush_cache_chain: - xor eax, eax - cmp [cache_chain_started], eax - jz @f - call .write_cache_chain - mov [cache_chain_started], 0 -@@: - retn - -.write_cache_sector: - mov [cache_chain_size], 1 - mov [cache_chain_pos], edi -.write_cache_chain: - pusha - mov edi, [cache_chain_pos] - mov ecx, [ebp-saved_esi_pos] - shl edi, 9 - add edi, [ecx+DISKCACHE.data] - mov ecx, [cache_chain_size] - push ecx - push esp ; numsectors - mov eax, [cache_chain_ptr] - pushd [eax+4] - pushd [eax] ; startsector - push edi ; buffer - mov esi, [ebp] - mov esi, [esi+PARTITION.Disk] + add esi, sizeof.CACHE_ITEM + cmp esi, [.cache_end] + jae .find_chain_end + cmp [esi+CACHE_ITEM.SectorLo], eax + jnz .find_chain_end + cmp [esi+CACHE_ITEM.SectorHi], edx + jnz .find_chain_end + cmp [esi+CACHE_ITEM.Status], CACHE_ITEM_MODIFIED + jnz .found_chain_end + jmp .look_forward +.find_chain_end: +; 7c. Otherwise, call the lookup function. + call cache_lookup_read +; 7d. If the next sector is present in the cache and is modified, +; mark the chain as non-sequential and continue to step 7b. + jc .found_chain_end + cmp [esi+CACHE_ITEM.Status], CACHE_ITEM_MODIFIED + jnz .found_chain_end + mov [.sequential], 0 + jmp .look_forward +.found_chain_end: +; 8. Decide whether the chain is sequential or scattered. +; Advance to step 9 for sequential chains, go to step 10 for scattered chains. + cmp [.sequential], 0 + jz .write_non_sequential +.write_sequential: +; 9. Write a sequential chain to disk. +; 9a. Pass the entire chain to the driver. + mov eax, [.chain_start_ptr] + mov edx, [.chain_start_pos] + shl edx, 9 + add edx, [ebx+DISKCACHE.data] + lea ecx, [.chain_size] + push ecx ; numsectors + pushd [eax+CACHE_ITEM.SectorHi] ; startsector + pushd [eax+CACHE_ITEM.SectorLo] ; startsector + push edx ; buffer + mov esi, [ebp+PARTITION.Disk] mov al, DISKFUNC.write call disk_call_driver +; 9b. If failed, pass the error code to the driver. + test eax, eax + jnz .return +; 9c. If succeeded, mark all sectors in the chain as not-modified, +; advance current item and number of items left to skip the chain. + mov esi, [.current_ptr] + mov eax, [.chain_size] + sub [.size_left], eax +@@: + mov [esi+CACHE_ITEM.Status], CACHE_ITEM_COPY + add esi, sizeof.CACHE_ITEM + dec eax + jnz @b +; 9d. Continue the main loop at step 2 if there are more sectors. +; Return success otherwise. + cmp [.size_left], 0 + jnz .look + jmp .return0 +.write_non_sequential: +; Write a non-sequential chain to the disk. +; 10. 
Allocate a temporary buffer. +; Use [.chain_size] sectors, but +; not greater than CACHE_MAX_ALLOC_SIZE bytes +; and not greater than half of free memory. + mov eax, [pg_data.pages_free] + shr eax, 1 + jz .nomemory + cmp eax, CACHE_MAX_ALLOC_SIZE shr 12 + jbe @f + mov eax, CACHE_MAX_ALLOC_SIZE shr 12 +@@: + shl eax, 12 - 9 + cmp eax, [.chain_size] + jbe @f + mov eax, [.chain_size] +@@: + mov [.iteration_size], eax + shl eax, 9 + stdcall kernel_alloc, eax + test eax, eax + jz .nomemory + mov [.iteration_buffer], eax +.write_non_sequential_iteration: +; 11. Split the chain so that each iteration fits in the allocated buffer. +; Iteration size is the minimum of chain size and allocated size. + mov eax, [.chain_size] + cmp eax, [.iteration_size] + jae @f + mov [.iteration_size], eax +@@: +; 12. Prepare arguments for the driver. + mov esi, [.chain_start_ptr] + mov edi, [.iteration_buffer] + push [.iteration_size] + push esp ; numsectors + push [esi+CACHE_ITEM.SectorHi] ; startsector + push [esi+CACHE_ITEM.SectorLo] ; startsector + push edi ; buffer +; 13. Copy data from the cache to the temporary buffer, +; advancing chain_start pos/ptr and marking sectors as not-modified. +; 13a. Prepare for the loop: push number of sectors to process. + push [.iteration_size+20] ; temporary variable +.copy_loop: +; 13b. For each sector, copy the data. +; Note that edi is advanced automatically. + mov esi, [.chain_start_pos+24] + shl esi, 9 + add esi, [ebx+DISKCACHE.data] + mov ecx, 512/4 + rep movsd +; 13c. Mark the item as not-modified. + mov esi, [.chain_start_ptr+24] + mov [esi+CACHE_ITEM.Status], CACHE_ITEM_COPY +; 13d. Check whether the next sector continues the chain. +; If so, advance to 13e. Otherwise, go to 13f. + mov eax, [esi+CACHE_ITEM.SectorLo] + mov edx, [esi+CACHE_ITEM.SectorHi] + add esi, sizeof.CACHE_ITEM + add eax, 1 + adc edx, 0 + cmp esi, [.cache_end+24] + jae .no_forward + cmp [esi+CACHE_ITEM.SectorLo], eax + jnz .no_forward + cmp [esi+CACHE_ITEM.SectorHi], edx + jnz .no_forward +; 13e. Increment position/pointer to the chain and +; continue the loop. + inc [.chain_start_pos+24] + mov [.chain_start_ptr+24], esi + dec dword [esp] + jnz .copy_loop + jmp .copy_done +.no_forward: +; 13f. Call the lookup function without adding to the cache. +; Update position/pointer with returned value. +; Note: for the last sector in the chain, ecx/esi may contain +; garbage; we are not going to use them in this case. + call cache_lookup_read + mov [.chain_start_pos+24], ecx + mov [.chain_start_ptr+24], esi + dec dword [esp] + jnz .copy_loop +.copy_done: +; 13g. Restore the stack after 13a. pop ecx - mov [esp+28], eax - popa - retn +; 14. Call the driver. + mov esi, [ebp+PARTITION.Disk] + mov al, DISKFUNC.write + call disk_call_driver + pop ecx ; numsectors +; 15. If the driver has returned an error, free the buffer allocated at step 10 +; and pass the error to the caller. +; Otherwise, remove the processed part from the chain and continue iterations +; starting in step 11 if there are more data to process. + test eax, eax + jnz .nonsequential_error + sub [.chain_size], ecx + jnz .write_non_sequential_iteration +; 16. The chain is written. Free the temporary buffer +; and continue the loop at step 2. 
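; (Only the item that opened the chain is stepped over by .look_next; the other
;  chain members were already marked CACHE_ITEM_COPY while being copied into the
;  temporary buffer, so the main scan loop skips them on its own.)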
+ stdcall kernel_free, [.iteration_buffer] + mov esi, [.current_ptr] + jmp .look_next +.nonsequential_error: + push eax + stdcall kernel_free, [.iteration_buffer+4] + pop eax + jmp .return +.nomemory: + mov eax, DISK_STATUS_NO_MEMORY + jmp .return endp ; This internal function is called from disk_add to initialize the caching for @@ -486,9 +1212,7 @@ disk_init_cache: @@: ; 3. Fill two DISKCACHE structures. mov [esi+DISK.SysCache.pointer], eax - lea ecx, [esi+DISK.SysCache.mutex] - call mutex_init - lea ecx, [esi+DISK.AppCache.mutex] + lea ecx, [esi+DISK.CacheLock] call mutex_init ; The following code is inherited from getcache.inc. mov edx, [esi+DISK.SysCache.pointer] @@ -577,12 +1301,21 @@ disk_free_cache: ; esi = pointer to DISK disk_sync: ; The algorithm is straightforward. - push esi + cmp [esi+DISK.SysCache.pointer], 0 + jz .nothing + lea ecx, [esi+DISK.CacheLock] + call mutex_lock + push ebx push esi ; for second write_cache64 push esi ; for first write_cache64 - add esi, DISK.SysCache + lea ebx, [esi+DISK.SysCache] call write_cache64 - add esi, DISK.AppCache - DISK.SysCache + add ebx, DISK.AppCache - DISK.SysCache call write_cache64 - pop esi + pop ebx + lea ecx, [esi+DISK.CacheLock] + call mutex_unlock +.nothing: + mov al, DISKFUNC.flush + call disk_call_driver ret diff --git a/kernel/branches/kolibri-process/core/sys32.inc b/kernel/branches/kolibri-process/core/sys32.inc index fa86ec1894..8462d2ddaf 100644 --- a/kernel/branches/kolibri-process/core/sys32.inc +++ b/kernel/branches/kolibri-process/core/sys32.inc @@ -415,18 +415,21 @@ align 4 terminate: ; terminate application destroy_thread: - .slot equ esp ;locals + .slot equ esp+4 ;locals + .process equ esp ;ptr to parent process - push esi ;save .slot + push esi ;save .slot shl esi, 8 - cmp [SLOT_BASE+esi+APPDATA.process], 0 - jne @F + mov edx, [SLOT_BASE+esi+APPDATA.process] + test edx, edx + jnz @F pop esi shl esi, 5 mov [CURRENT_TASK+esi+TASKDATA.state], 9 ret @@: + push edx ;save .process lea edx, [SLOT_BASE+esi] call scheduler_remove_thread call lock_application_table @@ -624,6 +627,9 @@ destroy_thread: je @F call free_page @@: + lea ebx, [edi+APPDATA.list] + list_del ebx ;destroys edx, ecx + mov eax, 0x20202020 stosd stosd @@ -740,8 +746,19 @@ destroy_thread: jmp .xd0 .xd1: ;release slot + + xchg bx, bx + bts [thr_slot_map], esi + mov ebx, [.process] + add ebx, PROC.thr_list + cmp ebx, [ebx+LHEAD.next] + jne @F + + DEBUGF 1,"%s",msg_process_destroy + +@@: sti ; .. 
and life goes on mov eax, [draw_limits.left] @@ -756,9 +773,12 @@ destroy_thread: call unlock_application_table ;mov esi,process_terminated ;call sys_msg_board_str - add esp, 4 + add esp, 8 ret restore .slot +restore .process + +msg_process_destroy: db 'K: destroy process', 0x0d, 0x0a,0 ; Three following procedures are used to guarantee that diff --git a/kernel/branches/kolibri-process/core/taskman.inc b/kernel/branches/kolibri-process/core/taskman.inc index 492d0e0ef0..24a0276016 100644 --- a/kernel/branches/kolibri-process/core/taskman.inc +++ b/kernel/branches/kolibri-process/core/taskman.inc @@ -262,6 +262,10 @@ proc fs_execute mov ebx, [slot_base] mov [ebx+APPDATA.process], eax + lea edx, [ebx+APPDATA.list] + lea ecx, [eax+PROC.thr_list] + list_add_tail edx, ecx + xor edx, edx cmp word [6], '02' jne @f @@ -980,6 +984,10 @@ proc new_sys_threads mov eax, [ebx+APPDATA.process] mov [edx+APPDATA.process], eax + lea ebx, [edx+APPDATA.list] + lea ecx, [eax+PROC.thr_list] + list_add_tail edx, ecx ;add thread to process child's list + mov eax, [ebx+APPDATA.tls_base] test eax, eax jz @F diff --git a/kernel/branches/kolibri-process/kernel.asm b/kernel/branches/kolibri-process/kernel.asm index b4daba2863..2e0f2fd302 100644 --- a/kernel/branches/kolibri-process/kernel.asm +++ b/kernel/branches/kolibri-process/kernel.asm @@ -74,6 +74,8 @@ include 'struct.inc' $Revision: 4381 $ +USE_FIX_FOR_INVALID_MS_VIRTUAL_PC_2007 equ 0 + USE_COM_IRQ equ 1 ; make irq 3 and irq 4 available for PCI devices VESA_1_2_VIDEO equ 0 ; enable vesa 1.2 bank switch functions diff --git a/kernel/branches/kolibri-process/kernel32.inc b/kernel/branches/kolibri-process/kernel32.inc index 2e8a717527..52d22d51f8 100644 --- a/kernel/branches/kolibri-process/kernel32.inc +++ b/kernel/branches/kolibri-process/kernel32.inc @@ -119,13 +119,12 @@ struct APPDATA app_name rb 11 rb 5 - process dd ? ;+16 - fpu_state dd ? ;+20 - exc_handler dd ? ;+24 - except_mask dd ? ;+28 - pl0_stack dd ? ;+32 - dd ? ;+36 - dd ? ;+40 + list LHEAD ;+16 + process dd ? ;+24 + fpu_state dd ? ;+28 + exc_handler dd ? ;+32 + except_mask dd ? ;+36 + pl0_stack dd ? ;+40 cursor dd ? ;+44 fd_ev dd ? ;+48 bk_ev dd ? ;+52 diff --git a/kernel/branches/kolibri-process/macros.inc b/kernel/branches/kolibri-process/macros.inc index 8a79935d9c..b5f36a441c 100644 --- a/kernel/branches/kolibri-process/macros.inc +++ b/kernel/branches/kolibri-process/macros.inc @@ -117,10 +117,10 @@ macro list_add_tail new, head macro list_del entry { - mov edx, [entry+list_fd] - mov ecx, [entry+list_bk] - mov [edx+list_bk], ecx - mov [ecx+list_fd], edx + mov edx, [entry+LHEAD.next] + mov ecx, [entry+LHEAD.prev] + mov [edx+LHEAD.prev], ecx + mov [ecx+LHEAD.next], edx } ; MOV Immediate. diff --git a/kernel/branches/kolibri-process/network/socket.inc b/kernel/branches/kolibri-process/network/socket.inc index 2aaa541fc7..57f4663ff1 100644 --- a/kernel/branches/kolibri-process/network/socket.inc +++ b/kernel/branches/kolibri-process/network/socket.inc @@ -2210,6 +2210,8 @@ SOCKET_check_owner: align 4 SOCKET_process_end: + ret ; FIXME + cmp [net_sockets + SOCKET.NextPtr], 0 ; Are there any active sockets at all? 
je .quickret ; nope, exit immediately diff --git a/kernel/branches/kolibri-process/video/blitter.inc b/kernel/branches/kolibri-process/video/blitter.inc index 15223ee882..c4536c258b 100644 --- a/kernel/branches/kolibri-process/video/blitter.inc +++ b/kernel/branches/kolibri-process/video/blitter.inc @@ -206,7 +206,13 @@ blit_32: push edi push esi push ebx - sub esp, 72 +virtual at sizeof.BLITTER +.position dd ? ; (x shl 16) + y +; ??? +.extra_var1 dd ? +.local_vars_size = $ +end virtual + sub esp, .local_vars_size mov eax, [TASK_BASE] mov ebx, [eax-twdw + WDATA.box.width] @@ -246,9 +252,9 @@ blit_32: mov eax, [ecx+32] - mov [esp+56], eax + mov [esp+BLITTER.bitmap], eax mov eax, [ecx+36] - mov [esp+60], eax + mov [esp+BLITTER.stride], eax mov ecx, esp call blit_clip @@ -268,6 +274,11 @@ blit_32: mov cx, bp add ecx, [esp+BLITTER.h] + mov eax, ebx + shl eax, 16 + mov ax, bp + mov [esp+.position], eax + mov edi, ebp ; imul edi, [_display.pitch] @@ -300,47 +311,37 @@ blit_32: lea edi, [edi+ebx*4] mov ebx, [CURRENT_TASK] +; check for hardware cursor + cmp [_display.select_cursor], select_cursor + je .core_32.software_cursor + cmp [_display.select_cursor], 0 + jne .core_32.hardware_cursor +;-------------------------------------- +.core_32.software_cursor: align 4 .outer32: - xor ecx, ecx align 4 .inner32: - cmp [ebp+ecx], bl + cmp [ebp], bl jne .skip ;-------------------------------------- - push eax - mov eax, [esi+ecx*4] + mov eax, [esi] -; check for hardware cursor - cmp [_display.select_cursor], select_cursor - je @f - cmp [_display.select_cursor], 0 - jne .no_mouseunder -;-------------------------------------- -align 4 -@@: - push ecx - - mov ecx, [esp+4] - ror ecx, 16 - sub ecx, edx - rol ecx, 16 - sub ecx, [esp+BLITTER.h + 8] + mov ecx, [esp+.position] ; check mouse area for putpixel call [_display.check_mouse] - pop ecx ;-------------------------------------- -align 4 -.no_mouseunder: ; store to real LFB - mov [LFB_BASE+edi+ecx*4], eax - pop eax + mov [LFB_BASE+edi], eax ;-------------------------------------- align 4 .skip: - inc ecx + add esi, 4 + add edi, 4 + inc ebp + add [esp+.position], 1 shl 16 dec edx jnz .inner32 @@ -349,14 +350,48 @@ align 4 add ebp, [_display.width] mov edx, [esp+BLITTER.w] + mov eax, edx + inc [esp+.position] + sub ebp, edx + shl eax, 2 + sub esi, eax + sub edi, eax + shl eax, 16-2 + sub [esp+.position], eax dec [esp+BLITTER.h] jnz .outer32 + jmp .done +.core_32.hardware_cursor: +align 4 +.hw.outer32: + xor ecx, ecx + +align 4 +.hw.inner32: + cmp [ebp+ecx], bl + jne .hw.skip + mov eax, [esi+ecx*4] + mov [LFB_BASE+edi+ecx*4], eax + +align 4 +.hw.skip: + inc ecx + dec edx + jnz .hw.inner32 + + add esi, [esp+BLITTER.stride] + add edi, [_display.pitch] + add ebp, [_display.width] + + mov edx, [esp+BLITTER.w] + dec [esp+BLITTER.h] + jnz .hw.outer32 .done: ; call [draw_pointer] ; call __sys_draw_pointer .L57: - add esp, 72 + add esp, .local_vars_size pop ebx pop esi pop edi @@ -370,7 +405,7 @@ align 4 align 4 .outer24: - mov [esp+64], edi + mov [esp+.extra_var1], edi xor ecx, ecx align 4 @@ -413,7 +448,7 @@ align 4 ;-------------------------------------- align 4 .skip_1: - mov edi, [esp+64] + mov edi, [esp+.extra_var1] inc ecx dec edx jnz .inner24 diff --git a/kernel/branches/kolibri-process/video/vesa20.inc b/kernel/branches/kolibri-process/video/vesa20.inc index 9b45f453a5..5f5b6774fb 100644 --- a/kernel/branches/kolibri-process/video/vesa20.inc +++ b/kernel/branches/kolibri-process/video/vesa20.inc @@ -23,13 +23,6 @@ $Revision: 3606 $ ; If you're planning to 
write your own video driver I suggest ; you replace the VESA12.INC file and see those instructions. -;Screen_Max_X equ 0xfe00 -;Screen_Max_Y equ 0xfe04 -;BytesPerScanLine equ 0xfe08 -;LFBAddress equ 0xfe80 -;ScreenBPP equ 0xfbf1 - - ;----------------------------------------------------------------------------- ; getpixel
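
The commit's headline change is the per-process thread list: fs_execute and new_sys_threads link the new thread's APPDATA.list node onto PROC.thr_list with list_add_tail, and destroy_thread unlinks it with list_del and then compares PROC.thr_list with its own LHEAD.next to detect the last thread of a process. As an illustrative sketch only (not part of the patch; the labels and register choice are arbitrary, and locking is assumed to be handled by the caller), such a circular LHEAD list can be walked like this:

; edx -> PROC structure of the process being inspected
        lea     ecx, [edx+PROC.thr_list]    ; list head (sentinel node)
        mov     eax, [ecx+LHEAD.next]       ; first thread node, or the head itself if empty
.walk_threads:
        cmp     eax, ecx                    ; wrapped back to the head, so we are done
        je      .walk_done
        lea     esi, [eax-APPDATA.list]     ; list node back to its owning APPDATA slot
        ; ... examine the thread through esi here ...
        mov     eax, [eax+LHEAD.next]
        jmp     .walk_threads
.walk_done: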
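
Filesystem code that migrates from the legacy single-sector helpers to the new interface passes a 64-bit partition-relative start sector in edx:eax and a sector count in ecx. A minimal hypothetical call site (not part of the patch; start_sector and destination are invented names) could look like:

; ebp -> PARTITION, as everywhere in the filesystem layer
        mov     eax, dword [start_sector]   ; bits 0..31 of the partition-relative sector
        mov     edx, dword [start_sector+4] ; bits 32..63
        mov     ecx, 8                      ; request 8 sectors (4 KiB)
        mov     ebx, [destination]          ; ebx -> caller's buffer, per the fs_read64_sys header
        call    fs_read64_sys
        test    eax, eax                    ; 0 = success; DISK_STATUS_END_OF_MEDIA means the
        jnz     .disk_error                 ; request was clipped, ecx = sectors actually read
; on success ecx holds the number of sectors actually read (8 here)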
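
The temporary-buffer clamp used at step 7 of fs_read64_common and step 10 of write_cache64 is easiest to follow with concrete, made-up numbers:

; assume pg_data.pages_free = 3000 free 4 KiB pages and a 50-sector request
;   half of free memory:         3000 shr 1        = 1500 pages
;   CACHE_MAX_ALLOC_SIZE cap:    (4 shl 20) shr 12 = 1024 pages  -> clamp to 1024
;   converted to sectors:        1024 shl (12-9)   = 8192 sectors
;   clamped to the request:      min(8192, 50)     = 50 sectors
;   size passed to kernel_alloc: 50 shl 9          = 25600 bytes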