KernelAlloc/KernelFree are called from IRQ handlers in network drivers, so make them spinlock-protected instead of mutex-protected

git-svn-id: svn://kolibrios.org@4391 a494cfbc-eb01-0410-851d-a64ba20cac60
Author: CleverMouse
Date:   2013-12-20 17:38:01 +00:00
parent 3a9d9f866f
commit 0fb3c4300c
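
For context: a mutex may put the caller to sleep when contended, which is not allowed inside an interrupt handler, whereas a spinlock that also masks interrupts can be taken from both normal and IRQ context. The diff below uses the kernel's spin_lock_irqsave/spin_unlock_irqrestore macros on heap_mutex but does not show their definitions; the following is only a minimal sketch of the behaviour the patched code relies on, assuming fasm-style macros, a dword lock variable, and eax as a scratch register (all of that is illustrative, not the kernel's real implementation).

; Sketch only (assumption, not the kernel's actual macros): save EFLAGS,
; disable interrupts, then busy-wait on a dword lock with an atomic xchg.
; Clobbers eax, which is harmless at the call sites in this patch because
; eax is reloaded immediately after taking the lock.
macro spin_lock_irqsave lock_ptr
{
        pushfd                          ; remember the caller's IF (IRQ enable) flag
        cli                             ; mask IRQs while the lock is held
@@:
        mov     eax, 1
        xchg    eax, [lock_ptr]         ; atomic test-and-set (xchg with memory implies LOCK)
        test    eax, eax
        jnz     @b                      ; lock already taken: keep spinning
}

macro spin_unlock_irqrestore lock_ptr
{
        mov     dword [lock_ptr], 0     ; release the lock
        popfd                           ; restore the caller's IF flag
}

Because this sketch keeps the saved flags on the stack, lock and unlock must happen at the same stack depth, which holds for the procedures patched below; the kernel's real macros may store the flags elsewhere and spin differently, so treat this only as a model of the semantics the patch depends on.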


@@ -284,8 +284,7 @@ proc alloc_kernel_space stdcall, size:dword
         cmp     eax, [heap_free]
         ja      .error
-        mov     ecx, heap_mutex
-        call    mutex_lock
+        spin_lock_irqsave heap_mutex
         mov     eax, [size]
@@ -326,11 +325,11 @@ proc alloc_kernel_space stdcall, size:dword
         sub     [edi+block_size], edx
         mov     eax, [edi+block_size]
         calc_index eax
         cmp     eax, [block_ind]
         je      .add_used
         list_del edi
         mov     ecx, [block_ind]
         lea     edx, [mem_block_list+ecx*8]
@@ -340,13 +339,12 @@ proc alloc_kernel_space stdcall, size:dword
 @@:
         bts     [mem_block_mask], eax
         lea     edx, [mem_block_list+eax*8]       ;edx= list head
         list_add edi, edx
 .add_used:
         call    md.add_to_used
-        mov     ecx, heap_mutex
-        call    mutex_unlock
+        spin_unlock_irqrestore heap_mutex
         mov     eax, [esi+block_base]
         pop     edi
         pop     esi
@@ -354,7 +352,7 @@ proc alloc_kernel_space stdcall, size:dword
         ret
 .m_eq_size:
         list_del edi
         lea     edx, [mem_block_list+ebx*8]
         cmp     edx, [edx]
         jnz     @f
@@ -364,8 +362,7 @@ proc alloc_kernel_space stdcall, size:dword
         jmp     .add_used
 .error_unlock:
-        mov     ecx, heap_mutex
-        call    mutex_unlock
+        spin_unlock_irqrestore heap_mutex
 .error:
         xor     eax, eax
         pop     edi
@@ -377,8 +374,7 @@ endp
 align 4
 proc free_kernel_space stdcall uses ebx ecx edx esi edi, base:dword
-        mov     ecx, heap_mutex
-        call    mutex_lock
+        spin_lock_irqsave heap_mutex
         mov     eax, [base]
@@ -446,8 +442,7 @@ proc free_kernel_space stdcall uses ebx ecx edx esi edi, base:dword
         lea     edx, [mem_block_list+eax*8]
         list_add edi, edx
 .m_eq:
-        mov     ecx, heap_mutex
-        call    mutex_unlock
+        spin_unlock_irqrestore heap_mutex
         xor     eax, eax
         not     eax
         ret
@@ -459,8 +454,7 @@ proc free_kernel_space stdcall uses ebx ecx edx esi edi, base:dword
         jmp     .add_block
 .fail:
-        mov     ecx, heap_mutex
-        call    mutex_unlock
+        spin_unlock_irqrestore heap_mutex
         xor     eax, eax
         ret
 endp
@@ -544,17 +538,15 @@ proc kernel_free stdcall, base:dword
         push    ebx esi
-        mov     ecx, heap_mutex
-        call    mutex_lock
+        spin_lock_irqsave heap_mutex
         mov     eax, [base]
         call    md.find_used
-        mov     ecx, heap_mutex
         cmp     [esi+block_flags], USED_BLOCK
         jne     .fail
-        call    mutex_unlock
+        spin_unlock_irqrestore heap_mutex
         mov     eax, [esi+block_base]
         mov     ecx, [esi+block_size]
@@ -564,7 +556,7 @@ proc kernel_free stdcall, base:dword
         pop     esi ebx
         ret
 .fail:
-        call    mutex_unlock
+        spin_unlock_irqrestore heap_mutex
         xor     eax, eax
         pop     esi ebx
         ret
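
The motivation stated in the commit message can be pictured with a hypothetical driver fragment. Nothing below comes from this commit: the handler name, frame size and label are made up, and only the KernelAlloc/KernelFree exports and the invoke-style import that KolibriOS drivers typically use are taken as given. With heap_mutex as a sleeping mutex, a contended allocation here would try to block inside interrupt context; with the IRQ-masking spinlock it only busy-waits briefly.

; Hypothetical network-driver fragment (illustration only, not from this commit):
; an Ethernet IRQ handler grabbing a buffer for a freshly received frame.
align 4
int_handler:
        invoke  KernelAlloc, 1514       ; room for one maximum-size Ethernet frame
        test    eax, eax
        jz      .drop                   ; no memory: drop the frame
        ; ... copy or DMA the received frame into the buffer at eax,
        ;     hand it to the network stack, KernelFree it when done ...
.drop:
        ret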