NNP: removed dependency on libc library, clean code #178

Merged
mxlgv merged 1 commit from nnp-update into main 2025-03-28 20:26:04 +01:00


@@ -32,7 +32,6 @@ section '.flat' code readable align 16
 include "..\..\..\KOSfuncs.inc"
 include "..\..\..\proc32.inc"
 include "..\..\..\macros.inc"
-include "..\..\..\dll.inc"
 include "..\..\..\bcc32\include\kos_func.inc"
 include "..\..\..\bcc32\include\kos_heap.inc"
@@ -44,9 +43,9 @@ include "..\..\..\bcc32\include\kos_heap.inc"
 @@StrToInt$qpc equ @StrToInt$qpc
 @@StrToDouble$qpc equ @StrToDouble$qpc
-mem.alloc dd ? ;функция для выделения памяти
-mem.free dd ? ;функция для освобождения памяти
-mem.realloc dd ? ;функция для перераспределения памяти
+mem.alloc dd ? ;memory allocation function
+mem.free dd ? ;function to free up memory
+mem.realloc dd ? ;function for memory reallocation
 dll.load dd ?
 PRECISION equ 16
@@ -54,20 +53,20 @@ NNP_FF_BIN equ 0x6e6962
 NNP_FF_JSON equ 0x6e6f736a
 struct Layer
-c_size dd ? ;+ 0 curent size - число нейронов в текущем слое
-n_size dd ? ;+ 4 next size - число нейронов на следующем слое
+c_size dd ? ;+ 0 curent size - number of neurons in the current layer
+n_size dd ? ;+ 4 next size - number of neurons in the next layer
 neurons dd ? ;+ 8 []
 biases dd ? ;+12 []
 weights dd ? ;+16 [][]
 ends
 struct NeuralNetwork
-learningRate dq ? ;+ 0 скорость обучения
-layers dd ? ;+ 8 [] слои
-layers_length dd ? ;+12 число слоев
-activation dd ? ;+16 указатель на функцию активации
-derivative dd ? ;+20 указатель на функцию
-errors dd ? ;+24 массив для вычислений
+learningRate dq ? ;+ 0
+layers dd ? ;+ 8 []
+layers_length dd ? ;+12 number of layers
+activation dd ? ;+16 pointer to activation function
+derivative dd ? ;+20 function pointer
+errors dd ? ;+24 array for calculations
 errorsNext dd ? ;+28
 gradients dd ? ;+32
 deltas dd ? ;+36
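For readers who do not follow FASM struct syntax, here is a hedged C equivalent of the two records. The field offsets match the ";+ n" comments above on a 32-bit target; the element types of the arrays are inferred from the qword indexing used elsewhere in the file and are not stated in the diff itself.

    /* Hedged C view of the FASM structs above. */
    typedef struct Layer {
        unsigned  c_size;    /* + 0 number of neurons in the current layer */
        unsigned  n_size;    /* + 4 number of neurons in the next layer    */
        double   *neurons;   /* + 8 [c_size]                               */
        double   *biases;    /* +12 [c_size]                               */
        double  **weights;   /* +16 [c_size][n_size]                       */
    } Layer;                 /* sizeof.Layer == 20                         */

    typedef struct NeuralNetwork {
        double    learningRate;          /* + 0                            */
        Layer    *layers;                /* + 8 [layers_length]            */
        unsigned  layers_length;         /* +12 number of layers           */
        double  (*activation)(double);   /* +16 assumed C-level signature  */
        double  (*derivative)(double);   /* +20 assumed C-level signature  */
        double   *errors;                /* +24 scratch array              */
        double   *errorsNext;            /* +28                            */
        double   *gradients;             /* +32                            */
        double  **deltas;                /* +36 per-neuron rows            */
    } NeuralNetwork;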
@@ -113,11 +112,6 @@ proc lib_init
 mov [mem.free], ebx
 mov [mem.realloc], ecx
 mov [dll.load], edx
-or edx, edx
-jz @f
-invoke dll.load, @IMPORT
-@@:
 ret
 endp
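With the @IMPORT table gone, lib_init is reduced to caching the callbacks the loader hands in (mem.alloc/mem.free/mem.realloc/dll.load, per the mov lines above). A hedged C sketch of what the trimmed entry point now amounts to; the parameter order and the void return type here are assumptions for illustration only.

    /* Hedged sketch: lib_init now just stores the loader-provided callbacks;
       the old version additionally resolved a libc import table via dll.load. */
    static void *(*mem_alloc)(unsigned size);
    static void  (*mem_free)(void *p);
    static void *(*mem_realloc)(void *p, unsigned size);
    static void *(*dll_load)(void *import_table);

    void lib_init_sketch(void *(*a)(unsigned), void (*f)(void *),
                         void *(*r)(void *, unsigned), void *(*l)(void *))
    {
        mem_alloc   = a;
        mem_free    = f;
        mem_realloc = r;
        dll_load    = l;      /* still stored, but no longer used to pull in libc */
    }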
@@ -137,6 +131,21 @@ Math_random:
 @@:
 db 0,0,128,55 ;dd 1.0/65536.0
+align 16
+ieee754_exp:
+fld qword[esp+4]
+fldl2e ;push log2(e) onto stack
+fmulp
+fst st1 ;copies st0 to st1
+frndint ;round to integer
+fst st2 ;copies st0 to st2
+fsubp ;subtraction with stack pop
mxlgv marked this conversation as resolved (outdated)

Review: Strange symbol. Look further, there is more like this. And let's not create new comments in Russian (unless we're already correcting the old ones). This is an international project.

Reply: Fixed
+f2xm1 ;raises 2 to the power st0 and subtracts 1
+fld1
+faddp
+fscale ;scale by powers of two
+ret
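The new ieee754_exp replaces the former call into libc's exp. It uses the usual x87 identity e^x = 2^(x*log2(e)): split x*log2(e) into a rounded integer part n and a fractional part f, compute 2^f via f2xm1, then multiply by 2^n with fscale. A rough C model of the same computation; the function name and the use of C99 math helpers are mine, for illustration only.

    #include <math.h>

    /* Rough C model of what the FPU sequence above computes:
       e^x = 2^(x*log2(e)) = 2^n * 2^f, n = round(x*log2(e)), f = remainder. */
    double exp_sketch(double x)
    {
        double t = x * 1.4426950408889634; /* log2(e), what fldl2e loads        */
        double n = nearbyint(t);           /* frndint (current rounding mode)   */
        double f = t - n;                  /* fsubp, using the copies in st1/st2 */
        return exp2(f) * exp2(n);          /* f2xm1 + fld1 + faddp, then fscale */
    }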
 align 16
 sigmoid:
 push ebp
@@ -145,7 +154,7 @@ sigmoid:
 fld qword[ebp+8]
 fchs
 fstp qword[esp]
-call dword[_exp]
+call ieee754_exp
 add esp,8
 fadd dword[f_1_0]
 fdivr dword[f_1_0]
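This is the classic logistic activation: negate the argument (fchs), exponentiate, add 1.0, and take the reciprocal (fdivr computes 1.0/st0). A minimal self-contained C equivalent; the original now calls its own ieee754_exp instead of libc's exp.

    #include <math.h>

    /* sigmoid(x) = 1 / (1 + e^(-x)); the asm uses ieee754_exp where exp() is used here. */
    double sigmoid_sketch(double x)
    {
        return 1.0 / (1.0 + exp(-x));
    }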
@@ -190,7 +199,7 @@ Layer_Create:
 mov dword[esi+Layer.weights],eax
 xor ebx,ebx
 cmp edi,ebx
 jbe .end_f
 @@:
 mov eax,[ebp+16]
 shl eax,3
@@ -250,18 +259,18 @@ NNP_Create:
 mov eax,[ebp+28] ;sizes
 lea edx,[eax+4]
 mov dword[ebp-8],edx ;save &sizes[i+1]
-jmp .150
+jmp .3
 .cycle_0: ;for (i=0; i < sizes_length; i++)
 xor ecx,ecx
 mov dword[ebp-4],ecx ;nextSize = 0
 mov eax,[ebp+32] ;sizes_length
 dec eax
 cmp edi,eax
-jae .152
+jae @f
 mov edx,[ebp-8]
 mov ecx,[edx]
 mov dword[ebp-4],ecx ;nextSize = sizes[i+1]
-.152:
+@@:
 mov eax,[ebp-4]
 push eax
 mov edx,[ebp-8]
@@ -277,7 +286,7 @@ NNP_Create:
 mov eax,[ebp-8]
 lea edx,[eax-4]
 mov dword[ebp-12],edx ;save &sizes[i]
-jmp .154
+jmp .2
 .cycle_1: ;for (j=0; j < sizes[i]; j++)
 call Math_random
 fmul dword[f_2_0]
@@ -290,7 +299,7 @@ NNP_Create:
 fstp qword[ecx+8*esi]
 xor ebx,ebx ;k=0
 cmp ebx,[ebp-4]
-jae .157
+jae .1
 @@: ;for (k=0; k < nextSize; k++)
 call Math_random
 fmul dword[f_2_0]
@@ -305,15 +314,15 @@ NNP_Create:
 inc ebx
 cmp ebx,[ebp-4]
 jb @b
-.157:
+.1:
 inc esi
-.154:
+.2:
 mov ecx,[ebp-12]
 cmp esi,[ecx]
 jb .cycle_1
 inc edi
 add dword[ebp-8],4
-.150:
+.3:
 cmp edi,[ebp+32] ;sizes_length
 jb .cycle_0
 ;create errors array
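The renamed local labels (.1/.2/.3 instead of the old numeric .150/.152/.154/.157) do not change the logic: every bias and weight is seeded from Math_random scaled by f_2_0. A hedged C rendering of the initialization loops follows, reusing the struct sketch above; the C-level Layer_Create signature, the rnd() helper, and the final shift into [-1, 1) (only the fmul by 2.0 is visible in the hunk) are assumptions.

    #include <stdlib.h>

    void Layer_Create(Layer *l, unsigned c_size, unsigned n_size); /* assumed C-level signature */

    static double rnd(void)                 /* stand-in for Math_random, value in [0, 1) */
    {
        return rand() / (RAND_MAX + 1.0);
    }

    /* Hedged sketch of the NNP_Create loops. */
    void nnp_init_sketch(NeuralNetwork *o, const unsigned *sizes, unsigned sizes_length)
    {
        for (unsigned i = 0; i < sizes_length; i++) {
            unsigned nextSize = (i + 1 < sizes_length) ? sizes[i + 1] : 0;
            Layer_Create(&o->layers[i], sizes[i], nextSize);
            for (unsigned j = 0; j < sizes[i]; j++) {
                /* fmul f_2_0 is shown; the shift into [-1, 1) is assumed */
                o->layers[i].biases[j] = rnd() * 2.0 - 1.0;
                for (unsigned k = 0; k < nextSize; k++)
                    o->layers[i].weights[j][k] = rnd() * 2.0 - 1.0;
            }
        }
    }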
@@ -349,7 +358,7 @@ f_2_0:
 f_1_0:
 dd 1.0
-;заполнение случайными числами
+;random number filling
 ;+ 8 NeuralNetwork* o
 align 16
 NNP_Reset:
@@ -413,7 +422,7 @@ NNP_Reset:
 pop ebp
 ret 4
-;расчет входных и выходных нейронов
+;calculation of input and output neurons
 ;+ 8 NeuralNetwork* o
 ;+12 double* inputs
 align 16
@@ -508,7 +517,7 @@ NNP_BackPropagation:
 add edi,[esi+NeuralNetwork.layers]
 xor ebx,ebx ;i=0
 mov eax,[ebp+12] ;eax = targets[]
-jmp .180
+jmp .1
 align 4
 .cycle_0:
 mov edx,[edi+Layer.neurons]
@@ -518,7 +527,7 @@ align 4
 fstp qword[ecx+8*ebx]
 inc ebx
 add eax,8
-.180:
+.1:
 cmp ebx,[edi+Layer.c_size]
 jb .cycle_0
 dec dword[ebp-4] ;k--
@@ -528,7 +537,7 @@ align 4
 .cycle_1:
 sub edi,sizeof.Layer
 xor ebx,ebx ;i=0
-jmp .186
+jmp .2
 align 4
 .cycle_2:
 mov eax,[edi+sizeof.Layer+Layer.neurons]
@@ -541,12 +550,12 @@ align 4
 mov edx,[esi+NeuralNetwork.gradients]
 fstp qword[edx+8*ebx]
 inc ebx
-.186:
+.2:
 cmp ebx,[edi+sizeof.Layer+Layer.c_size]
 jb .cycle_2
 mov edx,[esi+NeuralNetwork.deltas]
 xor ebx,ebx
-jmp .189
+jmp .3
 align 4
 .cycle_3:
 mov eax,[edi+Layer.c_size]
@@ -556,7 +565,7 @@ align 4
 pop ecx
 mov dword[edx],eax
 xor eax,eax ;j=0
-jmp .191
+jmp @f
 align 4
 .cycle_4:
 mov ecx,[esi+NeuralNetwork.gradients]
@@ -566,16 +575,16 @@ align 4
 mov ecx,[edx]
 fstp qword[ecx+8*eax]
 inc eax
-.191:
+@@:
 cmp eax,[edi+Layer.c_size]
 jb .cycle_4
 inc ebx
 add edx,4
-.189:
+.3:
 cmp ebx,[edi+sizeof.Layer+Layer.c_size]
 jb .cycle_3
 xor ebx,ebx
-jmp .195
+jmp .4
 align 4
 .cycle_5:
 mov eax,[esi+NeuralNetwork.errorsNext]
@@ -583,7 +592,7 @@ align 4
 mov dword[eax+8*ebx],edx
 mov dword[eax+8*ebx+4],edx
 xor eax,eax ;j=0
-jmp .197
+jmp @f
 align 4
 .cycle_6:
 mov edx,[edi+Layer.weights]
@@ -595,11 +604,11 @@ align 4
 fadd qword[ecx+8*ebx]
 fstp qword[ecx+8*ebx]
 inc eax
-.197:
+@@:
 cmp eax,[edi+sizeof.Layer+Layer.c_size]
 jb .cycle_6
 inc ebx
-.195:
+.4:
 cmp ebx,[edi]
 jb .cycle_5
 ;copy errors to next level
@@ -610,7 +619,7 @@ align 4
 mov eax,[esi+NeuralNetwork.deltas]
 mov dword[ebp-12],eax
 xor ebx,ebx ;i=0
-jmp .201
+jmp .6
 align 4
 .cycle_7:
 mov ecx,[esi+NeuralNetwork.gradients]
@@ -620,7 +629,7 @@ align 4
 fstp qword[eax+8*ebx]
 xor eax,eax ;j=0
 mov edx,[ebp-12] ;edx = deltas[i]
-jmp .203
+jmp .5
 align 4
 .cycle_8:
 ; mov ecx,[edx]
@@ -656,7 +665,7 @@ align 4
 ; pop edx
 ;@@:
 inc eax
-.203:
+.5:
 cmp eax,[edi+Layer.c_size]
 jb .cycle_8
 mov eax,[ebp-12]
@@ -664,7 +673,7 @@ align 4
 pop ecx
 inc ebx
 add dword[ebp-12],4
-.201:
+.6:
 cmp ebx,[edi+sizeof.Layer+Layer.c_size]
 jb .cycle_7
 dec dword[ebp-4]
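Only label renames appear in these hunks, but the loop structure is the standard back-propagation pass: .cycle_0 forms the output error from targets, .cycle_1 walks the layers backwards, .cycle_2 turns errors into gradients via the derivative, .cycle_3/.cycle_4 build the deltas, .cycle_5/.cycle_6 propagate errors through the weights, and .cycle_7/.cycle_8 apply the update. The arithmetic itself sits in lines elided from this diff, so the C outline below (reusing the struct sketch above) is a hedged, textbook-shaped reading rather than a transcription.

    /* Heavily hedged outline of the .cycle_* loops; the exact formulas are in
       lines not shown in this diff, so treat this as the conventional shape. */
    void backprop_outline(NeuralNetwork *o, const double *targets)
    {
        Layer *out = &o->layers[o->layers_length - 1];
        for (unsigned i = 0; i < out->c_size; i++)              /* .cycle_0 */
            o->errors[i] = targets[i] - out->neurons[i];

        for (int k = (int)o->layers_length - 2; k >= 0; k--) {  /* .cycle_1, the k-- loop */
            Layer *L = &o->layers[k], *N = &o->layers[k + 1];
            for (unsigned i = 0; i < N->c_size; i++)            /* .cycle_2 */
                o->gradients[i] = o->errors[i] * o->derivative(N->neurons[i]);
            /* .cycle_3/.cycle_4: deltas[i][j] built from gradients and L->neurons      */
            /* .cycle_5/.cycle_6: errorsNext[j] accumulates weights[j][i] * errors[i],  */
            /*                    then the errors are copied to the next level          */
            /* .cycle_7/.cycle_8: weights and biases updated from deltas and gradients, */
            /*                    scaled by o->learningRate                             */
            (void)L;
        }
    }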
@@ -693,8 +702,7 @@ NNP_GetMemData:
 jne .end_f
 mov esi,[ebp+16]
 mov byte[esi],0
-stdcall [_strcat], esi,txt_QlearningRateQ_
-add esp,8
+stdcall str_cat, esi,txt_QlearningRateQ_
 push 1
 push PRECISION
 mov eax,[ebp+8]
@@ -702,12 +710,9 @@ NNP_GetMemData:
 push dword[eax+NeuralNetwork.learningRate]
 call @@DoubleToStr$qduso
 add esp,16
-stdcall [_strcat], esi,eax
-add esp,8
-stdcall [_strcat], esi,txt_zap_nl
-add esp,8
-stdcall [_strcat], esi,txt_Qlayers_lengthQ
-add esp,8
+stdcall str_cat, esi,eax
+stdcall str_cat, esi,txt_zap_nl
+stdcall str_cat, esi,txt_Qlayers_lengthQ
 push 1
 push 0
 mov ecx,[ebp+8]
@@ -716,23 +721,18 @@ NNP_SetMemData:
 fstp qword[esp]
 call @@DoubleToStr$qduso
 add esp,16
-stdcall [_strcat], esi,eax
-add esp,8
-stdcall [_strcat], esi,txt_zap_nl
-add esp,8
-.230:
-stdcall [_strcat], esi,txt_QlayersQ
-add esp,8
+stdcall str_cat, esi,eax
+stdcall str_cat, esi,txt_zap_nl
+stdcall str_cat, esi,txt_QlayersQ
 xor edi,edi ;i=0
-jmp .232
+jmp .7
 align 4
 .cycle_0:
 push esi
 call @@strlen$qpxc
 pop ecx
 add esi,eax
-stdcall [_strcat], esi,txt_nl_t_Qc_sizeQ
-add esp,8
+stdcall str_cat, esi,txt_nl_t_Qc_sizeQ
 mov ebx,edi
 imul ebx,sizeof.Layer
 push 1
@@ -748,10 +748,8 @@ align 4
 fstp qword[esp]
 call @@DoubleToStr$qduso
 add esp,16
-stdcall [_strcat], esi,eax
-add esp,8
-stdcall [_strcat], esi,txt_zap_nl_t_Qn_sizeQ
-add esp,8
+stdcall str_cat, esi,eax
+stdcall str_cat, esi,txt_zap_nl_t_Qn_sizeQ
 push 1
 push 0
 mov ecx,[ebp+8]
@@ -765,21 +763,17 @@ align 4
 fstp qword[esp]
 call @@DoubleToStr$qduso
 add esp,16
-stdcall [_strcat], esi,eax
-add esp,8
-stdcall [_strcat], esi,txt_zap_nl
-add esp,8
-stdcall [_strcat], esi,txt_t_QneuronsQ
-add esp,8
+stdcall str_cat, esi,eax
+stdcall str_cat, esi,txt_zap_nl
+stdcall str_cat, esi,txt_t_QneuronsQ
 xor ebx,ebx ;j=0
-jmp .234
+jmp .1
 align 4
 .cycle_1:
 test ebx,ebx
-je .235
-stdcall [_strcat], esi,txt_zap_sp
-add esp,8
-.235:
+je @f
+stdcall str_cat, esi,txt_zap_sp
+@@:
 push 1
 push PRECISION
 mov eax,edi
@@ -791,29 +785,25 @@ align 4
 push dword[eax+8*ebx]
 call @@DoubleToStr$qduso
 add esp,16
-stdcall [_strcat], esi,eax
-add esp,8
+stdcall str_cat, esi,eax
 inc ebx
-.234:
+.1:
 mov ecx,edi
 imul ecx,sizeof.Layer
 mov eax,[ebp+8]
 add ecx,[eax+NeuralNetwork.layers]
 cmp ebx,[ecx+Layer.c_size]
 jb .cycle_1
-stdcall [_strcat], esi,txt_sqbr_zap_nl
-add esp,8
-stdcall [_strcat], esi,txt_t_QbiasesQ
-add esp,8
+stdcall str_cat, esi,txt_sqbr_zap_nl
+stdcall str_cat, esi,txt_t_QbiasesQ
 xor ebx,ebx ;j=0
-jmp .238
+jmp .2
 align 4
 .cycle_2:
 test ebx,ebx
-je .239
-stdcall [_strcat], esi,txt_zap_sp
-add esp,8
-.239:
+je @f
+stdcall str_cat, esi,txt_zap_sp
+@@:
 push 1
 push PRECISION
 mov eax,edi
@@ -825,43 +815,38 @@ align 4
 push dword[eax+8*ebx]
 call @@DoubleToStr$qduso
 add esp,16
-stdcall [_strcat], esi,eax
-add esp,8
+stdcall str_cat, esi,eax
 inc ebx
-.238:
+.2:
 mov ecx,edi
 imul ecx,sizeof.Layer
 mov eax,[ebp+8]
 add ecx,[eax+NeuralNetwork.layers]
 cmp ebx,[ecx+Layer.c_size]
 jb .cycle_2
-stdcall [_strcat], esi,txt_sqbr_zap_t_QweightsQ
-add esp,8
+stdcall str_cat, esi,txt_sqbr_zap_t_QweightsQ
 mov eax,[ebp+8]
 mov ecx,edi
 imul ecx,sizeof.Layer
 add ecx,[eax+NeuralNetwork.layers]
 cmp dword[ecx+Layer.n_size],0
-je .241
+je .6
 xor ebx,ebx
-jmp .243
-.242:
+jmp .5
+.cycle_3:
 test ebx,ebx
-je .244
-stdcall [_strcat], esi,txt_zap_nl_t_t
-add esp,8
-.244:
-stdcall [_strcat], esi,txt_sqbro
-add esp,8
+je @f
+stdcall str_cat, esi,txt_zap_nl_t_t
+@@:
+stdcall str_cat, esi,txt_sqbro
 xor eax,eax
 mov dword[ebp-4],eax
-jmp .246
-.245:
+jmp .4
+.3:
 cmp dword[ebp-4],0
-je .247
-stdcall [_strcat], esi,txt_zap_sp
-add esp,8
-.247:
+je @f
+stdcall str_cat, esi,txt_zap_sp
+@@:
 push 1
 push PRECISION
 mov edx,edi
@@ -875,43 +860,39 @@ align 4
 push dword[ecx+8*eax]
 @@:
 call @@DoubleToStr$qduso
-dec dword[esp+8] ;уменьшаем PRECISION
-jz @f ;для избежания зацикливания
+dec dword[esp+8] ;decrease PRECISION
+jz @f ;to avoid looping
 cmp word[eax],'#'
-je @b ;если число не поместилось пробуем перевести с меньшей точностью
+je @b ;if the number does not fit, we try to translate with less precision
 @@:
 add esp,16
-stdcall [_strcat], esi,eax
-add esp,8
+stdcall str_cat, esi,eax
 inc dword[ebp-4]
-.246:
+.4:
 mov ecx,edi
 imul ecx,sizeof.Layer
 mov eax,[ebp+8]
 add ecx,[eax+NeuralNetwork.layers]
 mov ecx,[ecx+Layer.n_size]
 cmp ecx,[ebp-4]
-ja .245
-stdcall [_strcat], esi,txt_sqbr
-add esp,8
+ja .3
+stdcall str_cat, esi,txt_sqbr
 inc ebx
-.243:
+.5:
 mov eax,edi
 imul eax,sizeof.Layer
 mov ecx,[ebp+8]
 add eax,[ecx+NeuralNetwork.layers]
 cmp ebx,[eax+Layer.c_size]
-jb .242
-.241:
-stdcall [_strcat], esi,txt_sqbr_fbr_zap
-add esp,8
+jb .cycle_3
+.6:
+stdcall str_cat, esi,txt_sqbr_fbr_zap
 inc edi
-.232:
+.7:
 mov eax,[ebp+8]
 cmp edi,[eax+NeuralNetwork.layers_length]
 jb .cycle_0
-stdcall [_strcat], esi,txt_nl_t_sqbr
-add esp,8
+stdcall str_cat, esi,txt_nl_t_sqbr
 .end_f:
 pop edi esi ebx
 mov esp,ebp
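NNP_GetMemData builds a JSON-like dump of the network (the txt_* constants encode the punctuation: Q = quote, zap = comma, nl = newline, t = tab, sqbr/fbr = square/curly bracket). The only non-trivial piece in this hunk is the retry loop around @@DoubleToStr$qduso: if a value does not fit at the requested precision (the result starts with '#'), it is converted again with one digit less, down to zero. A hedged C rendering of that loop; the DoubleToStr prototype is an assumed C wrapper for the Borland helper.

    #define PRECISION 16   /* as defined at the top of the file */

    const char *DoubleToStr(double value, int precision, int flag); /* assumed prototype */

    /* Hedged sketch of the retry loop above. */
    const char *format_value(double value)
    {
        const char *s;
        int precision = PRECISION;
        for (;;) {
            s = DoubleToStr(value, precision, 1);
            if (--precision == 0)       /* give up: avoid looping forever     */
                break;
            if (s[0] != '#')            /* the value fitted at this precision */
                break;
        }
        return s;
    }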
@@ -963,12 +944,11 @@ NNP_SetMemData:
 mov eax,[ebp+16]
 mov edx,[ebp+12]
 ; cmp edx,NNP_FF_BIN
-; jne .191
+; jne @f
 ;...
-;.191:
+;@@:
 cmp edx,NNP_FF_JSON
-jne .198
-.199:
+jne .9
 stdcall @@strstr$qpxct1, eax,txt_learningRate
 add esp,8
 mov esi,eax
@@ -976,27 +956,27 @@ NNP_SetMemData:
 add esp,8
 mov esi,eax
 test esi,esi
-jne .200
+jne @f
 mov eax,1
-jmp .193
-.200:
+jmp .end_f
+@@:
 stdcall @@strchr$qpxci, esi,':'
 add esp,8
 mov ebx,eax
 test ebx,ebx
-jne .201
+jne @f
 mov eax,2
-jmp .193
-.201:
+jmp .end_f
+@@:
 inc ebx
 stdcall @@strchr$qpxci, esi,','
 add esp,8
 mov esi,eax
 test esi,esi
-jne .202
+jne @f
 mov eax,3
-jmp .193
-.202:
+jmp .end_f
+@@:
 mov byte[esi],0
 inc esi
 stdcall @@StrToInt$qpc, ebx
@@ -1008,38 +988,38 @@ NNP_SetMemData:
 mov eax,[ebp+8]
 mov edx,[eax+12]
 cmp edx,[ebp-4]
-je .203
+je @f
 mov eax,txt_err_layers_neq
-jmp .193
-.203:
+jmp .end_f
+@@:
 xor edi,edi ;i=0
-jmp .205
-.204: ;for(i=0;i<o->layers_length;i++)
+jmp .8
+.cycle_0: ;for(i=0;i<o->layers_length;i++)
 stdcall @@strstr$qpxct1, esi,txt_c_size
 add esp,8
 mov esi,eax
 test esi,esi
-jne .206
+jne @f
 mov eax,txt_err_c_size
-jmp .193
-.206:
+jmp .end_f
+@@:
 stdcall @@strchr$qpxci, esi,':'
 add esp,8
 mov ebx,eax
 test ebx,ebx
-jne .207
+jne @f
 mov eax,6
-jmp .193
-.207:
+jmp .end_f
+@@:
 inc ebx
 stdcall @@strchr$qpxci, esi,','
 add esp,8
 mov esi,eax
 test esi,esi
-jne .208
+jne @f
 mov eax,7
-jmp .193
-.208:
+jmp .end_f
+@@:
 mov byte[esi],0
 inc esi
 stdcall @@StrToInt$qpc, ebx
@@ -1049,27 +1029,27 @@ NNP_SetMemData:
 add esp,8
 mov esi,eax
 test esi,esi
-jne .209
+jne @f
 mov eax,8
-jmp .193
-.209:
+jmp .end_f
+@@:
 stdcall @@strchr$qpxci, esi,':'
 add esp,8
 mov ebx,eax
 test ebx,ebx
-jne .210
+jne @f
 mov eax,9
-jmp .193
-.210:
+jmp .end_f
+@@:
 inc ebx
 stdcall @@strchr$qpxci, esi,','
 add esp,8
 mov esi,eax
 test esi,esi
-jne .211
+jne @f
 mov eax,10
-jmp .193
-.211:
+jmp .end_f
+@@:
 mov byte[esi],0
 inc esi
 stdcall @@StrToInt$qpc,ebx
@@ -1081,11 +1061,11 @@ NNP_SetMemData:
 add eax,[edx+NeuralNetwork.layers]
 mov edx,[eax+Layer.c_size]
 cmp edx,[ebp-4]
-jne .213
+jne @f
 mov edx,[eax+Layer.n_size]
 cmp edx,[ebp-8]
-je .214
-.213:
+je .1
+@@:
 mov ecx,[ebp+8]
 stdcall NNP_GetMaxLLen,ecx
 mov ecx,edi
@@ -1106,10 +1086,10 @@ NNP_SetMemData:
 add eax,edx
 stdcall Layer_Create,eax
 cmp ebx,[ebp-4] ;if(n>s || k>s)
-jb .215
+jb @f
 cmp ebx,[ebp-8]
-jae .214
-.215:
+jae .1
+@@:
 mov edx,[ebp+8]
 mov ecx,[edx+NeuralNetwork.errors]
 cmp ecx,[edx+NeuralNetwork.errorsNext]
@@ -1135,45 +1115,45 @@ NNP_SetMemData:
 stdcall [mem.realloc], [edx+NeuralNetwork.deltas],ebx
 mov edx,[ebp+8]
 mov dword[edx+NeuralNetwork.deltas],eax
-.214:
+.1:
 stdcall @@strstr$qpxct1, esi,txt_biases
 add esp,8
 mov esi,eax
 test esi,esi
-jne .216
+jne @f
 mov eax,11
-jmp .193
-.216:
+jmp .end_f
+@@:
 stdcall @@strchr$qpxci, esi,'['
 add esp,8
 mov ebx,eax
 test ebx,ebx
-jne .217
+jne @f
 mov eax,txt_err_sqbrl_b1
-jmp .193
-.217:
+jmp .end_f
+@@:
 inc ebx
 xor edx,edx
 mov dword[ebp-8],edx
-jmp .219
-.218:
+jmp .4
+.2:
 dec edx
 cmp eax,edx
-jae .220
+jae @f
 stdcall @@strchr$qpxci, ebx,','
 add esp,8
 mov esi,eax
-jmp .221
-.220:
+jmp .3
+@@:
 stdcall @@strchr$qpxci, ebx,']'
 add esp,8
 mov esi,eax
-.221:
+.3:
 test esi,esi
-jne .222
+jne @f
 mov eax,13
-jmp .193
-.222:
+jmp .end_f
+@@:
 mov byte[esi],0
 stdcall @@StrToDouble$qpc,ebx
 pop ecx
@@ -1186,7 +1166,7 @@ NNP_SetMemData:
 mov edx,[eax+edx+Layer.biases]
 fstp qword[edx+8*ecx]
 inc dword[ebp-8]
-.219:
+.4:
 mov edx,edi
 imul edx,sizeof.Layer
 mov ecx,[ebp+8]
@@ -1194,29 +1174,29 @@ NNP_SetMemData:
 mov edx,[edx+Layer.c_size]
 mov eax,[ebp-8]
 cmp edx,eax
-ja .218
+ja .2
 mov esi,ebx
 stdcall @@strstr$qpxct1, esi,txt_weights
 add esp,8
 mov esi,eax
 test esi,esi
-jne .224
+jne @f
 mov eax,14
-jmp .193
-.224:
+jmp .end_f
+@@:
 stdcall @@strchr$qpxci, esi,'['
 add esp,8
 mov esi,eax
 test esi,esi
-jne .225
+jne @f
 mov eax,txt_err_sqbrl_w1
-jmp .193
-.225:
+jmp .end_f
+@@:
 inc esi
 xor edx,edx
 mov dword[ebp-8],edx ;k=0
-jmp .227
-.226: ;for(k=0;k<o->layers[i].c_size;k++)
+jmp .7
+.cycle_1: ;for(k=0;k<o->layers[i].c_size;k++)
 mov eax,edi
 imul eax,sizeof.Layer
@@ -1226,39 +1206,39 @@ NNP_SetMemData:
 or eax,eax
 jnz .end_null_we
 inc dword[ebp-8] ;k++
-jmp .227 ;if 'weights' is null array
+jmp .7 ;if 'weights' is null array
 .end_null_we:
 stdcall @@strchr$qpxci, esi,'['
 add esp,8
 mov ebx,eax
 test ebx,ebx
-jne .228
+jne @f
 mov eax,txt_err_sqbrl_w2
-jmp .193
-.228:
+jmp .end_f
+@@:
 inc ebx
 xor edx,edx
 mov dword[ebp-12],edx ;j=0
-jmp .230
-.229: ;for(j=0;j<o->layers[i].n_size;j++)
+jmp .6
+.cycle_2: ;for(j=0;j<o->layers[i].n_size;j++)
 dec edx
 cmp eax,edx ;eax = j, edx = n_size-1
-jae .231
+jae @f
 stdcall @@strchr$qpxci, ebx,','
 add esp,8
 mov esi,eax
-jmp .232
-.231:
+jmp .5
+@@:
 stdcall @@strchr$qpxci, ebx,']'
 add esp,8
 mov esi,eax
-.232:
+.5:
 test esi,esi
-jne .233
+jne @f
 mov eax,txt_err_sqbrr_w2
-jmp .193
-.233:
+jmp .end_f
+@@:
 mov byte[esi],0
 stdcall @@StrToDouble$qpc,ebx
 pop ecx
@@ -1273,7 +1253,7 @@ NNP_SetMemData:
 mov edx,[ebp-12]
 fstp qword[eax+8*edx]
 inc dword[ebp-12]
-.230:
+.6:
 mov edx,edi
 imul edx,sizeof.Layer
 mov ecx,[ebp+8]
@@ -1281,27 +1261,27 @@ NNP_SetMemData:
 mov edx,[edx+Layer.n_size]
 mov eax,[ebp-12]
 cmp edx,eax
-ja .229
+ja .cycle_2
 mov esi,ebx
 inc dword[ebp-8]
-.227:
+.7:
 mov eax,edi
 imul eax,sizeof.Layer
 mov edx,[ebp+8]
 add eax,[edx+NeuralNetwork.layers]
 mov eax,[eax+Layer.c_size]
 cmp eax,[ebp-8]
-ja .226
+ja .cycle_1
 inc edi
-.205:
+.8:
 mov edx,[ebp+8]
 cmp edi,[edx+NeuralNetwork.layers_length]
-jb .204
+jb .cycle_0
 xor eax,eax
-jmp .193
-.198:
+jmp .end_f
+.9:
 mov eax,1000
-.193:
+.end_f:
 pop edi esi ebx
 mov esp,ebp
 pop ebp
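NNP_SetMemData is the inverse of NNP_GetMemData: it walks the JSON-like text with @@strstr$qpxct1/@@strchr$qpxci, cuts each field off at its ':'/','/']' delimiter, converts it with @@StrToInt$qpc or @@StrToDouble$qpc, and returns 0 on success or a small error code (or a txt_err_* message pointer) on failure. Below is a hedged C sketch of the per-field pattern, together with a sample of the serialized layout reconstructed from the txt_* string names; the exact whitespace the library emits may differ.

    #include <stdlib.h>
    #include <string.h>

    /* Sample of the dump format, reconstructed from the txt_* constant names. */
    static char sample[] =
        "\"learningRate\": 0.5,\n"
        "\"layers_length\": 3,\n"
        "\"layers\": [\n"
        "\t{\"c_size\": 2, \"n_size\": 3,\n"
        "\t\"neurons\": [0, 0], \"biases\": [0.1, -0.2],\n"
        "\t\"weights\": [[0.3, -0.4, 0.5], [0.6, 0.7, -0.8]]},\n"
        "...]";

    /* Hedged sketch of one parsing step (here an integer field such as
       "layers_length"); the error codes mirror the small values the asm
       loads into eax before jumping to .end_f.                           */
    int parse_int_field(char **cursor, const char *key, int *out)
    {
        char *p = strstr(*cursor, key);
        if (!p) return 1;
        char *v = strchr(p, ':');
        if (!v) return 2;
        char *end = strchr(p, ',');
        if (!end) return 3;
        *end = 0;                                /* the asm also terminates the field in place */
        *out = (int)strtol(v + 1, NULL, 10);     /* @@StrToInt$qpc; doubles go through strtod  */
        *cursor = end + 1;
        return 0;
    }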
@@ -1320,20 +1300,19 @@ Layer_Destroy:
 call @$bdele$qpv
 pop ecx
 xor ebx,ebx
-jmp .143
-.142:
+jmp @f
+.cycle_1:
 mov eax,[esi+Layer.weights]
 push dword[eax+4*ebx]
 call @$bdele$qpv
 pop ecx
 inc ebx
-.143:
+@@:
 cmp ebx,[esi+Layer.c_size]
-jb .142
+jb .cycle_1
 push dword[esi+Layer.weights]
 call @$bdele$qpv
 pop ecx
-.145:
 pop esi ebx ebp
 ret 4
@@ -1344,17 +1323,17 @@ NNP_Destroy:
 push ebx esi
 mov esi,[ebp+8]
 xor ebx,ebx
-jmp .232
-.231:
+jmp @f
+.cycle_1:
 mov eax,ebx
 imul eax,sizeof.Layer
 add eax,[esi+NeuralNetwork.layers]
 push eax
 call Layer_Destroy
 inc ebx
-.232:
+@@:
 cmp ebx,[esi+NeuralNetwork.layers_length]
-jb .231
+jb .cycle_1
 push dword[esi+NeuralNetwork.layers]
 call @$bdele$qpv
 pop ecx
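The teardown pair only gets label renames here. For orientation, a hedged C outline of what the two routines free, reusing the struct sketch above: @$bdele$qpv is Borland's operator delete, so free() stands in for it, and the releases that sit in lines elided from this diff are marked as assumed.

    #include <stdlib.h>

    /* Hedged outline. The per-row weight frees and the final free of
       Layer.weights are visible in the hunk; the frees of neurons/biases
       happen in lines not shown here and are assumed.                   */
    void layer_destroy_sketch(Layer *l)
    {
        free(l->neurons);                        /* assumed: elided from the diff */
        free(l->biases);                         /* assumed: elided from the diff */
        for (unsigned i = 0; i < l->c_size; i++)
            free(l->weights[i]);                 /* .cycle_1 in Layer_Destroy     */
        free(l->weights);
    }

    void nnp_destroy_sketch(NeuralNetwork *o)
    {
        for (unsigned i = 0; i < o->layers_length; i++)
            layer_destroy_sketch(&o->layers[i]); /* .cycle_1 in NNP_Destroy       */
        free(o->layers);
    }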
@@ -1398,14 +1377,3 @@ EXPORTS:
 sz_getmemdata db 'NNP_GetMemData',0
 sz_setmemdata db 'NNP_SetMemData',0
 sz_destroy db 'NNP_Destroy',0
-align 16
-@IMPORT:
-library \
-libc, 'libc.obj'
-import libc, \
-_strcat, 'strcat',\
-_exp, 'exp'
-;_scanf, 'scanf',\ ;???