@@ -40,7 +40,7 @@ class BasicFFN {
   FP wIn[inputSize][hidden1Size], wHid1[hidden1Size][hidden2Size], wHid2[hidden2Size][outputSize];
   FP bIn[hidden1Size], bHid1[hidden2Size], bHid2[outputSize];
   FP toHid1[hidden1Size], toHid2[hidden2Size], out[outputSize];
-  FP dOut[outputSize], dHid2[hidden2Size], dHid1[hidden1Size], dIn[inputSize];
+  FP dOut[outputSize], dHid2[hidden2Size], dHid1[hidden1Size];
   inline static FP epsilon = 1e-6; // to prevent dead neurons when using ReLU
   bool xavier = false;
   FP eta = 1e-2;
@@ -107,15 +107,16 @@ class BasicFFN {
   @param actFuncDeriv pointer to the derivative of the activation function
   */
   template <size_t inSize, size_t outSize>
-  void backward_layer (FP (&w)[outSize][inSize], FP (&b)[inSize], FP (&dataIn)[inSize], FP (&deltaIn)[inSize], FP (&deltaOut)[outSize], FP (*actFuncDeriv)(FP),
+  void backward_layer (FP (&w)[outSize][inSize], FP (&b)[inSize], FP (&dataIn)[inSize], FP (&deltaIn)[inSize], FP *deltaOut, FP (*actFuncDeriv)(FP),
                        FP eta) {
     // update the weights and biases from the incoming delta
     for (size_t i = 0; i < inSize; ++i) {
       for (size_t j = 0; j < outSize; ++j) w[j][i] -= eta * deltaIn[i] * dataIn[i];
       b[i] -= eta * deltaIn[i];
     }
 
-    // update deltaOut (not useful for the hidden-to-input layer)
+    if (!deltaOut) return;
+    // update deltaOut only when a buffer is provided
     for (size_t i = 0; i < outSize; ++i) {
       for (size_t j = 0; j < inSize; ++j) deltaOut[i] += w[i][j] * deltaIn[j];
       // chain rule
@@ -198,7 +199,7 @@ class BasicFFN {
 
     backward_layer<outputSize, hidden2Size>(wHid2, bHid2, out, dOut, dHid2, actFuncDeriv, eta);
     backward_layer<hidden2Size, hidden1Size>(wHid1, bHid1, toHid2, dHid2, dHid1, actFuncDeriv, eta);
-    backward_layer<hidden1Size, inputSize>(wIn, bIn, toHid1, dHid1, dIn, actFuncDeriv, eta);
+    backward_layer<hidden1Size, inputSize>(wIn, bIn, toHid1, dHid1, nullptr, actFuncDeriv, eta);
   }
 };
 
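For context, this change turns deltaOut into an optional output buffer: the hidden1-to-input step no longer needs the dIn member (removed above), so it passes a null pointer and backward_layer skips the propagation loop entirely. Below is a minimal, self-contained sketch of that pattern with illustrative names (backprop_step, the sizes, and the test values are assumptions for demonstration, not code from this repository):

#include <cstddef>
#include <cstdio>

using FP = float;

// Sketch of the nullable-output-buffer idea: deltaOut may be null when the
// propagated delta would never be used (e.g. the layer closest to the input).
template <size_t inSize, size_t outSize>
void backprop_step (FP (&w)[outSize][inSize], FP (&deltaIn)[inSize], FP *deltaOut) {
  if (!deltaOut) return;                 // nothing to propagate, skip the work
  for (size_t i = 0; i < outSize; ++i) {
    deltaOut[i] = 0;                     // start from zero before accumulating
    for (size_t j = 0; j < inSize; ++j) deltaOut[i] += w[i][j] * deltaIn[j];
  }
}

int main () {
  FP w[2][3]  = {{1, 0, 1}, {0, 1, 1}};
  FP dNext[3] = {0.5f, 0.25f, 0.25f};
  FP dPrev[2] = {};

  backprop_step<3, 2>(w, dNext, dPrev);    // buffer given: delta is propagated
  backprop_step<3, 2>(w, dNext, nullptr);  // no buffer: the call is a no-op

  std::printf("%f %f\n", dPrev[0], dPrev[1]);
  return 0;
}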