0% found this document useful (0 votes)
13 views

"Init - Weight - TXT" "Jacobi - TXT" "Network - TXT"

This document contains code for initializing weights randomly and normalizing them using the Nguyen-Widrow method for a neural network with one hidden layer. It defines functions for randomizing weights, calculating normalized weights and biases, computing the weighted sums and outputs. The functions are used to simulate the network for three different input datasets - industrial, residential and commercial - and calculate the errors between actual and predicted outputs at each step. The normalized weights, weighted sums, outputs and errors are written to output files for analysis.

Uploaded by

Ogunranti Rasaq
Copyright
© All Rights Reserved
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
13 views

"Init - Weight - TXT" "Jacobi - TXT" "Network - TXT"

This document contains code for initializing weights randomly and normalizing them using the Nguyen-Widrow method for a neural network with one hidden layer. It defines functions for randomizing weights, calculating normalized weights and biases, computing the weighted sums and outputs. The functions are used to simulate the network for three different input datasets - industrial, residential and commercial - and calculate the errors between actual and predicted outputs at each step. The normalized weights, weighted sums, outputs and errors are written to output files for analysis.

Uploaded by

Ogunranti Rasaq
Copyright
© © All Rights Reserved
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 16

1:

2:
3:
4:
5:
6:
7:
8:
9:
10:
11:
12:
13:
14:
15:
16:
17:
18:
19:
20:
21:
22:
23:
24:
25:
26:
27:
28:
29:
30:
31:
32:
33:
34:
35:
36:
37:
38:
39:
40:
41:
42:
43:
44:
45:
46:
47:
48:
49:
50:
51:
52:
53:
54:

#include
#include
#include
#include
#include

<cstdlib>
<iostream>
<math.h>
"neural_variable.h"
<fstream>

using namespace std


ofstream
ofstream
ofstream

let("init_weight.txt") ;
jacobian("jacobi.txt") ;
net("network.txt") ;

// Returns one pseudo-random weight: an integer drawn from [min, max]
// (hundredths) mapped linearly into [-1.0, 1.0].
static float random_weight(int min, int max)
{
    int number = ( ( abs( rand() ) % (max - min + 1) ) + min ) ;
    if (number > max)   // defensive clamps (modulo already bounds the value)
        number = max ;
    if (number < min)
        number = min ;
    return ( (number / 100.0F) - 1 ) ;
}

// Fills weight_1 / weight_2 (input -> hidden), weight_3 (feedback),
// weight_4 (hidden -> output) and bias with random values in [-1, 1],
// echoing every value to cout and to the init_weight.txt log ("let").
void randomize_weights()
{
    int i ;
    int min = 0 ;
    int max = 200 ;
    srand((unsigned)time(0)) ;  // re-seeded on every call

    cout << " *************** RANDOM WEIGHTS ********************" << endl << endl;
    let  << " *************** RANDOM WEIGHTS ********************" << endl << endl;

    ////// input -> hidden neuron 1 weights //////
    let << "weight_1[" << input_var << "]= " ;
    for(i=0 ; i<input_var ; i++)
    {
        weight_1[i] = random_weight(min, max) ;
        cout << weight_1[i] << " \t" ;
        let  << weight_1[i] << " \t\t" ;
    }
    cout << endl ;
    let  << endl ;

    ////// input -> hidden neuron 2 weights //////
    let << "weight_2[" << input_var << "]= " ;
    for(i=0 ; i<input_var ; i++)
    {
        weight_2[i] = random_weight(min, max) ;
        cout << weight_2[i] << " \t" ;
        let  << weight_2[i] << " \t\t" ;
    }
    cout << endl ;
    let  << endl ;

    ////// feedback weights (k is a global index from neural_variable.h) //////
    let << "weight_3[" << hidden_neuron << "]= " ;
    for(k=0 ; k<hidden_neuron ; k++)
    {
        weight_3[k] = random_weight(min, max) ;
        cout << weight_3[k] << " \t\t" ;
        let  << weight_3[k] << " \t\t" ;
    }
    cout << endl ;
    let  << endl ;

    ////// hidden -> output weights //////
    let << "weight_4[" << hidden_neuron << "]= " ;
    for(k=0 ; k<hidden_neuron ; k++)
    {
        weight_4[k] = random_weight(min, max) ;
        cout << weight_4[k] << " \t" ;
        let  << weight_4[k] << " \t\t" ;
    }
    cout << endl ;
    let  << endl ;

    ////// biases: one per hidden neuron plus one per output neuron //////
    let << "bias[" << hidden_neuron + output_neuron << "]= " ;
    for(i=0 ; i<(hidden_neuron + output_neuron) ; i++)
    {
        bias[i] = random_weight(min, max) ;
        cout << bias[i] << " \t" ;
        let  << bias[i] << " \t\t" ;
    }
    cout << endl ;
    let  << endl ;
}

void nguyen_widrow()
{
randomize_weights() ;
alpha= 1.0/input_var ;
gamma= pow(hidden_neuron,alpha ) ;
beta = ( 0.7 * gamma )
;

///
///

for(k=0 ; k<6 ; k++ )


{
eucl_sum_1 = eucl_sum_1 + pow(weight_1[k],2)
eucl_sum_1 += pow(weight_1[k],2)
;
eucl_sum_2 = eucl_sum_2 + pow(weight_2[k],2)
eucl_sum_2 += pow(weight_2[k],2)
;
}
eucl_sum_1 += ( pow(bias[0],2) + pow(weight_3[0],2) )
eucl_sum_2 += ( pow(bias[1],2) + pow(weight_3[1],2) )
eucl_norm_1= sqrt( eucl_sum_1 ) ;
eucl_norm_2= sqrt( eucl_sum_2 ) ;

;
;

163:
164:
165:
166:
167:
168:
169:
170:
171:
172:
173:
174:
175:
176:
177:
178:
179:
180:
181:
182:
183:
184:
185:
186:
187:
188:
189:
190:
191:
192:
193:
194:
195:
196:
197:
198:
199:
200:
201:
202:
203:
204:
205:
206:
207:
208:
209:
210:
211:
212:
213:
214:
215:

let << endl;


let

}
let<< "\tbeta= " << beta << endl ;
let<< "\thidden_norm_1= " << eucl_norm_1 << endl ;
let<< "\thidden_norm_2= " << eucl_norm_2 << endl << endl

cout << " NGUYEN WIDROW WEIGHT " << endl << endl ;
let << "\t*** NGUYEN WIDROW INPUT ---> HIDDEN NEURON 1 WEIGHT*** " <<endl<<endl;

for(n=0 ; n<input_var ; n++ )


{
i_weight_1[n] = ( beta * weight_1[n] )/( eucl_norm_1 )

cout << i_weight_1[n] << "\t " ;


let << i_weight_1[n] << "\t " ;
}
cout << endl ;
let << endl ;
let << "\t** NGUYEN WIDROW INPUT ---> HIDDEN NEURON 2 WEIGHT*** "<< endl<<endl ;

for(n=0 ; n<(input_var) ; n++ )


{
i_weight_2[n] = ( beta * weight_2[n] )/( eucl_norm_2 )

cout << i_weight_2[n] << "\t " ;


let << i_weight_2[n] << "\t " ;
}
cout << endl ;
let << endl ;

let << "\t******** NGUYEN WIDROW FEEDBACK WEIGHT****** " <<endl << endl ;
for(n=0 ; n< (hidden_neuron) ; n++ )
{
i_weight_3[n] = ( beta * weight_3[n] )/( eucl_norm_2 )

cout << i_weight_3[n] << "\t " ;


let << i_weight_3[n] << "\t " ;
}
cout << endl ;
let << endl ;
let << "\t*** NGUYEN WIDROW

HIDDEN ---> OUTPUT NEURON

for(n=0 ; n<(hidden_neuron) ; n++)


{
cout << (i_weight_4[n]=weight_4[n]) << "\t" ;
let << (i_weight_4[n]=weight_4[n]) << "\t\t" ;
}

cout <<

endl ;

let <<

endl ;

WEIGHT*** "<<endl<<endl;

216:
217:
218:
219:
220:
221:
222:
223:
224:
225:
226:
227:
228:
229:
230:
231:
232:
233:
234:
235:
236:
237:
238:
239:
240:
241:
242:
243:
244:
245:
246:
247:
248:
249:
250:
251:
252:
253:
254:
255:
256:
257:
258:
259:
260:
261:
262:
263:
264:
265:
266:
267:
268:
269:

cout << " NGUYEN WIDROW BIAS " << endl << endl ;
let << "************* NGUYEN WIDROW BIAS*************** " <<
i_bias[0] = ( beta *
i_bias[1] = ( beta *

bias[0] )/( eucl_norm_1 )


bias[1] )/( eucl_norm_2 )

endl << endl ;

;
;

for(n=0; n<(hidden_neuron+output_neuron) ; n++ )


{
if( n==2 )
{
i_bias[2] = bias[2] ;
}
cout << "\t " ;
let
<< "\t " ;
cout << i_bias[n]
let << i_bias[n]
}
cout <<

endl ;

;
;

let <<

endl ;

void weight_summer()
{
nguyen_widrow() ;
//////////// for industrial input : output /////////////////////////////
if(n==1)
{
for(i=0 ; i<output_var ; i++ )
{
for( j=0 ; j<input_var ; j++ )
{
weighted_sum_1[i]= ( i_weight_1[j] * ind_input[i][j] ) +
( i_weight_3[0] * cal_output[i][0] ) + ( 1 * i_bias[0] ) ;
weighted_sum_2[i]= ( i_weight_2[j] * ind_input[i][j] ) +
( i_weight_3[1] * cal_output[i][0] ) + ( 1 * i_bias[1] ) ;
}
}
}

//////////// for residential input : output


else if(n==2)
{
for(i=0 ; i<output_var ; i++ )
{
for( j=0 ; j<input_var ; j++ )

///////////////

270:
{
271:
weighted_sum_1[i]= ( i_weight_1[j] * res_input[i][j] ) +
272:
( i_weight_3[0] * cal_output[i][0] ) + ( 1 * i_bias[0] ) ;
273:
weighted_sum_2[i]= ( i_weight_2[j] * res_input[i][j] ) +
274:
( i_weight_3[1] * cal_output[i][0] ) + ( 1 * i_bias[1] ) ;
275:
}
276:
}
277:
}
278:
279:
//////////// for commercial input : output
///////////////////////
280:
281:
else if(n==3)
282:
{
283:
for(i=0 ; i<output_var ; i++ )
284:
{
285:
for( j=0 ; j<input_var ; j++ )
286:
{
287:
weighted_sum_1[i]= ( i_weight_1[j] * com_input[i][j] ) +
288:
( i_weight_3[0] * cal_output[i][0] ) + ( 1 * i_bias[0] ) ;
289:
weighted_sum_2[i]= ( i_weight_2[j] * com_input[i][j] ) +
290:
( i_weight_3[1] * cal_output[i][0] ) + ( 1 * i_bias[1] ) ;
291:
}
292:
}
293:
}
294:
295: }
296:
297:
298: void forward_pass()
299: {
300:
weight_summer() ;
301:
302: /////////////// WEIGHTED SUM PASSES THROUGH THE SIGMOID ACTIVATION FUNCTION
////////
303:
304:
for(i=0 ; i<output_var ; i++)
305:
{
306:
net_out_1[i] = ( 1.0 / ( 1.0 + exp ( -weighted_sum_1[i]) ) ) ;
307:
net_out_2[i] = ( 1.0 / ( 1.0 + exp ( -weighted_sum_2[i]) ) ) ;
308:
}
309: }
310:
311: void final_sum()
312: {
313:
forward_pass() ;
314:
let<< endl << endl ;
315: for(i=0 ; i<output_var ; i++)
316: {
317: out_sum_1[i] = ( i_weight_4[0] * net_out_1[i] ) ;
318: out_sum_2[i] = ( i_weight_4[1] * net_out_2[i] ) ;
319: }
320:
321: for(i=0 ; i<output_var ; i++)
322: {

323:
out_sum_3[i] = (1 * i_bias[2]) + ( out_sum_1[i] ) + ( out_sum_2[i] ) ;
324:
let << "output_sum_[" << i << "]" <<"= " << out_sum_3[i] << endl
;
325: }
326:
327: }
328:
329:
330: void final_output()
331: {
332:
333:
/////// industrial
334:
if(n==1)
335:
{
336:
final_sum() ;
337:
let<< endl<<endl ;
338:
let<<
}
339:
340:
for(i=0 ; i<output_var ; i++ )
341:
{
342:
cal_output[i][0] = ( 1.0 / ( 1.0 + exp( -out_sum_3[i] ) ) ) ;
343:
344:
error[i] = ( ind_output[i][0] - cal_output[i][0] )
;
345: let << "ind_output_["
<< i << "]" << "= " << ind_output[i][0] << "\t\t"
346:
<< "final_output_[" << i << "]" << "= " << cal_output[i][0] << "\t\t"
347:
<< "error_vector_[" << i << "]" << "= " << error[i]
<< endl ;
348:
}
349:
}
350:
351:
//// residential
352:
else if(n==2)
353:
{
354:
final_sum() ;
355:
let<< endl<<endl ;
356:
let<< " \t\t\tTHE NETWORK ERROR---------> "<< endl<< endl ;
357:
358:
for(i=0 ; i<output_var ; i++ )
359:
{
360:
cal_output[i][0] = ( 1.0 / ( 1.0 + exp( -out_sum_3[i] ) ) ) ;
361:
error[i] = ( res_output[i][0] - cal_output[i][0] )
;
362:
let << "res_output_["
<< i << "]" << "= " << res_output[i][0] << "\t\t"
363:
<< "final_output_[" << i << "]" << "= " << cal_output[i][0] << "\t\t"
364:
<< "error_vector_[" << i << "]" << "= " << error[i]
<< endl ;
365:
}
366:
}
367:
368:
//////// commercial
369:
else if(n==3)
370:
{
371:
final_sum() ;
372:
let<< endl<<endl ;
373:
let<< " \t\t\tTHE NETWORK ERROR---------> "<< endl<< endl ;
374:
375:
for(i=0 ; i<output_var ; i++ )
376:
{

377:
378:
379:
380:
381:
382:
383:
384:
385:
386:
387:
388:
389:
390:
391:
392:
393:
394:
395:
396:
397:
398:
399:
400:
401:
402:
403:
404:
405:
406:
407:
408:
409:
410:
411:
412:
413:
414:
415:
416:
417:
418:
419:
420:
421:
422:
423:
424:
425:
426:
427:
428:
429:
430:

cal_output[i][0] = ( 1.0 / ( 1.0 + exp( -out_sum_3[i] ) ) ) ;


error[i] = ( com_output[i][0] - cal_output[i][0] )
;
let << "com_output_["
<< i << "]" << "= " << com_output[i][0] << "\t\t"
<< "final_output_[" << i << "]" << "= " << cal_output[i][0] << "\t\t"
<< "error_vector_[" << i << "]" << "= " << error[i]
<< endl ;
}
}

// Backward pass: computes the local gradients (sigmoid derivatives) of the
// output neuron and of both hidden neurons, logging each to "let".
void back_pass()
{
    //////////////// BACKWARD PASS BEGINS..... local gradients //////////
    let << endl << endl ;
    for ( i=0 ; i<output_var ; i++)
    {
        // NOTE(review): the usual output delta is error * out * (1 - out);
        // here only the derivative is taken (no error[i] factor) — this may
        // be deliberate for the Jacobian computation; confirm.
        delta_o[i] = (cal_output[i][0] * ( 1 - cal_output[i][0] ) ) ;
        let << "delta_o_[" << i << "]" << "= " << delta_o[i] << endl ;
    }

    let << endl << endl ;
    for ( i=0 ; i<output_var ; i++)
    {
        delta_h1[i] = ( net_out_1[i] * ( 1 - net_out_1[i]) ) *
                      ( delta_o[i] * i_weight_4[0] ) ;
        let << "delta_h1_[" << i << "]" << "= " << delta_h1[i] << endl ;
    }

    let << endl << endl ;
    for ( i=0 ; i<output_var ; i++)
    {
        delta_h2[i] = ( net_out_2[i] * ( 1 - net_out_2[i]) ) *
                      ( delta_o[i] * i_weight_4[1] ) ;
        let << "delta_h2_[" << i << "]" << "= " << delta_h2[i] << endl ;
    }
}

void sse_error_1()
{
SSE_1=0 ;
squared_error_sum_1=0 ;
for(i=0 ; i<output_var ; i++)
{
squared_error_sum_1+=pow (error[i],2)

431:
432:
433:
434:
435:
436:
437:
438:
439:
440:
441:
442:
443:
444:
445:
446:
447:
448:
449:
450:
451:
452:
453:
454:
455:
456:
457:
458:
459:
460:
461:
462:
463:
464:
465:
466:
467:
468:
469:
470:
471:
472:
473:
474:
475:
476:
477:
478:
479:
480:
481:
482:
483:
484:

}
SSE_1 = (squared_error_sum_1 / 2 ) ;
let<<endl ;
let << "THE SUM OF SQUARED ERROR"
let << "sum_squared_error_1= " <<

<< endl << endl ;


SSE_1 << endl <<

endl ;

void sse_error_2()
{
SSE_2=0 ;
squared_error_sum_2=0 ;
for(i=0 ; i<output_var ; i++)
{
squared_error_sum_2+=pow (error[i],2)
}

SSE_2 = (squared_error_sum_2 / 2 ) ;
let << endl ;
let << "THE SUM OF SQUARED ERROR"
let << "sum_squared_error_2= " <<

<< endl << endl ;


SSE_2 << endl <<

endl ;

/////////////////////////////// forward and backward computation


void FB_computation_1()
{
final_output()
}

void FB_computation_2()
{
final_output()
}

///////////

; back_pass() ; sse_error_1() ;

; back_pass() ; sse_error_2() ;

///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////// jacobian matrix all rows ///////////////////////////////
void jacobian_matrix()
{
FB_computation_1() ;

485:
486: ///////////// if industrial inputs/outputs variable is selected for training ///
487:
if(i==1)
488:
{
489:
///////// hidden layer 1 jacobian ////////
490:
491:
for(i=0 ; i<output_var ; i++)
492:
{
493:
for(j=0 ; j<input_var ; j++)
494:
{
495:
H_layer1[i][j] = (ind_input[i][j] * delta_h1[i] ) ;
496:
}
497:
}
498:
499:
//////
hidden layer 2 jacobian
///////////////
500:
501:
for( i=0 ; i<output_var ; i++ )
502:
{
503:
for(j=0 ; j<input_var ; j++)
504:
{
505:
H_layer2[i][j] = ( ind_input[i][j] * delta_h2[i] ) ;
506:
}
507:
}
508:
509:
////// output layer jacobian /////////
510:
for( i=0 ; i<output_var ; i++ )
511:
{
512:
o_layer_1[i] = ( net_out_1[i] * delta_o[i] )
;
513:
o_layer_2[i] = ( net_out_2[i] * delta_o[i] )
;
514:
}
515:
516:
////// hidden layer 1 bias jacobian ////////
517:
for(i=0 ; i<output_var ; i++)
518:
{
519:
H_layer_3[i] = ( cal_output[i][0] * delta_h1[i] )
;
520:
H_layer_4[i] = ( cal_output[i][0] * delta_h2[i] )
;
521:
}
522:
523:
524: ////// hidden layer 1 bias jacobian ////////
525:
for(i=0 ; i<output_var ; i++)
526:
{
527:
j_bias1[i] = ( i_bias[0] * delta_h1[i] )
;
528:
}
529:
530:
////// " hidden layer 2 bias jacobian " //////////
531:
for(i=0 ; i<output_var ; i++)
532:
{
533:
j_bias2[i] = ( i_bias[1] * delta_h2[i] )
;
534:
}
535:
536:
537:
///////" output layer bias jacobian " //////////
538:
for(i=0 ; i<output_var ; i++)

539:
{
540:
j_bias3[i] = ( i_bias[2] * delta_o[i] )
;
541:
}
542:
543:
}
544:
545: ///////////////////////////////////////////////////////////////////////////////
546: ///////////////////////////////////////////////////////////////////////////////
547:
548: //////////// if residential input/output is selected for training //////////////
549: if(i==2)
550:
{
551:
///////// hidden layer 1 jacobian ////////
552:
553:
for(i=0 ; i<output_var ; i++)
554:
{
555:
for(j=0 ; j<input_var ; j++)
556:
{
557:
H_layer1[i][j] = (res_input[i][j] * delta_h1[i] ) ;
558:
}
559:
}
560:
561:
//////
hidden layer 2 jacobian
///////////////
562:
563:
for( i=0 ; i<output_var ; i++ )
564:
{
565:
for(j=0 ; j<input_var ; j++)
566:
{
567:
H_layer2[i][j] = ( res_input[i][j] * delta_h2[i] ) ;
568:
}
569:
}
570:
571:
////// output layer jacobian /////////
572:
for( i=0 ; i<output_var ; i++ )
573:
{
574:
o_layer_1[i] = ( net_out_1[i] * delta_o[i] )
;
575:
o_layer_2[i] = ( net_out_2[i] * delta_o[i] )
;
576:
}
577:
578:
////// hidden layer 1 bias jacobian ////////
579:
for(i=0 ; i<output_var ; i++)
580:
{
581:
H_layer_3[i] = ( cal_output[i][0] * delta_h1[i] )
;
582:
H_layer_4[i] = ( cal_output[i][0] * delta_h2[i] )
;
583:
}
584:
585:
586: ////// hidden layer 1 bias jacobian ////////
587:
for(i=0 ; i<output_var ; i++)
588:
{
589:
j_bias1[i] = ( i_bias[0] * delta_h1[i] )
;
590:
}
591:
592:
////// " hidden layer 2 bias jacobian " //////////

593:
for(i=0 ; i<output_var ; i++)
594:
{
595:
j_bias2[i] = ( i_bias[1] * delta_h2[i] )
;
596:
}
597:
598:
599:
///////" output layer bias jacobian " //////////
600:
for(i=0 ; i<output_var ; i++)
601:
{
602:
j_bias3[i] = ( i_bias[2] * delta_o[i] )
;
603:
}
604:
605:
}
606:
607: ////////////////////// jacobian matrix row 1 ///////////////////////////////
608:
609:
for(i=0 ; i<(output_var) ; i++)
610:
{
611:
for(j=0 ; j< ((2*input_var) + (2*hidden_neuron) +
612:
(hidden_neuron + output_neuron ) ) ; j++ )
613:
{
614:
615:
if ( j>=0 && j<6 )
616:
{
617:
jac[i][j]= H_layer1[i][j] ;
618:
}
619:
620:
if ( j>=6 && j<12 )
621:
{
622:
jac[i][j]= H_layer2[i][j-6] ;
623:
}
624:
625:
626:
if( j==12 )
627:
{
628:
jac[i][j]= H_layer_3[j-12+i] ;
629:
}
630:
631:
if( j==13 )
632:
{
633:
jac[i][j]= H_layer_4[j-13+i] ;
634:
}
635:
636:
637:
if( j==14 )
638:
{
639:
jac[i][j]= o_layer_1[j-14+i] ;
640:
}
641:
642:
if( j==15 )
643:
{
644:
jac[i][j]= o_layer_2[j-15+i] ;
645:
}
646:

647:
648:
649:
650:
651:
652:
653:
654:
655:
656:
657:
658:
659:
660:
661:
662:
663:
664:
665:
666:
667:
668:
669:
670:
671:
672:
673:
674:
675:
676:
677:
678:
679:
680:
681:
682:
683:
684:
685:
686:
687:
688:
689:
690:
691:
692:
693:
694:
695:
696:
697:
698:
699:
700:

if( j==16 )
{
jac[i][j]=
}
if( j==17 )
{
jac[i][j]=
}
if( j==18 )
{
jac[i][j]=
}
}
}

j_bias1[j-16+i] ;

j_bias2[j-17+i] ;

j_bias3[j-18+i] ;

}
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
/////////// if commercial input/output is selected for training ///////////
if(i==3)
{
///////// hidden layer 1 jacobian ////////
for(i=0 ; i<output_var ; i++)
{
for(j=0 ; j<input_var ; j++)
{
H_layer1[i][j] = (com_input[i][j] * delta_h1[i] ) ;
}
}
//////

hidden layer 2 jacobian

///////////////

for( i=0 ; i<output_var ; i++ )


{
for(j=0 ; j<input_var ; j++)
{
H_layer2[i][j] = ( com_input[i][j] * delta_h2[i] ) ;
}
}
////// output layer jacobian /////////
for( i=0 ; i<output_var ; i++ )
{
o_layer_1[i] = ( net_out_1[i] * delta_o[i] )
o_layer_2[i] = ( net_out_2[i] * delta_o[i] )
}
////// hidden layer 1 bias jacobian ////////
for(i=0 ; i<output_var ; i++)
{

;
;

701:
702:
703:
704:
705:
706:
707:
708:
709:
710:
711:
712:
713:
714:
715:
716:
717:
718:
719:
720:
721:
722:
723:
724:
725:
726:
727:
728:
729:
730:
731:
732:
733:
734:
735:
736:
737:
738:
739:
740:
741:
742:
743:
744:
745:
746:
747:
748:
749:
750:
751:
752:
753:
754:

H_layer_3[i] = ( cal_output[i][0] * delta_h1[i] )


H_layer_4[i] = ( cal_output[i][0] * delta_h2[i] )

;
;

////// hidden layer 1 bias jacobian ////////


for(i=0 ; i<output_var ; i++)
{
j_bias1[i] = ( i_bias[0] * delta_h1[i] )
}

////// " hidden layer 2 bias jacobian " //////////


for(i=0 ; i<output_var ; i++)
{
j_bias2[i] = ( i_bias[1] * delta_h2[i] )
;
}

///////" output layer bias jacobian " //////////


for(i=0 ; i<output_var ; i++)
{
j_bias3[i] = ( i_bias[2] * delta_o[i] )
;
}
}
if(i==1)
{
jacobian << " \t\tRESIDENTIAL JACOBIAN MATRICES

" << endl << endl ;

{
for(j=0 ; j< ((2*input_var) + (2*hidden_neuron) +
(hidden_neuron + output_neuron ) ) ; j++ )
{
jacobian << jac[i][j] << " \t\t" ;
}
jacobian << endl ;
}
}
if(i==2)
{
jacobian << " \t\tRESIDENTIAL JACOBIAN MATRICES
" <<endl << endl ;
for(i=0 ; i<(output_var) ; i++)
{
for(j=0 ; j< ((2*input_var) + (2*hidden_neuron) +
(hidden_neuron + output_neuron ) ) ; j++ )
{
jacobian << jac[i][j] << " \t\t" ;
}
jacobian << endl ;
}
}

755:
756:
757:
758:
759:
760:
761:
762:
763:
764:
765:
766:
767:
768:
769:
770:
771:
772:
773:
774:
775:
776:
777:
778:
779:
780:
781:
782:
783:
784:
785:
786:
787:
788:
789:
790:
791:
792:
793:
794:
795:
796:
797:
798:
799:
800:
801:
802:
803:
804:
805:
806:
807:
808:

if(i==3)
{
jacobian << " \t\tCOMMERCIAL JACOBIAN MATRICES
" << endl << endl ;
for(i=0 ; i<(output_var) ; i++)
{
for(j=0 ; j< ((2*input_var) + (2*hidden_neuron) +
(hidden_neuron + output_neuron ) ) ; j++ )
{
jacobian << jac[i][j] << " \t\t" ;
}
jacobian << endl ;
}
}
}

int main(int argc, char *argv[])


{
cout << "NUMBER OF INPUT VARIABLE = "
cin >> input_var ;
cout << endl ;

cout << "NUMBER OF OUTPUT VARIABLE = "


cin >> output_var ;
cout << endl ;
cout << "NUMBER OF HIDDEN NEURON= "
cin >> hidden_neuron ;
cout << endl ;

cout << "NUMBER OF OUTPUT NEURON= "


cin >> output_neuron ;
cout << endl ;

cout << "Enter


cout << "Enter
cout << "Enter
cin >> n

: 1
: 2
: 3

------> for industrial input : output


------> for residential input : output
------> for commercial input : output

"
"
"

<< endl ;
<< endl ;
<< endl ;

//// ask for input

switch(n)
{
1 :
{
case
cout<< "you have selected < INDUSTRIAL INPUT : OUTPUT

>" << endl << endl ;

}
break ;
2 :
{
case
cout<< "you have selected < RESIDENTIAL INPUT : OUTPUT > " << endl <<endl ;

809:
810:
811:
812:
813:
814:
815:
816:
817:
818:
819:
820:
821:
822:
823:
824:
825: }
826:
827:
828:

}
break ;
3 :
{
case
cout<< "you have selected < COMMERCIAL INPUT : OUTPUT
}
break ;
}

jacobian_matrix() ;
system("PAUSE");
return EXIT_SUCCESS;

> " << endl << endl ;

You might also like