2. Implementing a Neural Network

2.1 Hyperparameters

In [7]:
# Hyperparameters
training_epochs = 100 # Total number of training epochs
learning_rate = 0.01 # The learning rate

2.2 Creating a model

In [8]:
# Create a model
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

def create_model():
  model = Sequential()
  # First hidden layer (also defines the 64-dimensional input)
  model.add(Dense(64, input_dim=64, kernel_initializer='normal',
            kernel_regularizer=keras.regularizers.l2(0.01), activation='tanh'))
  # Output layer: one unit per class
  model.add(Dense(10, activation='softmax'))

  # Compile the model with the Adam optimizer
  model.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=learning_rate),
                metrics=['accuracy'])
  return model
model = create_model()
model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_1 (Dense)              (None, 64)                4160      
_________________________________________________________________
dense_2 (Dense)              (None, 10)                650       
=================================================================
Total params: 4,810
Trainable params: 4,810
Non-trainable params: 0
_________________________________________________________________
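The parameter counts in the summary can be checked by hand: a Dense layer has inputs × units weights plus units bias terms. A quick sanity check:

# Dense layer parameters = inputs * units + units (bias terms)
hidden_params = 64 * 64 + 64   # 4160, matches dense_1
output_params = 64 * 10 + 10   # 650, matches dense_2
print(hidden_params, output_params, hidden_params + output_params)  # 4160 650 4810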

2.3 Train the model

Let's train the model for the given number of epochs.

In [9]:
results = model.fit(
    X_train, y_train,
    epochs=training_epochs,
    batch_size=516,
    validation_data=(X_test, y_test),
    verbose=0
)
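
fit returns a History object whose history dictionary stores the per-epoch metrics. As a quick sketch, we can inspect what was recorded (the exact key names depend on the Keras version, e.g. 'acc'/'val_acc' in older releases versus 'accuracy'/'val_accuracy' in newer ones):

# Inspect the metrics recorded during training
print(results.history.keys())
print("Final training accuracy:   %.4f" % results.history['acc'][-1])
print("Final validation accuracy: %.4f" % results.history['val_acc'][-1])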

2.4 Test the model

The trained model can now generate class predictions for the test samples.

In [10]:
prediction_values = model.predict_classes(X_test)
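
To spot-check these predictions, we can compare them with the ground-truth classes. The sketch below assumes y_test is one-hot encoded (as categorical_crossentropy requires), so the class indices are recovered with argmax:

import numpy as np

# Recover class indices from the one-hot encoded labels (assumes y_test is one-hot)
true_values = np.argmax(y_test, axis=1)

# Compare a few predictions with the ground truth and count the mismatches
print("Predicted:", prediction_values[:10])
print("Actual:   ", true_values[:10])
print("Misclassified test samples:", np.sum(prediction_values != true_values))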

2.5 Accuracy

Test accuracy, computed as the mean validation accuracy over all training epochs:

In [11]:
print("Test-Accuracy:","%.2f%%" % (np.mean(results.history["val_acc"])*100))
Test-Accuracy: 96.26%

2.6 Evaluate the model

Now we can check the loss and accuracy of the model on both the training and test sets.

In [12]:
print("Evaluating on training set...")
(loss, accuracy) = model.evaluate(X_train,y_train)
print("loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))


print("Evaluating on testing set...")
(loss, accuracy) = model.evaluate(X_test, y_test)
print("loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
Evaluating on training set...
1203/1203 [==============================] - 0s 17us/step
loss=0.0735, accuracy: 99.9169%
Evaluating on testing set...
594/594 [==============================] - 0s 18us/step
loss=0.1393, accuracy: 97.9798%

2.7 Summarize history for accuracy

In [13]:
# summarize history for accuracy
plt.plot(results.history['acc'])
plt.plot(results.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')
Out[13]:
<matplotlib.legend.Legend at 0x2cc83bc3e10>

2.8 Summarize history for loss

In [14]:
# summarize history for loss
plt.plot(results.history['loss'])
plt.plot(results.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')

max_loss = np.max(results.history['loss'])
min_loss = np.min(results.history['loss'])
print("Maximum Loss : {:.4f}".format(max_loss))
print("")
print("Minimum Loss : {:.4f}".format(min_loss))
print("")
print("Loss difference : {:.4f}".format((max_loss - min_loss)))
Maximum Loss : 2.8768

Minimum Loss : 0.0351

Loss difference : 2.8417
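
The same summary can be computed for the validation loss recorded at each epoch; a short sketch using the val_loss history plotted above:

# Same summary for the per-epoch validation loss
max_val_loss = np.max(results.history['val_loss'])
min_val_loss = np.min(results.history['val_loss'])
print("Maximum validation loss : {:.4f}".format(max_val_loss))
print("Minimum validation loss : {:.4f}".format(min_val_loss))
print("Validation loss difference : {:.4f}".format(max_val_loss - min_val_loss))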