6. Activation Maps
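
In this section we visualize what the trained network has learned by synthesizing input images that maximally activate individual convolutional filters and output nodes, using the keras-vis package.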

In [0]:
# !!! THIS IS ONLY NEEDED ON GOOGLE COLAB
# !!! If you are using Jupyter, you only need to install the package once

# This has to be executed every time the kernel is restarted.
# The default Colab versions of tensorflow, keras and keras-vis lead to an error;
# this should be fixed in newer versions.

#!pip install git+https://github.com/raghakot/keras-vis.git -U
In [0]:
import matplotlib.pyplot as plt
from matplotlib import cm
from vis.visualization import visualize_activation
from vis.utils import utils
from keras import activations
In [0]:
model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 26, 26, 16)        160       
_________________________________________________________________
batch_normalization_1 (Batch (None, 26, 26, 16)        64        
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 24, 24, 16)        2320      
_________________________________________________________________
batch_normalization_2 (Batch (None, 24, 24, 16)        64        
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 12, 12, 16)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 12, 12, 16)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 10, 10, 32)        4640      
_________________________________________________________________
batch_normalization_3 (Batch (None, 10, 10, 32)        128       
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 8, 8, 32)          9248      
_________________________________________________________________
batch_normalization_4 (Batch (None, 8, 8, 32)          128       
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 32)          0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 4, 4, 32)          0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 512)               262656    
_________________________________________________________________
dropout_3 (Dropout)          (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 1024)              525312    
_________________________________________________________________
dropout_4 (Dropout)          (None, 1024)              0         
_________________________________________________________________
dense_3 (Dense)              (None, 10)                10250     
=================================================================
Total params: 814,970
Trainable params: 814,778
Non-trainable params: 192
_________________________________________________________________

6.1 Maximal Activations
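
visualize_activation performs activation maximization: starting from a random seed image, it runs gradient ascent on the input pixels so that the activation of the chosen filter (filter_indices) in the chosen layer (layer_idx) becomes as large as possible; input_range tells the optimizer which pixel range the model expects. The resulting images show the kind of pattern each filter responds to most strongly.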

In [0]:
# Utility to search for the layer index by name.
layer_idx = utils.find_layer_idx(model, 'conv2d_1')
plt.figure(figsize=(10,6))
model = utils.apply_modifications(model)
for i in range(16):
    # This is the filter whose activation we want to maximize.
    filter_idx = i
    img = visualize_activation(model, layer_idx, filter_indices=filter_idx, input_range=(0., 255.))
    plt.subplot(4,5,i+1)
    plt.imshow(img[..., 0], cmap=cm.viridis)
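
Typically the filters of the first convolutional layer respond to simple, low-level patterns such as edges and blobs, while the deeper layers visualized below tend to prefer increasingly complex textures.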
In [0]:
# Utility to search for the layer index by name.
layer_idx = utils.find_layer_idx(model, 'conv2d_2')
plt.figure(figsize=(10,6))
model = utils.apply_modifications(model)
for i in range(16):
    # This is the filter whose activation we want to maximize.
    filter_idx = i
    img = visualize_activation(model, layer_idx, filter_indices=filter_idx, input_range=(0., 255.))
    plt.subplot(4,5,i+1)
    plt.imshow(img[..., 0], cmap=cm.viridis)
In [0]:
# Utility to search for the layer index by name.
layer_idx = utils.find_layer_idx(model, 'conv2d_3')
plt.figure(figsize=(10,6))
model = utils.apply_modifications(model)
for i in range(32):
    # This is the filter whose activation we want to maximize.
    filter_idx = i
    img = visualize_activation(model, layer_idx, filter_indices=filter_idx, input_range=(0., 255.))
    plt.subplot(6,6,i+1)
    plt.imshow(img[..., 0], cmap=cm.viridis)
In [0]:
# Utility to search for the layer index by name.
layer_idx = utils.find_layer_idx(model, 'conv2d_4')
plt.figure(figsize=(10,6))
model = utils.apply_modifications(model)
for i in range(32):
    # This is the filter whose activation we want to maximize.
    filter_idx = i
    img = visualize_activation(model, layer_idx, filter_indices=filter_idx, input_range=(0., 255.))
    plt.subplot(6,6,i+1)
    plt.imshow(img[..., 0], cmap=cm.viridis)
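
For the dense layers below, the layer's activation is swapped with a linear one before the optimization. Maximizing through a squashing non-linearity (in particular through the final softmax, where the target node competes with every other class) yields poorly behaved gradients, so the maximization is done on the pre-activation output instead; apply_modifications rebuilds the model so that the change takes effect.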
In [0]:
# Utility to search for the layer index by name.
layer_idx = utils.find_layer_idx(model, 'dense_1')
plt.figure(figsize=(10,6))
# Swap the layer's activation with linear before optimizing
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
for i in range(10):
    # This is the output unit we want to maximize.
    filter_idx = i
    img = visualize_activation(model, layer_idx, filter_indices=filter_idx, input_range=(0., 1.))
    plt.subplot(6,6,i+1)
    plt.imshow(img[..., 0], cmap=cm.viridis)
In [0]:
# Utility to search for the layer index by name.
layer_idx = utils.find_layer_idx(model, 'dense_2')
plt.figure(figsize=(10,6))
# Swap the layer's activation with linear before optimizing
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
for i in range(10):
    # This is the output unit we want to maximize.
    filter_idx = i
    img = visualize_activation(model, layer_idx, filter_indices=filter_idx, input_range=(0., 1.))
    plt.subplot(6,6,i+1)
    plt.imshow(img[..., 0], cmap=cm.viridis)
In [0]:
# Utility to search for the layer index by name.
# Alternatively we could specify this as -1, since dense_3 is the last layer.
layer_idx = utils.find_layer_idx(model, 'dense_3')
plt.figure(figsize=(10,6))
# Swap softmax with linear before optimizing
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
for i in range(10):
    # This is the output node (class) we want to maximize.
    filter_idx = i
    img = visualize_activation(model, layer_idx, filter_indices=filter_idx, input_range=(0., 1.))
    plt.subplot(6,6,i+1)
    plt.imshow(img[..., 0], cmap=cm.viridis)
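
The seven visualization cells above repeat the same loop, with only the layer name, the number of filters and the subplot grid changing. As a sketch of how this could be consolidated (the helper name plot_max_activations and its signature are made up for illustration, not part of keras-vis):

In [0]:
def plot_max_activations(model, layer_name, n_filters, grid,
                         input_range=(0., 255.), swap_to_linear=False):
    """Show inputs that maximally activate the first n_filters of layer_name."""
    layer_idx = utils.find_layer_idx(model, layer_name)
    if swap_to_linear:
        # Replace the layer's activation with linear before optimizing
        model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)
    plt.figure(figsize=(10,6))
    for i in range(n_filters):
        img = visualize_activation(model, layer_idx, filter_indices=i,
                                   input_range=input_range)
        plt.subplot(grid[0], grid[1], i+1)
        plt.imshow(img[..., 0], cmap=cm.viridis)
    return model

# For example:
# model = plot_max_activations(model, 'conv2d_3', 32, (6, 6))
# model = plot_max_activations(model, 'dense_3', 10, (6, 6), input_range=(0., 1.), swap_to_linear=True)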