def mask(path):
    """Load an image and black out bright, background-like pixels.

    Reads the image at *path*, converts BGR -> RGB, resizes to 128x128,
    zeroes every pixel whose channels are all bright (R >= 160, G >= 140,
    B >= 140 — presumably the light background; confirm against the data),
    and returns the masked RGB image as uint8.

    Args:
        path: Filesystem path readable by ``cv2.imread``.

    Returns:
        128x128x3 uint8 array with thresholded pixels set to 0.
    """
    x = cv2.resize(cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB), (128, 128))
    # Build the binary keep-mask directly as uint8 (255 = keep, 0 = drop);
    # the original built it in float64 and converted at the end, which is
    # a needless round-trip for a 0/255 mask.
    m = np.full(x.shape, 255, dtype=np.uint8)
    m[(x[:, :, 0] >= 160) & (x[:, :, 1] >= 140) & (x[:, :, 2] >= 140)] = 0
    # Dilating grows the white (keep) region, shrinking the dropped area by
    # ~2 px so thin features clipped by the threshold are recovered.
    kernel = np.ones((3, 3), dtype=np.uint8)
    m = cv2.dilate(m, kernel, iterations=2)
    return cv2.bitwise_and(x, m)
Output images
Model
The model is a Sequential network of convolutional and fully connected layers. Dropout of 0.4 is applied to the fully connected layer, and L2 weight regularization is added to keep the weights small so the model generalizes better. The network uses four convolutional layers, each with a 3x3 kernel and a stride of 1, and each followed by a max-pooling layer that halves the spatial dimensions.
def conv_layer(filters):
    """Return a conv block: Conv2D -> BatchNorm -> LeakyReLU -> MaxPool.

    The 3x3 convolution uses stride 1, 'same' padding, and L2 weight
    regularization; the 2x2 max-pool halves the spatial dimensions.

    Args:
        filters: Number of convolution filters.

    Returns:
        A Keras ``Sequential`` sub-network.
    """
    block = Sequential()
    block.add(Conv2D(filters, (3, 3), strides=1, padding='same',
                     kernel_regularizer='l2'))
    block.add(BatchNormalization())
    block.add(LeakyReLU())
    block.add(MaxPooling2D((2, 2)))
    return block
def dens_layer(hiddenx):
    """Return a dense block: Dense -> BatchNorm -> Dropout(0.4) -> LeakyReLU.

    The dense layer uses L2 weight regularization.

    Args:
        hiddenx: Number of units in the dense layer.

    Returns:
        A Keras ``Sequential`` sub-network.
    """
    block = Sequential()
    for layer in (Dense(hiddenx, kernel_regularizer='l2'),
                  BatchNormalization(),
                  Dropout(.4),
                  LeakyReLU()):
        block.add(layer)
    return block
def cnn(filter1,
        filter2,
        filter3,
        filter4, hidden1):
    """Build and compile the 4-class image classifier.

    Stacks four conv blocks on a 128x128x3 input, flattens, applies one
    dense block, and finishes with a 4-way softmax. Compiled with
    categorical cross-entropy and Adam (lr=1e-4).

    Args:
        filter1: Filters in the first conv block.
        filter2: Filters in the second conv block.
        filter3: Filters in the third conv block.
        filter4: Filters in the fourth conv block.
        hidden1: Units in the hidden dense block.

    Returns:
        A compiled Keras ``Sequential`` model.
    """
    net = Sequential()
    net.add(Input((128, 128, 3,)))
    for n_filters in (filter1, filter2, filter3, filter4):
        net.add(conv_layer(n_filters))
    net.add(Flatten())
    net.add(dens_layer(hidden1))
    net.add(Dense(4, activation='softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer=Adam(learning_rate=0.0001),
                metrics=['accuracy'])
    return net
Performance
Validation data
Testing data