# architecture/conv_classifier.py
  1  #!/usr/bin/env python3
  2  # -*- coding: utf-8 -*-
  3  """
  4  Created on Wed Mar 23 15:49:31 2022
  5  
  6  @author: aleoikon
  7  """
  8  import tensorflow as tf
  9  from tensorflow import keras
 10  from tensorflow.keras import layers
 11  from keras.regularizers import l2
 12  
 13  def abs_diff(vects):
 14      x,y = vects
 15      result = tf.math.abs(tf.math.subtract(x,y))
 16      return result
 17  
 18  def conv_classifier(depth, dropout, decay, IMG_HEIGHT=96, IMG_WIDTH=96, IMG_CHANNELS=3):
 19      input_1 = layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
 20      input_2 = layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
 21      
 22      ##depth 1
 23      x_1 = layers.BatchNormalization(name='norm_1')(input_1)
 24      
 25      conv1_1 = layers.Conv2D(32, (3, 3), activation="relu", kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same", name='conv1_1')(x_1)
 26      drop1_1 = layers.Dropout(dropout, seed=1, name = 'dropout1_1')(conv1_1)
 27      ##depth 2
 28      conv2_1 = layers.Conv2D(32, (3, 3), activation="relu", kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same",name='conv2_1')(drop1_1)
 29      drop2_1 = layers.Dropout(dropout, seed=1, name='dropout2_1')(conv2_1)
 30      
 31      ##depth 3
 32      conv3_1 = layers.Conv2D(32, (3, 3), activation="relu", kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same",name='conv3_1')(drop2_1)
 33      drop3_1 = layers.Dropout(dropout, seed=1, name='dropout3_1')(conv3_1)
 34         
 35      x_2 = layers.BatchNormalization(name='norm_2')(input_2)
 36      ##depth 1
 37      conv1_2 = layers.Conv2D(32, (3, 3), activation="relu", kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same", name='conv1_2')(x_2)
 38      drop1_2 = layers.Dropout(dropout, seed=1, name = 'dropout1_2')(conv1_2)
 39      
 40      ##depth 2
 41      conv2_2 = layers.Conv2D(32, (3, 3), activation="relu", kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same",name='conv2_2')(drop1_2)
 42      drop2_2 = layers.Dropout(dropout, seed=1, name='dropout2_2')(conv2_2)
 43       
 44      ##depth 3
 45      conv3_2 = layers.Conv2D(32, (3, 3), activation="relu", kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same",name='conv3_2')(drop2_2)
 46      drop3_2 = layers.Dropout(dropout, seed=1, name='dropout3_2')(conv3_2)
 47      #######################################UPSAMPLING#####################################################
 48      
 49      if depth == 1:
 50          distance = layers.Lambda(abs_diff)([drop1_1, drop1_2])
 51      if depth == 2:
 52          distance = layers.Lambda(abs_diff)([drop2_1, drop2_2])
 53      if depth == 3:
 54          distance = layers.Lambda(abs_diff)([drop3_1, drop3_2])
 55      
 56      
 57      
 58      conv2dsmall_1 = layers.Conv2D(32, (1,1), activation="relu", padding="same", name='conv2dsmall_1')(distance)
 59      conv2dsmall_2 = layers.Conv2D(16, (1,1), activation="relu", padding="same", name='conv2dsmall_2')(conv2dsmall_1)
 60      conv2dsmall_3 = layers.Conv2D(8, (1,1), activation="relu", padding="same", name='conv2dsmall_3')(conv2dsmall_2)    
 61      conv2dsmall_4 = layers.Conv2D(4, (1,1), activation="relu", padding="same", name='conv2dsmall_4')(conv2dsmall_3)
 62      
 63      output = layers.Conv2D(2,(1,1), activation = "softmax", padding="same", name="output")(conv2dsmall_4)
 64      
 65      change_detection = keras.Model([input_1, input_2], output, name='change_detection')
 66      
 67      return change_detection
 68  
 69  def conv_classifier_two(depth, dropout, decay, IMG_HEIGHT=96, IMG_WIDTH=96, IMG_CHANNELS=3):
 70      input_1 = layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
 71      input_2 = layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
 72      
 73      ##depth 1
 74      x_1 = layers.BatchNormalization(name='norm_1')(input_1)
 75      
 76      conv1_1 = layers.Conv2D(32, (3, 3), kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same", name='conv1_1')(x_1)
 77      batch_norm1_1 = layers.BatchNormalization(name='norm1_1')(conv1_1)
 78      activation1_1 = layers.Activation('relu', name='relu1_1')(batch_norm1_1)
 79      drop1_1 = layers.Dropout(dropout, seed=1, name='dropout1_1')(activation1_1)
 80      
 81      ##depth 2
 82      conv2_1 = layers.Conv2D(32, (3, 3), kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same",name='conv2_1')(drop1_1)
 83      batch_norm2_1 = layers.BatchNormalization(name='norm2_1')(conv2_1)
 84      activation2_1 = layers.Activation('relu', name='relu2_1')(batch_norm2_1)
 85      drop2_1 = layers.Dropout(dropout, seed=1, name='dropout2_1')(activation2_1)
 86      
 87      ##depth 3
 88      conv3_1 = layers.Conv2D(32, (3, 3), kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same",name='conv3_1')(drop2_1)
 89      batch_norm3_1 = layers.BatchNormalization(name='norm3_1')(conv3_1)
 90      activation3_1 = layers.Activation('relu', name='relu3_1')(batch_norm3_1)
 91      drop3_1 = layers.Dropout(dropout, seed=1, name='dropout3_1')(activation3_1)
 92         
 93      x_2 = layers.BatchNormalization(name='norm_2')(input_2)
 94      
 95      ##depth 1
 96      conv1_2 = layers.Conv2D(32, (3, 3), kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same", name='conv1_2')(x_2)
 97      batch_norm1_2 = layers.BatchNormalization(name='norm1_2')(conv1_2)
 98      activation1_2 = layers.Activation('relu', name='relu1_2')(batch_norm1_2)
 99      drop1_2 = layers.Dropout(dropout, seed=1, name='dropout1_2')(activation1_2)
100      
101      ##depth 2
102      conv2_2 = layers.Conv2D(32, (3, 3), kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same",name='conv2_2')(drop1_2)
103      batch_norm2_2 = layers.BatchNormalization(name='norm2_2')(conv2_2)
104      activation2_2 = layers.Activation('relu', name='relu2_2')(batch_norm2_2)
105      drop2_2 = layers.Dropout(dropout, seed=1, name='dropout2_2')(activation2_2)
106       
107      ##depth 3
108      conv3_2 = layers.Conv2D(32, (3, 3), kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same",name='conv3_2')(drop2_2)
109      batch_norm3_2 = layers.BatchNormalization(name='norm3_2')(conv3_2)
110      activation3_2 = layers.Activation('relu', name='relu3_2')(batch_norm3_2)
111      drop3_2 = layers.Dropout(dropout, seed=1, name='dropout3_2')(activation3_2)
112      #######################################UPSAMPLING#####################################################
113      
114      if depth == 1:
115          distance = layers.Lambda(abs_diff)([drop1_1, drop1_2])
116      if depth == 2:
117          distance = layers.Lambda(abs_diff)([drop2_1, drop2_2])
118      if depth == 3:
119          distance = layers.Lambda(abs_diff)([drop3_1, drop3_2])
120      
121      
122      '''
123      conv2dsmall_1 = layers.Conv2D(32, (1,1), activation="relu", padding="same", name='conv2dsmall_1')(distance)
124      conv2dsmall_2 = layers.Conv2D(16, (1,1), activation="relu", padding="same", name='conv2dsmall_2')(conv2dsmall_1)
125      conv2dsmall_3 = layers.Conv2D(8, (1,1), activation="relu", padding="same", name='conv2dsmall_3')(conv2dsmall_2)    
126      conv2dsmall_4 = layers.Conv2D(4, (1,1), activation="relu", padding="same", name='conv2dsmall_4')(conv2dsmall_3)
127      '''
128      conv2dsmall_1 = layers.Conv2D(32, (1,1), padding="same", name='conv2dsmall_1')(distance)
129      batch_norm_1 = layers.BatchNormalization(name='norm_out_1')(conv2dsmall_1)
130      activation_1 = layers.Activation('relu', name='relu_1')(batch_norm_1)
131      drop_1 = layers.Dropout(dropout, seed=1, name='dropout_1')(activation_1)
132      conv2dsmall_2 = layers.Conv2D(16, (1,1), padding="same", name='conv2dsmall_2')(drop_1)
133      batch_norm_2 = layers.BatchNormalization(name='norm_out_2')(conv2dsmall_2)
134      activation_2 = layers.Activation('relu', name='relu_2')(batch_norm_2)
135      drop_2 = layers.Dropout(dropout, seed=1, name='dropout_2')(activation_2)
136      conv2dsmall_3 = layers.Conv2D(8, (1,1), padding="same", name='conv2dsmall_3')(drop_2)   
137      batch_norm_3 = layers.BatchNormalization(name='norm_out_3')(conv2dsmall_3)
138      activation_3 = layers.Activation('relu', name='relu_3')(batch_norm_3)
139      drop_3 = layers.Dropout(dropout, seed=1, name='dropout_3')(activation_3)
140      conv2dsmall_4 = layers.Conv2D(4, (1,1), padding="same", name='conv2dsmall_4')(drop_3)
141      batch_norm_4 = layers.BatchNormalization(name='norm_out_4')(conv2dsmall_4 )
142      activation_4 = layers.Activation('relu', name='relu_4')(batch_norm_4)
143      drop_4 = layers.Dropout(dropout, seed=1, name='dropout_4')(activation_4)
144      
145      output = layers.Conv2D(2,(1,1), activation = "softmax", padding="same", name="output")(drop_4)
146      
147      change_detection = keras.Model([input_1, input_2], output, name='change_detection')
148      
149      return change_detection
150  
151  
152  
153  
154  
155  
156  
157  
def ASPP(inputs, filters, dilation_rates):
    """Atrous Spatial Pyramid Pooling block.

    Applies one 1x1 convolution plus one 3x3 atrous convolution per rate in
    ``dilation_rates`` (each followed by BatchNorm + ReLU) to ``inputs``,
    concatenates the branch outputs along the channel axis, and reduces
    back to ``filters`` channels with a final 1x1 conv + BN + ReLU.

    Parameters
    ----------
    inputs : tensor
        Feature map to pool over.
    filters : int
        Number of output channels for every branch and for the reduction.
    dilation_rates : iterable of int
        Dilation rates for the atrous branches. NOTE(review): layer names
        embed the rate, so duplicate rates would raise a duplicate-layer-name
        error — rates are assumed unique.

    Returns
    -------
    tensor with ``filters`` channels and the spatial size of ``inputs``.
    """
    # 1x1 convolution branch.
    conv_1x1 = layers.Conv2D(filters, (1, 1), padding='same', name='aspp_conv_1x1')(inputs)
    conv_1x1 = layers.BatchNormalization(name='aspp_conv_1x1_bn')(conv_1x1)
    conv_1x1 = layers.Activation('relu', name='aspp_conv_1x1_relu')(conv_1x1)

    # Atrous convolution branches, one per dilation rate.
    # (Original enumerated the rates but never used the index.)
    branches = [conv_1x1]
    for rate in dilation_rates:
        b = layers.Conv2D(filters, (3, 3), dilation_rate=rate, padding='same',
                          name=f'aspp_conv_{rate}')(inputs)
        b = layers.BatchNormalization(name=f'aspp_conv_{rate}_bn')(b)
        b = layers.Activation('relu', name=f'aspp_conv_{rate}_relu')(b)
        branches.append(b)

    # Concatenate all branches, then reduce the channel count.
    merged = layers.Concatenate(axis=-1, name='aspp_concat')(branches)
    reduced = layers.Conv2D(filters, (1, 1), padding='same', name='aspp_reduced')(merged)
    reduced = layers.BatchNormalization(name='aspp_reduced_bn')(reduced)
    return layers.Activation('relu', name='aspp_reduced_relu')(reduced)
181  
182  
183  
184  
def conv_classifier_two_with_aspp(depth, dropout, decay, IMG_HEIGHT=96, IMG_WIDTH=96,
                                  IMG_CHANNELS=3, aspp_filters=32, aspp_rates=(6, 12)):
    """Siamese change-detection classifier with an ASPP context module.

    Same twin encoder as ``conv_classifier_two`` (BatchNorm + three
    Conv/BN/ReLU/Dropout stages per branch, no weight sharing); the absolute
    feature difference at stage ``depth`` is fed through an ``ASPP`` block,
    a 1x1 channel reduction, and a final 2-class softmax.

    Parameters
    ----------
    depth : int
        Which stage (1, 2 or 3) of each branch to compare.
    dropout : float
        Dropout rate applied after every BN+ReLU in the encoder.
    decay : float
        L2 regularization factor for conv kernels and biases.
    IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS : int
        Input image shape.
    aspp_filters : int
        Channel count used inside the ASPP block.
    aspp_rates : iterable of int
        Dilation rates for the ASPP branches. Default is now an immutable
        tuple — the original used a mutable list default, a classic Python
        pitfall (shared across calls if ever mutated).

    Returns
    -------
    keras.Model
        Model with inputs ``[input_1, input_2]`` and an (H, W, 2) softmax output.

    Raises
    ------
    ValueError
        If ``depth`` is not 1, 2 or 3 (the original silently left ``distance``
        unbound, producing a confusing NameError).
    """
    def _branch(tensor, branch_id):
        # One Siamese branch; layer names reproduce the originals exactly.
        x = layers.BatchNormalization(name=f'norm_{branch_id}')(tensor)
        stage_outputs = []
        for level in (1, 2, 3):
            x = layers.Conv2D(32, (3, 3),
                              kernel_regularizer=l2(decay),
                              bias_regularizer=l2(decay),
                              padding="same",
                              name=f'conv{level}_{branch_id}')(x)
            x = layers.BatchNormalization(name=f'norm{level}_{branch_id}')(x)
            x = layers.Activation('relu', name=f'relu{level}_{branch_id}')(x)
            x = layers.Dropout(dropout, seed=1,
                               name=f'dropout{level}_{branch_id}')(x)
            stage_outputs.append(x)
        return stage_outputs

    input_1 = layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
    input_2 = layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))

    branch_1 = _branch(input_1, 1)
    branch_2 = _branch(input_2, 2)

    # Pixel-wise |a - b| of the selected stage's feature maps.
    if depth == 1:
        distance = layers.Lambda(abs_diff)([branch_1[0], branch_2[0]])
    elif depth == 2:
        # Only this branch names the Lambda layer; the inconsistency predates
        # this review and is kept for checkpoint/name compatibility.
        distance = layers.Lambda(abs_diff, name='abs_diff_2')([branch_1[1], branch_2[1]])
    elif depth == 3:
        distance = layers.Lambda(abs_diff)([branch_1[2], branch_2[2]])
    else:
        raise ValueError(f"depth must be 1, 2 or 3, got {depth!r}")

    # ASPP context module on the encoder output.
    aspp_output = ASPP(distance, aspp_filters, aspp_rates)

    # Reduce channels after ASPP (these BN/Activation layers were unnamed
    # in the original; keeping them unnamed preserves auto-generated names).
    reduced = layers.Conv2D(32, (1, 1), padding="same",
                            name='reduced_aspp_output')(aspp_output)
    reduced = layers.BatchNormalization()(reduced)
    reduced = layers.Activation('relu')(reduced)

    # Final per-pixel 2-class softmax.
    output = layers.Conv2D(2, (1, 1), activation="softmax", padding="same",
                           name="output")(reduced)

    return keras.Model(inputs=[input_1, input_2], outputs=output,
                       name='change_detection')