similarity_detection.py
from architectures.branch import branches, branches_nopool, branch_cva
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2

def abs_diff(vects):
    """Element-wise absolute difference between the two branch embeddings."""
    x, y = vects
    return tf.math.abs(tf.math.subtract(x, y))

def pretext_task_one(IMG_HEIGHT=96, IMG_WIDTH=96, IMG_CHANNELS=3):
    """Siamese similarity model with a max-pooled joint block after the shared branches."""
    input_1 = layers.Input((int(IMG_HEIGHT), int(IMG_WIDTH), int(IMG_CHANNELS)))
    input_2 = layers.Input((int(IMG_HEIGHT), int(IMG_WIDTH), int(IMG_CHANNELS)))

    # shared embedding network applied to both inputs
    embedding_network = branches(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)

    x1 = embedding_network(input_1)
    x2 = embedding_network(input_2)

    # absolute difference layer
    merge_layer = layers.Lambda(abs_diff)([x1, x2])

    # joint, a 3x3 conv layer with a relu activation function
    joint = layers.Conv2D(32, (3, 3), activation="relu", padding="same", name='joint')(merge_layer)
    dr_joint = layers.Dropout(0.1, seed=1, name='dropout_joint')(joint)
    pool_joint = layers.MaxPooling2D(pool_size=(2, 2), name='pool_joint')(dr_joint)

    flatten = layers.Flatten()(pool_joint)
    dense1 = layers.Dense(128, activation="relu", name='dense1')(flatten)
    dense2 = layers.Dense(64, activation="relu", name='dense2')(dense1)
    output_layer = layers.Dense(1, activation="sigmoid", name='output')(dense2)
    siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer, name='pretext_task1')

    return siamese

def pretext_task_one_nopool(dropout, decay, IMG_HEIGHT=96, IMG_WIDTH=96, IMG_CHANNELS=3):
    """Variant of pretext_task_one without max pooling; dropout rate and L2 weight decay are configurable."""
    input_1 = layers.Input((int(IMG_HEIGHT), int(IMG_WIDTH), int(IMG_CHANNELS)))
    input_2 = layers.Input((int(IMG_HEIGHT), int(IMG_WIDTH), int(IMG_CHANNELS)))

    # shared embedding network applied to both inputs
    embedding_network = branches_nopool(dropout, decay, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)

    x1 = embedding_network(input_1)
    x2 = embedding_network(input_2)

    # absolute difference layer
    merge_layer = layers.Lambda(abs_diff)([x1, x2])

    # joint, a 3x3 conv layer with a relu activation function
    joint = layers.Conv2D(32, (3, 3), activation="relu", kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same", name='joint')(merge_layer)
    dr_joint = layers.Dropout(dropout, seed=1, name='dropout_joint')(joint)
    # pool_joint = layers.MaxPooling2D(pool_size=(2, 2), name='pool_joint')(dr_joint)

    flatten = layers.Flatten()(dr_joint)
    dense1 = layers.Dense(128, activation="relu", name='dense1')(flatten)
    dense2 = layers.Dense(64, activation="relu", name='dense2')(dense1)
    output_layer = layers.Dense(1, activation="sigmoid", name='output')(dense2)
    siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer, name='pretext_task1')

    return siamese

def pretext_one(dropout, decay, IMG_HEIGHT=96, IMG_WIDTH=96, IMG_CHANNELS=3):
    """Siamese similarity model built on the branch_cva embedding, with batch normalisation in the joint block."""
    input_1 = layers.Input((int(IMG_HEIGHT), int(IMG_WIDTH), int(IMG_CHANNELS)))
    input_2 = layers.Input((int(IMG_HEIGHT), int(IMG_WIDTH), int(IMG_CHANNELS)))

    # shared embedding network applied to both inputs
    embedding_network = branch_cva(dropout, decay, 3, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)

    x1 = embedding_network(input_1)
    x2 = embedding_network(input_2)

    # absolute difference layer
    merge_layer = layers.Lambda(abs_diff)([x1, x2])

    # joint, a 3x3 conv layer followed by batch normalisation and a relu activation
    joint = layers.Conv2D(32, (3, 3), kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same", name='joint')(merge_layer)
    batch_norm_joint = layers.BatchNormalization(name='norm_joint')(joint)
    activation_joint = layers.Activation('relu', name='relu_joint')(batch_norm_joint)
    drop_joint = layers.Dropout(dropout, seed=1, name='dropout_joint')(activation_joint)

    flatten = layers.Flatten()(drop_joint)
    dense1 = layers.Dense(128, activation="relu", name='dense1')(flatten)
    dense2 = layers.Dense(64, activation="relu", name='dense2')(dense1)
    output_layer = layers.Dense(1, activation="sigmoid", name='output')(dense2)
    siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer, name='pretext_task1')

    return siamese


def ASPP(inputs, filters, dilation_rates):
    """Atrous Spatial Pyramid Pooling block: parallel 1x1 and dilated 3x3 convolutions, concatenated and reduced."""
    # 1x1 convolution
    conv_1x1 = layers.Conv2D(filters, (1, 1), padding='same', name='aspp_conv_1x1')(inputs)
    conv_1x1_bn = layers.BatchNormalization(name='aspp_conv_1x1_bn')(conv_1x1)
    conv_1x1_relu = layers.Activation('relu', name='aspp_conv_1x1_relu')(conv_1x1_bn)

    # Atrous convolutions with different dilation rates
    atrous_layers = [conv_1x1_relu]
    for rate in dilation_rates:
        atrous_conv = layers.Conv2D(filters, (3, 3), dilation_rate=rate, padding='same', name=f'aspp_conv_{rate}')(inputs)
        atrous_conv_bn = layers.BatchNormalization(name=f'aspp_conv_{rate}_bn')(atrous_conv)
        atrous_conv_relu = layers.Activation('relu', name=f'aspp_conv_{rate}_relu')(atrous_conv_bn)
        atrous_layers.append(atrous_conv_relu)

    # Concatenate the atrous convolutions
    concatenated = layers.Concatenate(axis=-1, name='aspp_concat')(atrous_layers)

    # Reduce the number of channels
    reduced = layers.Conv2D(filters, (1, 1), padding='same', name='aspp_reduced')(concatenated)
    reduced_bn = layers.BatchNormalization(name='aspp_reduced_bn')(reduced)
    reduced_relu = layers.Activation('relu', name='aspp_reduced_relu')(reduced_bn)

    return reduced_relu

def pretext_task_one_aspp(dropout, decay, IMG_HEIGHT=96, IMG_WIDTH=96, IMG_CHANNELS=3, aspp_filters=32, aspp_rates=(6, 12)):
    """Same as pretext_task_one_nopool, but with an ASPP block inserted between the merge layer and the joint convolution."""
    input_1 = layers.Input((int(IMG_HEIGHT), int(IMG_WIDTH), int(IMG_CHANNELS)))
    input_2 = layers.Input((int(IMG_HEIGHT), int(IMG_WIDTH), int(IMG_CHANNELS)))

    # shared embedding network applied to both inputs
    embedding_network = branches_nopool(dropout, decay, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)

    x1 = embedding_network(input_1)
    x2 = embedding_network(input_2)

    # Absolute difference layer
    merge_layer = layers.Lambda(abs_diff)([x1, x2])

    # Insert ASPP block right after the encoder phase
    aspp_output = ASPP(merge_layer, aspp_filters, aspp_rates)

    # Continue with joint, a 3x3 conv layer with a relu activation function
    joint = layers.Conv2D(32, (3, 3), activation="relu", kernel_regularizer=l2(decay), bias_regularizer=l2(decay), padding="same", name='joint')(aspp_output)
    dr_joint = layers.Dropout(dropout, seed=1, name='dropout_joint')(joint)

    flatten = layers.Flatten()(dr_joint)
    dense1 = layers.Dense(128, activation="relu", name='dense1')(flatten)
    dense2 = layers.Dense(64, activation="relu", name='dense2')(dense1)
    output_layer = layers.Dense(1, activation="sigmoid", name='output')(dense2)
    siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer, name='pretext_task1_with_aspp')

    return siamese
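
# Minimal usage sketch (not part of the original module): the builders above return an
# uncompiled keras.Model with a single sigmoid output, so pairwise similarity labels
# (1 = same, 0 = different) with binary cross-entropy are assumed here; the optimizer,
# learning rate, dropout and weight-decay values below are illustrative only.
if __name__ == "__main__":
    model = pretext_task_one_nopool(dropout=0.1, decay=1e-4)
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    model.summary()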