# training/downstream_tasks/sysu_simple_cd.py
  1  #!/usr/bin/env python3
  2  # -*- coding: utf-8 -*-
  3  """
  4  Created on Wed Mar  9 20:15:48 2022
  5  
  6  @author: aleoikon
  7  """
  8  
  9  import sys
 10  sys.path.append('/home/dvalsamis/Documents/projects/Change_detection_SSL_Siamese')
 11  import time
 12  
 13  from architectures.similarity_detection import pretext_task_one_nopool,pretext_task_one_aspp
 14  
 15  # Now you can use pretext_task_one_nopool in your script
 16  
 17  #from tests import change_detection_noup, change_detection_noup_1x1convs
 18  #from similarity_detection import pretext_task_one_nopool
 19  import tensorflow
 20  from tensorflow.keras import layers, Model
 21  from architectures.branch import branches_triplet
 22  from tensorflow.keras.optimizers import Adam
 23  from tensorflow.keras.utils import plot_model
 24  
 25  import pandas as pd
 26  import numpy as np
 27  import os
 28  from tensorflow import keras
 29  from architectures.conv_classifier import conv_classifier_two, conv_classifier_two_with_nspp,conv_classifier_two_with_aspp
 30  from utils.layer_select import feature_selector, feature_selector_simple, transfer_learning_model #, feature_selector_task2
 31  from utils.my_metrics import recall, accuracy, specificity, precision, f_measure, get_confusion_matrix
 32  from utils.log_params import log_params_sim1
 33  
 34  import matplotlib
 35  matplotlib.use('TkAgg')
 36  import matplotlib.pyplot as plt
 37  import uuid
 38  import random
 39  from utils.weighted_cross_entropy import weighted_categorical_crossentropy
# Pin this job to GPU 1 (CUDA device visibility must be set before TF grabs a GPU).
os.environ["CUDA_VISIBLE_DEVICES"]="1"

# Band selection passed to create_rgb_onera(): 'red'/'green'/'blue'/'rgb'/'rgbvnir'.
channel = 'rgb'


# Set random seed for TensorFlow
tensorflow.random.set_seed(1234)

# Set random seed for NumPy
np.random.seed(1234)
 50  
 51  
 52  
 53  
 54  def create_rgb_onera(x,channel):
 55      if channel == 'red':
 56          r = x[:,:,2]
 57          r = np.expand_dims(r, axis=2)
 58          return r
 59      if channel == 'green':
 60          g = x[:,:,1]
 61          g = np.expand_dims(g, axis=2)
 62          return g
 63      if channel == 'blue':
 64          b = x[:,:,0]
 65          b = np.expand_dims(b, axis=2)
 66          return b
 67      if channel == 'rgb':
 68          r = x[:,:,2]
 69          g = x[:,:,1]
 70          b  = x[:,:,0]
 71          rgb = np.dstack((r,g,b))
 72          return(rgb)
 73      if channel == 'rgbvnir':
 74          r = x[:,:,2]
 75          g = x[:,:,1]
 76          b  = x[:,:,0]
 77          vnir = x[:,:,3]
 78          rgbvnir = np.stack((r,g,b,vnir),axis=2).astype('float')
 79          return(rgbvnir)
 80      else:
 81          return x
 82          print("NOT CORRECT CHANNELS")
 83  
 84  def generate_short_id():
 85      # Generate a UUID
 86      unique_id = uuid.uuid4()
 87  
 88      # Convert UUID to a hex string and take the first 4 characters
 89      short_id = str(unique_id.hex)[:4]
 90  
 91      return short_id
 92  
 93  def get_layer_weights(model, layer_names):
 94      layer_weights = {}
 95      for layer_name in layer_names:
 96          layer = model.get_layer(layer_name)
 97          if layer:
 98              weights = layer.get_weights()
 99              if weights:
100                  layer_weights[layer_name] = weights
101              else:
102                  print("No weights found for layer:", layer_name)
103          else:
104              print("Layer not found:", layer_name)
105      return layer_weights
106  
def compare_weights(before_training_weights, after_training_weights):
    """Report, per layer, whether weights changed during training.

    Both arguments map layer names to lists of weight arrays (as produced
    by ``get_layer_weights``).  All findings are printed to stdout.
    """
    for layer_name, before_weights in before_training_weights.items():
        if layer_name not in after_training_weights:
            print(f"Layer {layer_name} weights not found after training.")
            continue
        after_weights = after_training_weights[layer_name]
        if len(before_weights) != len(after_weights):
            print(f"Number of weight arrays different for layer {layer_name}")
            continue

        unchanged = all(
            (b == a).all() for b, a in zip(before_weights, after_weights)
        )
        if unchanged:
            print(f"Weights for layer {layer_name} are the same before and after training.")
        else:
            print(f"Weights for layer {layer_name} are different before and after training.")
123  
def print_layer_weights(model, layer_names):
    """Dump the weight arrays of the named layers to stdout."""
    for name in layer_names:
        layer = model.get_layer(name)
        if not layer:
            print("Layer not found:", name)
            continue
        weights = layer.get_weights()
        if not weights:
            print("No weights found for layer:", name)
        else:
            print("Weights for layer", name, ":", weights)
135  
136  
def visualize_images_and_mask(img1, img2, mask):
    """
    Display two images and their corresponding change mask.

    Parameters:
        img1 (numpy.ndarray): First image array.
        img2 (numpy.ndarray): Second image array.
        mask (numpy.ndarray): Change mask array.
    """
    fig, axes = plt.subplots(1, 3, figsize=(12, 4))
    # (data, title, colormap) for each of the three panels; cmap=None
    # leaves matplotlib's default colormap, as the original imshow calls did.
    panels = [
        (img1, 'Image 1', None),
        (img2, 'Image 2', None),
        (mask, 'Change Mask', 'gray'),
    ]
    for axis, (image, title, cmap) in zip(axes, panels):
        axis.imshow(image, cmap=cmap)
        axis.set_title(title)
        axis.axis('off')
    plt.show()
160  
161  #----------------------------------------------------------------------------------------------------------------------------------------------------------
162  
163          
164  
165  
# Root folders holding the pre-augmented SYSU patches saved as .npy files.
onera_train_target = '/data/valsamis_data/data/sysu/SYSU_NPY/aug_train_data/'
onera_test_target = '/data/valsamis_data/data/sysu/SYSU_NPY/aug_test_data/'

# Index CSVs listing the patch pairs and their change masks.
train = pd.read_csv(onera_train_target + "dataset_train.csv")
test = pd.read_csv(onera_test_target + "dataset_test.csv")

# Shuffle both splits deterministically so runs are reproducible.
train = train.sample(frac=1, random_state=1)
test = test.sample(frac=1, random_state=1)
print("Train Data", len(train))
print("Test Data", len(test))
179  
NORM = True   # inputs are standardised per patch below
n_ch = 3      # RGB

# Load the entire training split into memory up front.
X_train1 = np.ndarray(shape=(len(train), 96, 96, n_ch))
X_train2 = np.ndarray(shape=(len(train), 96, 96, n_ch))
y_train = np.ndarray(shape=(len(train), 96, 96))

for pos, index in enumerate(train.index):
    img1 = np.load(onera_train_target + train['pair1'][index])
    img2 = np.load(onera_train_target + train['pair2'][index])
    X1 = create_rgb_onera(img1, channel)
    X2 = create_rgb_onera(img2, channel)

    # Per-patch standardisation (zero mean, unit variance).
    X_train1[pos] = (X1 - X1.mean()) / X1.std()
    X_train2[pos] = (X2 - X2.mean()) / X2.std()
    y_train[pos] = np.load(onera_train_target + train['change_mask'][index])

# Class balance: ratio of unchanged (0) to changed (1) pixels.
train_balance = y_train.flatten()
unique, counts = np.unique(train_balance, return_counts=True)
frequencies = np.asarray((unique, counts)).T
print(frequencies[0][1] / frequencies[1][1])

# One-hot encode the masks for the 2-class softmax head.
y_hot_train = keras.utils.to_categorical(y_train, num_classes=2)
217      
# Load the test split exactly the same way as the training split.
X_test1 = np.ndarray(shape=(len(test), 96, 96, n_ch))
X_test2 = np.ndarray(shape=(len(test), 96, 96, n_ch))
y_test = np.ndarray(shape=(len(test), 96, 96))

for pos, index in enumerate(test.index):
    pair1 = np.load(onera_test_target + test['pair1'][index])
    pair2 = np.load(onera_test_target + test['pair2'][index])
    band1 = create_rgb_onera(pair1, channel)
    band2 = create_rgb_onera(pair2, channel)
    # Per-patch standardisation, matching the training pipeline.
    X_test1[pos] = (band1 - band1.mean()) / band1.std()
    X_test2[pos] = (band2 - band2.mean()) / band2.std()
    y_test[pos] = np.load(onera_test_target + test['change_mask'][index])

# One-hot encode the test masks as well.
y_hot_test = keras.utils.to_categorical(y_test, num_classes=2)
237  #########
# Random patch index (kept for ad-hoc visual inspection of predictions).
ind = random.randint(0, 1000)
###############

# Change-detection head hyper-parameters.
depth = 2                 # number of conv blocks transferred/selected below
dropout = 0.1
decay = 0.0001            # L2 weight decay
LEARNING_RATE = 0.001     # NOTE(review): overridden to 0.0001 before compile below
EPOCHS = 55

# Short run identifier used in the saved-model file name.
model_id = generate_short_id()
248  
249  
250  # #------------------------------------------------------------------------------------
251  
# Change-detection model: siamese classifier head with ASPP, 96x96 inputs.
cd_model = conv_classifier_two_with_aspp(depth, dropout, decay, 96, 96, n_ch)
cd_model.summary()


#task1
# Pretext (self-supervised similarity) network whose encoder weights are reused.
sim_model = pretext_task_one_nopool( dropout, decay, 96, 96, n_ch)


#Load either a task1 or a task2 model
pretext_model_name = '/home/dvalsamis/Documents/projects/Change_detection_SSL_Siamese/saved_models/model_pretext1_unclouded_results.h5'
pretext_model = 'model_pretext1_unclouded_results'  # checkpoint name recorded in the experiment log



#task1
sim_model.load_weights(pretext_model_name)


# Feature selection(task1)
# NOTE(review): presumably copies `depth` encoder blocks from sim_model into
# cd_model — confirm against utils.layer_select.feature_selector_simple.
cd_model = feature_selector_simple(depth, sim_model, cd_model)
272  
273  
274  
275  # -------------------------------------------------------------------------------------------------------------------------------------------------
276  
# Class weights for the weighted cross-entropy loss:
# wx for class 0 (no change), wy for class 1 (change, the minority class).
wx = 0.1
wy = 0.2
weights = np.array([wx, wy]) #!!!!!!!!!!!!!!!!!!!!!!!! -> change
#weights = np.array([0.1,0.2])


# Fine-tuning settings (this LEARNING_RATE supersedes the one defined above).
LEARNING_RATE = 0.0001
EPOCHS = 55
optimizer= Adam(learning_rate=LEARNING_RATE)
#loss='categorical_crossentropy' #weighted_bincrossentropy 'categorical_crossentropy' 'binary_crossentropy'
cd_model.compile(optimizer=optimizer, loss=weighted_categorical_crossentropy(weights), metrics=['accuracy'])

BATCH_SIZE=5
290  
291      
292  
293  
294  
295  
296  
# Before training
print("Before Training:")
# Snapshot encoder-layer weights so we can verify after fit() that the
# transferred layers were actually updated.  The relu/dropout entries carry
# no weights and will only print a notice.
before_training_weights = get_layer_weights(cd_model, ["norm_1", "conv1_1", "norm1_1", "relu1_1", "dropout1_1", "conv2_1", "norm2_1", "relu2_1", "dropout2_1"])



# # Record start time
start_time = time.time()

history = cd_model.fit(
    [X_train1 , X_train2],
    y_hot_train,
    validation_data=([X_test1, X_test2], y_hot_test),
    batch_size=BATCH_SIZE,
    epochs=EPOCHS
)

# After training
print("After Training:")
after_training_weights = get_layer_weights(cd_model, ["norm_1", "conv1_1", "norm1_1", "relu1_1", "dropout1_1", "conv2_1", "norm2_1", "relu2_1", "dropout2_1"])

# Compare weights
compare_weights(before_training_weights, after_training_weights)



# Record end time
# NOTE(review): the measured span also covers the weight snapshot/compare
# above, not just fit(); elapsed_time_minutes is computed but never printed.
end_time = time.time()

# Calculate elapsed time
elapsed_time = end_time - start_time
elapsed_time_minutes = elapsed_time / 60
329  
# cd_model = conv_classifier_two_with_aspp(depth, dropout, decay, 96, 96, n_ch)
# cd_model_path = '/home/dvalsamis/Documents/projects/Change_detection_SSL_Siamese/saved_models/CD_Simple_Levir_8192.h5'
# cd_model.load_weights(cd_model_path)


# ###predictions
# Softmax output is (N, 96, 96, 2); argmax over the class axis yields the
# per-pixel 0/1 change map.
predictions = cd_model.predict([X_test1, X_test2])
y_pred = np.argmax(predictions, axis=3)




y_true = y_test


# Evaluate on the test split using the project metric helpers.
get_confusion_matrix(y_true, y_pred)
acc = accuracy(y_true,y_pred)
spec = specificity(y_true,y_pred)
rec = recall(y_true, y_pred)
prec = precision(y_true, y_pred)
f_m = f_measure(y_true, y_pred)


# Printed in order: recall, specificity, precision, F-measure, accuracy.
print(rec)
print(spec)
print(prec)
print(f_m)
print(acc)
358  
359  
360  ####see some predictions
def scaleMinMax(x):
    """Stretch *x* to roughly [0, 1] between its 2nd and 98th nan-percentiles.

    Values below/above the percentiles map outside [0, 1]; NaNs are ignored
    when computing the percentiles.  Bug fix: a constant (flat) input used
    to divide by zero and return NaN/inf — it now returns an all-zero array.
    """
    lo = np.nanpercentile(x, 2)
    hi = np.nanpercentile(x, 98)
    if hi == lo:
        # Degenerate patch with zero percentile range: avoid division by zero.
        return np.zeros_like(x, dtype=float)
    return (x - lo) / (hi - lo)
363  
364  
def create_rgb(x, channel):
    """Build a display-ready image for plotting.

    For channel == 'rgb': percentile-stretch each band of the BGR input and
    stack them in RGB order.  Otherwise: return the stretched first band.
    """
    if channel == 'rgb':
        # Channels are stored BGR; stretch each band independently,
        # then stack in red, green, blue order.
        stretched = [scaleMinMax(x[:, :, i]) for i in (2, 1, 0)]
        return np.dstack(stretched)
    return scaleMinMax(x[:, :, 0])
379          
380  
381  # '''
382  # Log Params
383  # '''
import csv  # NOTE(review): unused here; CSV writing happens inside log_params_sim1



# Re-run inference so the logged metrics correspond to the saved weights.
predictions = cd_model.predict([X_test1, X_test2])
y_pred = np.argmax(predictions, axis=3)
y_true = y_test

cd_model_name = "CD_Simple_"+"Sysu_"+model_id+".h5"

model_path = '/home/dvalsamis/Documents/projects/Change_detection_SSL_Siamese/saved_models/'
#cd_model_name = "03_conv_classifier_two_drop0.1_abs_diff_50epochs_weighted0.1_4.5.h5"
# Only weights are saved; the architecture must be rebuilt before reloading.
cd_model.save_weights(model_path+cd_model_name)
print("Saved model to disk")

# Persist run configuration and test metrics to the experiment log.
weight_par = '[' + str(wx) + ',' + str(wy) + ']'
log_params_sim1("Task 1 (T)", "Linear", 'ASPP', weight_par, depth, "Softmax", LEARNING_RATE, 'Adam', EPOCHS, 'weighted_categorical_crossentropy', BATCH_SIZE, rec, spec, prec, f_m, acc, "SYSU Set", 96, NORM, pretext_model,cd_model_name)

print("Done")