/ training / downstream_tasks / levir_simple_cd.py
levir_simple_cd.py
  1  #!/usr/bin/env python3
  2  # -*- coding: utf-8 -*-
  3  """
  4  Created on Wed Mar  9 20:15:48 2022
  5  
  6  @author: aleoikon
  7  """
  8  
  9  import sys
 10  sys.path.append('/home/dvalsamis/Documents/projects/Change_detection_SSL_Siamese')
 11  import time
 12  
 13  from architectures.similarity_detection import pretext_task_one_nopool,pretext_task_one_aspp
 14  
 15  # Now you can use pretext_task_one_nopool in your script
 16  
 17  #from tests import change_detection_noup, change_detection_noup_1x1convs
 18  #from similarity_detection import pretext_task_one_nopool
 19  import tensorflow
 20  from tensorflow.keras import layers, Model
 21  from architectures.branch import branches_triplet
 22  from tensorflow.keras.optimizers import Adam
 23  from tensorflow.keras.utils import plot_model
 24  
 25  import pandas as pd
 26  import numpy as np
 27  import os
 28  from tensorflow import keras
 29  from architectures.conv_classifier import conv_classifier_two, conv_classifier_two_with_nspp,conv_classifier_two_with_aspp
 30  from utils.layer_select import feature_selector, feature_selector_simple, transfer_learning_model #, feature_selector_task2
 31  from utils.my_metrics import recall, accuracy, specificity, precision, f_measure, get_confusion_matrix
 32  from utils.log_params import log_params_sim1
 33  
 34  import matplotlib
 35  matplotlib.use('TkAgg')
 36  import matplotlib.pyplot as plt
 37  import uuid
 38  import random
 39  from utils.weighted_cross_entropy import weighted_categorical_crossentropy
# Pin the process to GPU index 1; must be set before TensorFlow initialises CUDA.
os.environ["CUDA_VISIBLE_DEVICES"]="1"

# Band combination fed to the network (consumed by create_rgb_onera below).
channel = 'rgb'


# Set random seed for TensorFlow
tensorflow.random.set_seed(1234)

# Set random seed for NumPy
np.random.seed(1234)
 53  
 54  def create_rgb_onera(x,channel):
 55      if channel == 'red':
 56          r = x[:,:,2]
 57          r = np.expand_dims(r, axis=2)
 58          return r
 59      if channel == 'green':
 60          g = x[:,:,1]
 61          g = np.expand_dims(g, axis=2)
 62          return g
 63      if channel == 'blue':
 64          b = x[:,:,0]
 65          b = np.expand_dims(b, axis=2)
 66          return b
 67      if channel == 'rgb':
 68          r = x[:,:,2]
 69          g = x[:,:,1]
 70          b  = x[:,:,0]
 71          rgb = np.dstack((r,g,b))
 72          return(rgb)
 73      if channel == 'rgbvnir':
 74          r = x[:,:,2]
 75          g = x[:,:,1]
 76          b  = x[:,:,0]
 77          vnir = x[:,:,3]
 78          rgbvnir = np.stack((r,g,b,vnir),axis=2).astype('float')
 79          return(rgbvnir)
 80      else:
 81          return x
 82          print("NOT CORRECT CHANNELS")
 83  
 84  def generate_short_id():
 85      # Generate a UUID
 86      unique_id = uuid.uuid4()
 87  
 88      # Convert UUID to a hex string and take the first 4 characters
 89      short_id = str(unique_id.hex)[:4]
 90  
 91      return short_id
 92  
 93  def get_layer_weights(model, layer_names):
 94      layer_weights = {}
 95      for layer_name in layer_names:
 96          layer = model.get_layer(layer_name)
 97          if layer:
 98              weights = layer.get_weights()
 99              if weights:
100                  layer_weights[layer_name] = weights
101              else:
102                  print("No weights found for layer:", layer_name)
103          else:
104              print("Layer not found:", layer_name)
105      return layer_weights
106  
def compare_weights(before_training_weights, after_training_weights):
    """Report, per layer, whether weights changed between two snapshots.

    Parameters:
        before_training_weights: dict layer name -> list of weight arrays.
        after_training_weights: dict layer name -> list of weight arrays.

    Prints one diagnostic line per layer; returns None.
    """
    for layer_name in before_training_weights:
        if layer_name not in after_training_weights:
            print(f"Layer {layer_name} weights not found after training.")
            continue
        before_weights = before_training_weights[layer_name]
        after_weights = after_training_weights[layer_name]
        if len(before_weights) != len(after_weights):
            print(f"Number of weight arrays different for layer {layer_name}")
            continue

        # FIX: np.array_equal handles shape mismatches safely, whereas
        # (a == b).all() misbehaves when corresponding arrays differ in shape.
        all_equal = all(
            np.array_equal(b, a) for b, a in zip(before_weights, after_weights)
        )
        if all_equal:
            print(f"Weights for layer {layer_name} are the same before and after training.")
        else:
            print(f"Weights for layer {layer_name} are different before and after training.")
123  
def print_layer_weights(model, layer_names):
    """Print the current weights of the named layers for debugging.

    Missing layers and layers with no weights are reported on stdout
    instead of crashing the script.
    """
    for layer_name in layer_names:
        # FIX: Model.get_layer raises ValueError for unknown names, so the
        # original "Layer not found" else-branch could never run.
        try:
            layer = model.get_layer(layer_name)
        except ValueError:
            print("Layer not found:", layer_name)
            continue
        weights = layer.get_weights()
        if weights:
            print("Weights for layer", layer_name, ":", weights)
        else:
            print("No weights found for layer:", layer_name)
135  
136  
def visualize_images_and_mask(img1, img2, mask):
    """
    Display two images and their corresponding change mask.

    Parameters:
        img1 (numpy.ndarray): First image array.
        img2 (numpy.ndarray): Second image array.
        mask (numpy.ndarray): Change mask array.
    """
    fig, axes = plt.subplots(1, 3, figsize=(12, 4))
    # One panel per input; only the mask uses a grayscale colormap.
    panels = [
        (img1, 'Image 1', None),
        (img2, 'Image 2', None),
        (mask, 'Change Mask', 'gray'),
    ]
    for axis, (image, title, cmap) in zip(axes, panels):
        if cmap is None:
            axis.imshow(image)
        else:
            axis.imshow(image, cmap=cmap)
        axis.set_title(title)
        axis.axis('off')
    plt.show()
160  
#----------------------------------------------------------------------------------------------------------------------------------------------------------




# Directories containing the augmented LEVIR-CD patches exported as .npy
# files plus the dataset_{train,test}.csv index files.
onera_train_target =  '/data/valsamis_data/data/LEVIR-CD/Levir_NPY/aug_train_data/'
onera_test_target = '/data/valsamis_data/data/LEVIR-CD/Levir_NPY/aug_test_data/'



#pretext_models_df = pd.read_csv('training/pretext_tasks/pretext_task_one_models.csv')
# Index CSVs: one row per patch pair with columns pair1, pair2, change_mask
# holding relative .npy paths (inferred from usage below -- TODO confirm schema).
train = pd.read_csv(onera_train_target + "dataset_train.csv")
test = pd.read_csv(onera_test_target + "dataset_test.csv")

# Shuffle rows deterministically (fixed random_state for reproducibility).
train = train.sample(frac=1, random_state=1)
test = test.sample(frac=1, random_state=1)
print("Train Data", len(train))
print("Test Data", len(test))

NORM = True  # logged later; per-image standardisation is applied in the loop below
n_ch = 3     # channels per input patch ('rgb')
#Load everything in memory
X_train1 = np.ndarray(shape=(len(train),96,96,n_ch))
X_train2 = np.ndarray(shape=(len(train),96,96,n_ch))
y_train =  np.ndarray(shape=(len(train),96,96))

# Load every training pair into memory, converting band order and
# standardising each image to zero mean / unit variance.
pos = 0
for index in train.index:
    img1 = np.load(onera_train_target + train['pair1'][index])
    img2 = np.load(onera_train_target + train['pair2'][index])
    X1 = create_rgb_onera(img1, channel)
    X2 = create_rgb_onera(img2, channel)

    # Per-image standardisation. NOTE(review): divides by the raw std --
    # a constant patch (std == 0) would produce NaNs here.
    X1 = (X1 - X1.mean()) / X1.std()
    X2 = (X2 - X2.mean()) / X2.std()
    X_train1[pos] = X1
    X_train2[pos] = X2
    y_train[pos] = np.load(onera_train_target + train['change_mask'][index])

    # if pos % 10 == 0:  # Adjust this condition to visualize less frequently if needed
    #     visualize_images_and_mask(X1, X2, y_train[pos])

    pos += 1

##### see the ratio of 1s to 0s (class-balance check on the change masks)
train_balance = y_train.flatten()
(unique, counts) = np.unique(train_balance , return_counts=True)
frequencies = np.asarray((unique, counts)).T
# NOTE(review): frequencies[i] = (value, count); indexing assumes both
# classes (0 and 1) occur in the masks.
print(frequencies[0][1]/frequencies[1][1])

## one hot the train

# (N, 96, 96) binary masks -> (N, 96, 96, 2) one-hot maps for a softmax head.
y_hot_train = keras.utils.to_categorical(y_train, num_classes=2)
214      
# Test-split buffers, same layout as the training arrays.
X_test1 = np.ndarray(shape=(len(test),96,96,n_ch))
X_test2 = np.ndarray(shape=(len(test),96,96,n_ch))
y_test =  np.ndarray(shape=(len(test),96,96))

# Load every test pair with the same band selection and per-image
# standardisation as the training data.
pos = 0
for index in test.index:
    img1 = np.load(onera_test_target + test['pair1'][index])
    img2 = np.load(onera_test_target + test['pair2'][index])
    X1 = create_rgb_onera(img1, channel)
    X2 = create_rgb_onera(img2, channel)
    X1 = (X1 - X1.mean()) / X1.std()
    X2 = (X2 - X2.mean()) / X2.std()
    X_test1[pos] = X1
    X_test2[pos] = X2
    y_test[pos] = np.load(onera_test_target + test['change_mask'][index])
    pos += 1

## one hot the test
y_hot_test = keras.utils.to_categorical(y_test, num_classes=2)
#########
# NOTE(review): 'ind' is never used below -- presumably left over from a
# sample-visualisation snippet.
ind = random.randint(0, 1000)
###############

# Hyper-parameters for the downstream change-detection classifier.
# NOTE(review): LEARNING_RATE and EPOCHS are overridden again before compile.
depth = 2
dropout = 0.1
decay = 0.0001
LEARNING_RATE = 0.001
EPOCHS = 30

# Short random id used to tag the saved model file name.
model_id = generate_short_id()
245  
246  
247  # #------------------------------------------------------------------------------------
248  
249  cd_model = conv_classifier_two(depth, dropout, decay, 96, 96, n_ch)
250  cd_model.summary()
251  
252  
253  #task1
254  sim_model = pretext_task_one_nopool( dropout, decay, 96, 96, n_ch)
255  
256  
257  #Load either a task1 or a task2 model
258  pretext_model_name = '/home/dvalsamis/Documents/projects/Change_detection_SSL_Siamese/saved_models/model_pretext1_unclouded_results.h5'
259  pretext_model = 'model_pretext1_unclouded_results'
260  
261  
262  
263  #task1
264  sim_model.load_weights(pretext_model_name)
265  
266  
267  # Feature selection(task1)
268  cd_model = feature_selector_simple(depth, sim_model, cd_model)
269  
270  
271  
272  # -------------------------------------------------------------------------------------------------------------------------------------------------
273  
274  wx = 0.1
275  wy = 0.2
276  weights = np.array([wx, wy]) #!!!!!!!!!!!!!!!!!!!!!!!! -> change
277  #weights = np.array([0.1,0.2])
278  
279  
280  LEARNING_RATE = 0.0001
281  EPOCHS = 20
282  optimizer= Adam(learning_rate=LEARNING_RATE)
283  #loss='categorical_crossentropy' #weighted_bincrossentropy 'categorical_crossentropy' 'binary_crossentropy'
284  cd_model.compile(optimizer=optimizer, loss=weighted_categorical_crossentropy(weights), metrics=['accuracy'])
285  
286  BATCH_SIZE=5
287  
288      
289  
290  
291  
292  
# Snapshot selected layer weights before training so we can verify whether
# the transferred encoder layers are actually updated by fit().
inspected_layers = ["norm_1", "conv1_1", "norm1_1", "relu1_1", "dropout1_1", "conv2_1", "norm2_1", "relu2_1", "dropout2_1"]

# Before training
print("Before Training:")
before_training_weights = get_layer_weights(cd_model, inspected_layers)

# Record start time (training only)
start_time = time.time()

history = cd_model.fit(
    [X_train1 , X_train2],
    y_hot_train,
    validation_data=([X_test1, X_test2], y_hot_test),
    batch_size=BATCH_SIZE,
    epochs=EPOCHS
)

# FIX: stop the clock immediately after fit() so the reported time covers
# training only -- the original recorded end_time after the weight
# comparison, inflating the measurement.
end_time = time.time()

# Calculate elapsed training time
elapsed_time = end_time - start_time
elapsed_time_minutes = elapsed_time / 60  # kept for inspection/logging

# After training
print("After Training:")
after_training_weights = get_layer_weights(cd_model, inspected_layers)

# Report which inspected layers changed during training.
compare_weights(before_training_weights, after_training_weights)

# cd_model = conv_classifier_two_with_aspp(depth, dropout, decay, 96, 96, n_ch)
# cd_model_path = '/home/dvalsamis/Documents/projects/Change_detection_SSL_Siamese/saved_models/CD_Simple_CBMI_c141.h5'
# cd_model.load_weights(cd_model_path)


# ###predictions: softmax maps (N, 96, 96, 2); argmax over the class axis
# yields a binary change mask per patch.
predictions = cd_model.predict([X_test1, X_test2])
y_pred = np.argmax(predictions, axis=3)
333  
334  
'''
metrics
'''
#y_true = np.ndarray(shape=(y_pred.shape))
#for i in range(y_true.shape[0]):
#    y_true[i] = np.load(onera_test_target+onera_test_df['change_mask'][i]


# Ground-truth masks for the test split, loaded earlier.
y_true = y_test


# Pixel-wise evaluation over all test patches (see utils.my_metrics).
get_confusion_matrix(y_true, y_pred)
acc = accuracy(y_true,y_pred)
spec = specificity(y_true,y_pred)
rec = recall(y_true, y_pred)
prec = precision(y_true, y_pred)
f_m = f_measure(y_true, y_pred)


# Printed in the order: recall, specificity, precision, F-measure, accuracy.
print(rec)
print(spec)
print(prec)
print(f_m)
print(acc)
359  
360  
361  ####see some predictions
def scaleMinMax(x):
    """Linearly stretch x so its 2nd/98th nan-percentiles map to 0 and 1.

    NaNs are ignored when computing the percentiles; values outside the
    percentile range map outside [0, 1].
    """
    # Hoisted: compute both percentiles in one pass instead of calling
    # np.nanpercentile three times (the 2nd percentile was computed twice).
    lo, hi = np.nanpercentile(x, (2, 98))
    return (x - lo) / (hi - lo)
364  
365  
def create_rgb(x, channel):
    """Build a display-ready image from a (H, W, C) array.

    For channel == 'rgb' the input's planes 2, 1, 0 are treated as the
    R, G, B bands (assumes B, G, R storage order -- TODO confirm against
    the loader), each percentile-stretched via scaleMinMax and stacked.
    Any other channel value returns the stretched first plane alone.
    """
    if channel == 'rgb':
        red, green, blue = (scaleMinMax(x[:, :, band]) for band in (2, 1, 0))
        return np.dstack((red, green, blue))
    return scaleMinMax(x[:, :, 0])
380          
381  
# NOTE(review): csv is imported here but never used in this script.
import csv



# Re-run inference. NOTE(review): this duplicates the prediction performed
# right after training; the results are identical since the model is unchanged.
predictions = cd_model.predict([X_test1, X_test2])
y_pred = np.argmax(predictions, axis=3)
y_true = y_test

# Weight-file name tagged with the random short id generated above.
cd_model_name = "CD_Simple_"+"Levir_"+model_id+".h5"

model_path = '/home/dvalsamis/Documents/projects/Change_detection_SSL_Siamese/saved_models/'
#cd_model_name = "03_conv_classifier_two_drop0.1_abs_diff_50epochs_weighted0.1_4.5.h5"
cd_model.save_weights(model_path+cd_model_name)
print("Saved model to disk")

# Persist the run configuration and metrics (see utils.log_params.log_params_sim1).
weight_par = '[' + str(wx) + ',' + str(wy) + ']'
log_params_sim1("Task 1", "Linear", 'ASPP', weight_par, depth, "Softmax", LEARNING_RATE, 'Adam', EPOCHS, 'weighted_categorical_crossentropy', BATCH_SIZE, rec, spec, prec, f_m, acc, "Levir-CD Set", 96, NORM, pretext_model,cd_model_name)

print("Done")
401