# examples/pytorch/CaptumExample/Titanic_Captum_Interpret.py
  1  """
  2  Getting started with Captum - Titanic Data Analysis
  3  """
  4  
  5  # Initial imports
  6  import os
  7  from argparse import ArgumentParser
  8  
  9  import matplotlib.pyplot as plt
 10  import numpy as np
 11  import pandas as pd
 12  import torch
 13  from captum.attr import IntegratedGradients, LayerConductance, NeuronConductance
 14  from prettytable import PrettyTable
 15  from scipy import stats
 16  from sklearn.model_selection import train_test_split
 17  from torch import nn
 18  
 19  import mlflow
 20  
 21  
 22  def get_titanic():
 23      """
 24      we now preprocess the data by converting some categorical features such as
 25      gender, location of embarcation, and passenger class into one-hot encodings
 26      We also remove some features that are more difficult to analyze
 27      After processing, the features we have are:
 28      Age: Passenger Age
 29      Sibsp: Number of Siblings / Spouses Aboard
 30      Parch: Number of Parents / Children Aboard
 31      Fare: Fare Amount Paid in British Pounds
 32      Female: Binary variable indicating whether passenger is female
 33      Male: Binary variable indicating whether passenger is male
 34      EmbarkC : Binary var indicating whether passenger embarked @ Cherbourg
 35      EmbarkQ : Binary var indicating whether passenger embarked @ Queenstown
 36      EmbarkS : Binary var indicating whether passenger embarked @ Southampton
 37      Class1 : Binary var indicating whether passenger was in first class
 38      Class2 : Binary var indicating whether passenger was in second class
 39      Class3 : Binary var indicating whether passenger was in third class
 40      """
 41      data_path = "titanic3.csv"
 42      titanic_data = pd.read_csv(data_path)
 43      titanic_data = pd.concat(
 44          [
 45              titanic_data,
 46              pd.get_dummies(titanic_data["sex"], dtype=np.uint8),
 47              pd.get_dummies(titanic_data["embarked"], prefix="embark", dtype=np.uint8),
 48              pd.get_dummies(titanic_data["pclass"], prefix="class", dtype=np.uint8),
 49          ],
 50          axis=1,
 51      )
 52  
 53      titanic_data["age"] = titanic_data["age"].fillna(titanic_data["age"].mean())
 54      titanic_data["fare"] = titanic_data["fare"].fillna(titanic_data["fare"].mean())
 55      return titanic_data.drop(
 56          [
 57              "passengerid",
 58              "name",
 59              "ticket",
 60              "cabin",
 61              "sex",
 62              "embarked",
 63              "pclass",
 64          ],
 65          axis=1,
 66      )
 67  
 68  
# Fix PyTorch's global RNG at import time so weight initialization (and
# therefore training) is reproducible across runs.
torch.manual_seed(1)  # Set seed for reproducibility.
 71  
 72  class TitanicSimpleNNModel(nn.Module):
 73      def __init__(self):
 74          super().__init__()
 75          self.linear1 = nn.Linear(12, 12)
 76          self.sigmoid1 = nn.Sigmoid()
 77          self.linear2 = nn.Linear(12, 8)
 78          self.sigmoid2 = nn.Sigmoid()
 79          self.linear3 = nn.Linear(8, 2)
 80          self.softmax = nn.Softmax(dim=1)
 81  
 82      def forward(self, x):
 83          lin1_out = self.linear1(x)
 84          sigmoid_out1 = self.sigmoid1(lin1_out)
 85          sigmoid_out2 = self.sigmoid2(self.linear2(sigmoid_out1))
 86          return self.softmax(self.linear3(sigmoid_out2))
 87  
 88  
 89  def prepare():
 90      RANDOM_SEED = 42
 91      titanic_data = get_titanic()
 92      print(titanic_data)
 93  
 94      labels = titanic_data["survived"].to_numpy()
 95      titanic_data = titanic_data.drop(["survived"], axis=1)
 96      feature_names = list(titanic_data.columns)
 97      data = titanic_data.to_numpy()
 98      # Separate training and test sets using
 99      train_features, test_features, train_labels, test_labels = train_test_split(
100          data, labels, test_size=0.3, random_state=RANDOM_SEED, stratify=labels
101      )
102      train_features = np.vstack(train_features[:, :]).astype(np.float32)
103      test_features = np.vstack(test_features[:, :]).astype(np.float32)
104      return train_features, train_labels, test_features, test_labels, feature_names
105  
106  
def count_model_parameters(model):
    """
    Tabulate the trainable parameter count of each named parameter.

    Args:
        model: a torch.nn.Module.

    Returns:
        tuple: (PrettyTable with one row per trainable parameter tensor,
        total trainable parameter count).
    """
    table = PrettyTable(["Modules", "Parameters"])
    total_params = 0
    for name, parameter in model.named_parameters():
        if not parameter.requires_grad:
            continue
        # numel() counts every element; the previous nonzero() count
        # silently skipped parameters that happened to be exactly zero.
        param_count = parameter.numel()
        table.add_row([name, param_count])
        total_params += param_count

    return table, total_params
118  
119  
def visualize_importances(
    feature_names,
    importances,
    title="Average Feature Importances",
    plot=True,
    axis_title="Features",
):
    """
    Print, tabulate, and optionally plot per-feature importance values.

    Args:
        feature_names: sequence of feature labels.
        importances: sequence of importance scores parallel to feature_names.
        title: table/plot title; also used as the logged figure file name.
        plot: when True, log a bar chart of the importances to MLflow.
        axis_title: x-axis label for the bar chart.

    Returns:
        tuple: (PrettyTable of name/importance rows, dict mapping feature
        name to importance).
    """
    print(title)
    feature_imp = PrettyTable(["feature_name", "importances"])
    feature_imp_dict = {}
    # zip the two parallel sequences instead of indexing via range(len(...)).
    for name, importance in zip(feature_names, importances):
        print(name, ": ", f"{importance:.3f}")
        feature_imp.add_row([name, importance])
        feature_imp_dict[str(name)] = importance
    if plot:
        # x_pos is only needed for plotting, so build it inside the branch.
        x_pos = np.arange(len(feature_names))
        fig, ax = plt.subplots(figsize=(12, 6))
        ax.bar(x_pos, importances, align="center")
        ax.set(title=title, xlabel=axis_title)
        ax.set_xticks(x_pos)
        ax.set_xticklabels(feature_names, rotation="vertical")
        mlflow.log_figure(fig, title + ".png")
    return feature_imp, feature_imp_dict
143  
144  
def train(USE_PRETRAINED_MODEL=False):
    """
    Build the Titanic model and either load pretrained weights or train it.

    Args:
        USE_PRETRAINED_MODEL: fallback flag; the 'use_pretrained_model'
            CLI option (via the module-level dict_args) takes precedence
            when present.

    Returns:
        tuple: (net, train_features, train_labels, test_features,
        test_labels, feature_names).

    NOTE(review): relies on the module-level `dict_args` created in
    __main__; calling train() without it raises NameError.
    """
    net = TitanicSimpleNNModel()
    train_features, train_labels, test_features, test_labels, feature_names = prepare()
    # Honor the CLI option when present, otherwise keep the argument
    # (previously the parameter was unconditionally overwritten).
    USE_PRETRAINED_MODEL = dict_args.get("use_pretrained_model", USE_PRETRAINED_MODEL)
    if USE_PRETRAINED_MODEL:
        net.load_state_dict(torch.load("models/titanic_state_dict.pt"))
        net.eval()
        print("Model Loaded!")
    else:
        criterion = nn.CrossEntropyLoss()
        num_epochs = dict_args["max_epochs"]
        mlflow.log_param("epochs", num_epochs)
        mlflow.log_param("lr", dict_args["lr"])

        optimizer = torch.optim.Adam(net.parameters(), lr=dict_args["lr"])
        print(train_features.dtype)
        input_tensor = torch.from_numpy(train_features).type(torch.FloatTensor)
        label_tensor = torch.from_numpy(train_labels)
        # Full-batch training: each epoch is one forward/backward pass over
        # the entire training set.
        for epoch in range(num_epochs):
            output = net(input_tensor)
            loss = criterion(output, label_tensor)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if epoch % 50 == 0:
                print(f"Epoch {epoch + 1}/{num_epochs} => Train Loss: {loss.item():.2f}")
                mlflow.log_metric(
                    f"Epoch {epoch + 1!s} Loss",
                    float(loss.item()),
                    step=epoch,
                )
        # BUG FIX: the state_dict used to be saved only when the "models"
        # directory did not yet exist; now every training run saves it.
        os.makedirs("models", exist_ok=True)
        torch.save(net.state_dict(), "models/titanic_state_dict.pt")
    summary, _ = count_model_parameters(net)
    mlflow.log_text(str(summary), "model_summary.txt")
    return (
        net,
        train_features,
        train_labels,
        test_features,
        test_labels,
        feature_names,
    )
189  
190  
def compute_accuracy(net, features, labels, title=None):
    """
    Evaluate the model on the given features, log the accuracy to MLflow
    under `title`, and return the input tensor used for the forward pass
    (callers reuse it for the Captum attribution steps).
    """
    input_tensor = torch.from_numpy(features).type(torch.FloatTensor)
    probabilities = net(input_tensor).detach().numpy()
    predictions = np.argmax(probabilities, axis=1)
    # Fraction of examples whose argmax class matches the label.
    accuracy = sum(predictions == labels) / len(labels)
    mlflow.log_metric(title, float(accuracy))
    print(title, accuracy)
    return input_tensor
198  
199  
def feature_conductance(net, test_input_tensor):
    """
    Attribute the model's survival prediction to each input feature with
    Integrated Gradients, then log per-feature summaries and two plots.

    attribute() returns per-example attributions matching the input shape
    plus a convergence delta (the approximation error, discarded here).
    The attributions are averaged over examples, logged as metrics and
    text, and the sibsp feature (column 1) additionally gets a histogram
    and a binned scatter plot where dot size is proportional to the number
    of examples in the bucket.

    NOTE(review): reads the module-level globals `feature_names` and
    `test_features` that are set up in __main__.
    """
    integrated_gradients = IntegratedGradients(net)
    test_input_tensor.requires_grad_()
    attributions, _ = integrated_gradients.attribute(
        test_input_tensor, target=1, return_convergence_delta=True
    )
    attributions = attributions.detach().numpy()

    # Average attributions across all examples for a per-feature summary.
    feature_imp, feature_imp_dict = visualize_importances(
        feature_names, np.mean(attributions, axis=0)
    )
    mlflow.log_metrics(feature_imp_dict)
    mlflow.log_text(str(feature_imp), "feature_imp_summary.txt")

    fig, (ax_hist, ax_scatter) = plt.subplots(2, 1)
    fig.tight_layout(pad=3)
    ax_hist.hist(attributions[:, 1], 100)
    ax_hist.set(title="Distribution of Sibsp Attribution Values")

    # Bucket the examples by sibsp value and plot the mean attribution
    # per bucket; counts drive the marker size.
    sibsp_values = test_features[:, 1]
    sibsp_attr = attributions[:, 1]
    bin_means, bin_edges, _ = stats.binned_statistic(
        sibsp_values, sibsp_attr, statistic="mean", bins=6
    )
    bin_count, _, _ = stats.binned_statistic(
        sibsp_values, sibsp_attr, statistic="count", bins=6
    )

    bin_width = bin_edges[1] - bin_edges[0]
    bin_centers = bin_edges[1:] - bin_width / 2
    ax_scatter.scatter(bin_centers, bin_means, s=bin_count)
    ax_scatter.set(xlabel="Average Sibsp Feature Value", ylabel="Average Attribution")
    mlflow.log_figure(fig, "Average_Sibsp_Feature_Value.png")
237  
238  
def layer_conductance(net, test_input_tensor):
    """
    Measure how much each hidden neuron of the first layer contributes to
    the survival output using Captum's LayerConductance.

    LayerConductance is given the model plus the layer of interest
    (net.sigmoid1, the first hidden activation). As with the feature
    attributions, target=1 selects the survival output and the default
    zero baseline is used. Per-neuron averages are logged, and the
    attribution distributions of neurons 7 and 9 are plotted — both sit
    near zero, suggesting those neurons learn little.
    """
    conductance = LayerConductance(net, net.sigmoid1)

    conductance_values = conductance.attribute(test_input_tensor, target=1)
    conductance_values = conductance_values.detach().numpy()

    # Average conductance per neuron across all test examples.
    neuron_names = [f"neuron {index}" for index in range(12)]
    avg_neuron_imp, neuron_imp_dict = visualize_importances(
        neuron_names,
        np.mean(conductance_values, axis=0),
        title="Average Neuron Importances",
        axis_title="Neurons",
    )
    mlflow.log_metrics(neuron_imp_dict)
    mlflow.log_text(str(avg_neuron_imp), "neuron_imp_summary.txt")

    fig, (ax_top, ax_bottom) = plt.subplots(2, 1, figsize=(9, 6))
    fig.tight_layout(pad=3)
    ax_top.hist(conductance_values[:, 9], 100)
    ax_top.set(title="Neuron 9 Distribution")
    ax_bottom.hist(conductance_values[:, 7], 100)
    ax_bottom.set(title="Neuron 7 Distribution")
    mlflow.log_figure(fig, "Neurons_Distribution.png")
272  
273  
def neuron_conductance(net, test_input_tensor, neuron_selector=None):
    """
    Attribute a single hidden neuron's conductance back to the input
    features with Captum's NeuronConductance and log the summary table.

    NeuronConductance divides the neuron's total conductance value into
    the contribution from each individual input feature. It is built,
    analogously to LayerConductance, from the model plus the layer of
    interest (net.sigmoid1).

    Args:
        net: the trained model.
        test_input_tensor: test features as a float tensor.
        neuron_selector: index (or tuple, if the layer output were
            multi-dimensional) of the neuron in net.sigmoid1 to explain;
            defaults to neuron 0.

    NOTE(review): reads the module-level global `feature_names` set up
    in __main__.
    """
    # BUG FIX: the argument used to be unconditionally overwritten with 0;
    # now the caller's choice is honored and 0 is only the default.
    if neuron_selector is None:
        neuron_selector = 0
    neuron_cond = NeuronConductance(net, net.sigmoid1)

    # target=1 corresponds to survival, matching the other attribution steps.
    neuron_cond_vals = neuron_cond.attribute(
        test_input_tensor, neuron_selector=neuron_selector, target=1
    )
    neuron_cond_table, _ = visualize_importances(
        feature_names,
        neuron_cond_vals.mean(dim=0).detach().numpy(),
        title=f"Average Feature Importances for Neuron {neuron_selector}",
    )
    mlflow.log_text(
        str(neuron_cond_table),
        "Avg_Feature_Importances_Neuron_" + str(neuron_selector) + ".txt",
    )
306  
307  
308  if __name__ == "__main__":
309      parser = ArgumentParser(description="Titanic Captum Example")
310  
311      parser.add_argument(
312          "--use_pretrained_model",
313          default=False,
314          metavar="N",
315          help="Use pretrained model or train from the scratch",
316      )
317  
318      parser.add_argument(
319          "--max_epochs",
320          type=int,
321          default=100,
322          metavar="N",
323          help="Number of epochs to be used for training",
324      )
325  
326      parser.add_argument(
327          "--lr",
328          type=float,
329          default=0.1,
330          metavar="LR",
331          help="learning rate (default: 0.1)",
332      )
333  
334      args = parser.parse_args()
335      dict_args = vars(args)
336  
337      with mlflow.start_run(run_name="Titanic_Captum_mlflow"):
338          net, train_features, train_labels, test_features, test_labels, feature_names = train()
339  
340          compute_accuracy(net, train_features, train_labels, title="Train Accuracy")
341          test_input_tensor = compute_accuracy(net, test_features, test_labels, title="Test Accuracy")
342          feature_conductance(net, test_input_tensor)
343          layer_conductance(net, test_input_tensor)
344          neuron_conductance(net, test_input_tensor)
345          mlflow.log_param("Train Size", len(train_labels))
346          mlflow.log_param("Test Size", len(test_labels))