# tests/test_analysis_cproc.py
  1  # Python Imports
  2  import unittest
  3  from unittest.mock import patch, mock_open
  4  
  5  # Project Imports
  6  from src import analysis_cproc
  7  
  8  
  9  class TestAnalysisCProc(unittest.TestCase):
 10  
 11      def test_compute_simulation_time_window(self):
 12              min_tss = 1000000
 13              max_tss = 2000000
 14              expected_simulation_start_ts = min_tss
 15              expected_simulation_end_ts = max_tss
 16              expected_simulation_time_ms = round((max_tss - min_tss) / 1000000)
 17  
 18              actual_simulation_start_ts, actual_simulation_end_ts, actual_simulation_time_ms = analysis_cproc.compute_simulation_time_window(min_tss, max_tss)
 19  
 20              self.assertEqual(actual_simulation_start_ts, expected_simulation_start_ts)
 21              self.assertEqual(actual_simulation_end_ts, expected_simulation_end_ts)
 22              self.assertEqual(actual_simulation_time_ms, expected_simulation_time_ms)
 23  
 24      def test_extract_node_id(self):
 25          # Case 1: Standard string with node ID
 26          s = "node-123.toml"
 27          expected_node_id = "node_123"
 28          self.assertEqual(analysis_cproc.extract_node_id(s), expected_node_id)
 29  
 30          # Case 2: String with node ID but additional characters
 31          s = "prefix-node-456.toml-suffix"
 32          expected_node_id = "node_456"
 33          self.assertEqual(analysis_cproc.extract_node_id(s), expected_node_id)
 34  
 35          # Case 3: String without node ID
 36          s = "node.toml"
 37          expected_node_id = None
 38          self.assertEqual(analysis_cproc.extract_node_id(s), expected_node_id)
 39  
 40          # Case 4: Empty string
 41          s = ""
 42          expected_node_id = None
 43          self.assertEqual(analysis_cproc.extract_node_id(s), expected_node_id)
 44  
 45      def test_add_sample_to_metrics(self):
 46          # Case 1: Adding a sample to an empty metrics_dict
 47          metrics_dict = {}
 48          node_id = "node_123"
 49          sample = {'PID': 123}
 50  
 51          nodes_cnt = analysis_cproc.add_sample_to_metrics(sample, node_id, metrics_dict)
 52  
 53          self.assertEqual(nodes_cnt, 1)
 54          self.assertEqual(metrics_dict, {node_id: {'samples' : [sample]}})
 55  
 56          # Case 2: Adding a sample to a metrics_dict that already contains the node_id
 57          sample2 = {'PID': 456}
 58          nodes_cnt = analysis_cproc.add_sample_to_metrics(sample2, node_id, metrics_dict)
 59  
 60          self.assertEqual(nodes_cnt, 0)  # It should return 0 because the node_id already exists in the metrics_dict
 61          self.assertEqual(metrics_dict, {node_id: {'samples' : [sample, sample2]}})
 62  
 63      def test_parse_container_nodes(self):
 64          # Case 1: Sample PID exists in container nodes
 65          container_id = 'container_123'
 66          container_data = {'samples': [{'PID': 1}, {'PID': 2}]}
 67          container_nodes = {1: 'node_1', 2: 'node_2'}
 68          metrics_dict = {}
 69  
 70          nodes_cnt = analysis_cproc.parse_container_nodes(container_id, container_data, container_nodes, metrics_dict)
 71  
 72          self.assertEqual(nodes_cnt, 2)
 73          self.assertDictEqual(metrics_dict, {'node_1': {'samples': [{'PID': 1}]}, 'node_2': {'samples': [{'PID': 2}]}})
 74  
 75          # Case 2: Sample PID does not exist in container nodes
 76          container_id = 'container_456'
 77          container_data = {'samples': [{'PID': 3}]}
 78          container_nodes = {1: 'node_1', 2: 'node_2'}
 79          metrics_dict = {}
 80  
 81          nodes_cnt = analysis_cproc.parse_container_nodes(container_id, container_data, container_nodes, metrics_dict)
 82  
 83          self.assertEqual(nodes_cnt, 0)
 84          self.assertDictEqual(metrics_dict, {})
 85  
 86      def test_extract_container_nodes(self):
 87          # Case 1: All processes have node IDs
 88          container_id = 'container_123'
 89          container_data = {'info': {'processes': [{'binary': 'node-1.toml', 'pid': 1}, {'binary': 'node-2.toml', 'pid': 2}]}}
 90  
 91          container_nodes = analysis_cproc.extract_container_nodes(container_id, container_data)
 92  
 93          self.assertDictEqual(container_nodes, {1: 'node_1', 2: 'node_2'})
 94  
 95          # Case 2: Some processes don't have node IDs
 96          container_id = 'container_456'
 97          container_data = {'info': {'processes': [{'binary': 'node.toml', 'pid': 1}, {'binary': 'node-2.toml', 'pid': 2}]}}
 98  
 99          container_nodes = analysis_cproc.extract_container_nodes(container_id, container_data)
100  
101          self.assertDictEqual(container_nodes, {2: 'node_2'})
102  
103          # Case 3: No processes have node IDs
104          container_id = 'container_789'
105          container_data = {'info': {'processes': [{'binary': 'node.toml', 'pid': 1}, {'binary': 'node.toml', 'pid': 2}]}}
106  
107          container_nodes = analysis_cproc.extract_container_nodes(container_id, container_data)
108  
109          self.assertDictEqual(container_nodes, {})
110  
111      @patch("json.load")
112      @patch("builtins.open", new_callable=mock_open, read_data="data")
113      def test_load_metrics_file(self, mock_file, mock_json):
114          # Case 1: Successful load
115          mock_json.return_value = {'header': 'header', 'containers': {'container1': 'data1', 'container2': 'data2'}}
116          metrics_file_path = 'path/to/metrics_file.json'
117  
118          metrics_obj = analysis_cproc.load_metrics_file(metrics_file_path)
119  
120          self.assertDictEqual(metrics_obj, {'header': 'header', 'containers': {'container1': 'data1', 'container2': 'data2'}})
121  
122          # Reset the mock
123          mock_file.reset_mock()
124  
125          # Case 2: Unsuccessful load (file does not exist)
126          mock_file.side_effect = FileNotFoundError()
127          metrics_file_path = 'path/to/non_existent_file.json'
128  
129          with self.assertRaises(FileNotFoundError):  # The function should raise FileNotFoundError, not SystemExit
130              analysis_cproc.load_metrics_file(metrics_file_path)
131  
132  
133  
134  
class TestLoadProcessLevelMetrics(unittest.TestCase):
    """Tests for analysis_cproc.load_process_level_metrics.

    Restored as a unittest.TestCase: this test was previously a module-level
    function taking ``self``, so neither unittest nor pytest ever ran it.
    """

    # NOTE(review): patch targets carry the "src." prefix to match the
    # `from src import analysis_cproc` import at the top of this file —
    # the bare "analysis_cproc...." targets could not be resolved by
    # mock.patch. Confirm the package path against the project layout.
    @patch("builtins.open", new_callable=mock_open, read_data='{"key":"value"}')
    @patch("src.analysis_cproc.load_metrics_file")
    @patch("src.analysis_cproc.process_metrics_file")
    @patch("src.analysis_cproc.analysis_logger.G_LOGGER")
    def test_load_process_level_metrics(self, mock_logger, mock_process_metrics_file, mock_load_metrics_file, mock_file_open):
        """Normal path delegates to load_metrics_file/process_metrics_file;
        a failing load must be logged via G_LOGGER.error."""
        # Case 1: Normal execution
        metrics_file_path = 'path/to/metrics_file.json'
        mock_load_metrics_file.return_value = {
            'header': 'header',
            'containers': {
                'container_1': {
                    'info': {'processes': [{'binary': 'node-1.toml', 'pid': 1}]},
                    'samples': [{'PID': 1}]
                }
            }
        }
        mock_process_metrics_file.return_value = (mock_load_metrics_file.return_value, None)

        # The function may sys.exit() on some paths; tolerate SystemExit here.
        try:
            analysis_cproc.load_process_level_metrics(metrics_file_path)
        except SystemExit:
            pass

        mock_load_metrics_file.assert_called_once_with(metrics_file_path)
        mock_process_metrics_file.assert_called_once_with(mock_load_metrics_file.return_value)

        # Case 2: Exception handling — the error must be logged exactly once.
        mock_load_metrics_file.reset_mock()
        mock_load_metrics_file.side_effect = Exception("Test exception")
        try:
            analysis_cproc.load_process_level_metrics(metrics_file_path)
        except SystemExit:
            pass
        mock_logger.error.assert_called_once()
169  
170  
171  
class TestComputeNodeMetrics(unittest.TestCase):
    """Tests for analysis_cproc.compute_node_metrics.

    Restored as a unittest.TestCase: this test was previously a module-level
    function taking ``self``, so neither unittest nor pytest ever ran it.
    """

    def test_compute_node_metrics(self):
        """One sample: counts, maxima, and byte fields converted to MB
        (2048 bytes == 2048 / 1024**2 == 0.001953125 MB)."""
        # Case 1: Valid node data
        node_obj = {
            'samples': [
                {'CPUPercentage': 10, 'MemoryUsageMB': 500,
                 'NetStats': {'all': {'total_received': 2048, 'total_sent': 2048}},
                 'DiskIORChar': 2048, 'DiskIOWChar': 2048}
            ]
        }

        num_samples, max_cpu_usage, max_memory_usage, total_rx_mbytes, total_tx_mbytes, max_disk_read_mbytes, max_disk_write_mbytes = analysis_cproc.compute_node_metrics(node_obj)

        self.assertEqual(num_samples, 1)
        self.assertEqual(max_cpu_usage, 10)
        self.assertEqual(max_memory_usage, 500)
        self.assertEqual(total_rx_mbytes, 0.001953125)
        self.assertEqual(total_tx_mbytes, 0.001953125)
        self.assertEqual(max_disk_read_mbytes, 0.001953125)
        self.assertEqual(max_disk_write_mbytes, 0.001953125)
191  
192  
class TestComputeProcessLevelMetrics(unittest.TestCase):
    """Tests for analysis_cproc.compute_process_level_metrics.

    Restored as a unittest.TestCase: this test was previously a module-level
    function taking ``self``, so neither unittest nor pytest ever ran it.
    """

    # NOTE(review): patch targets carry the "src." prefix to match the
    # `from src import analysis_cproc` import at the top of this file —
    # the bare "analysis_cproc...." targets could not be resolved by
    # mock.patch. Confirm the package path against the project layout.
    @patch("src.analysis_cproc.load_metrics_file")
    @patch("src.analysis_cproc.compute_node_metrics")
    def test_compute_process_level_metrics(self, mock_compute_node_metrics, mock_load_metrics_file):
        """Normal execution returns a 6-tuple and computes per-node metrics."""
        # Case 1: Normal execution
        simulation_path = 'path/to/simulation'
        config_obj = {'key': 'value'}
        mock_load_metrics_file.return_value = (
            {
                'header': 'header',
                'containers': {
                    'container_1': {
                        'info': {'processes': [{'binary': 'node-1.toml', 'pid': 1}]},
                        'samples': [{'PID': 1}]
                    }
                }
            },
            'info'
        )
        mock_compute_node_metrics.return_value = (0, 0, 0, 0, 0, 0, 0)  # mock_compute_node_metrics should return a tuple
        result = analysis_cproc.compute_process_level_metrics(simulation_path, config_obj)

        self.assertIsInstance(result, tuple)
        self.assertEqual(len(result), 6)
        mock_compute_node_metrics.assert_called()
217  
class TestExportSummary(unittest.TestCase):
    """Tests for analysis_cproc.export_summary.

    Restored as a unittest.TestCase: this test was previously a module-level
    function taking ``self``, so neither unittest nor pytest ever ran it.
    The parameter previously named ``mock_open`` shadowed the imported
    ``unittest.mock.mock_open`` helper; renamed to ``mock_file_open``.
    """

    @patch("builtins.open", new_callable=mock_open)
    @patch("json.dump")
    def test_export_summary(self, mock_json_dump, mock_file_open):
        """export_summary writes <simulation_path>/summary.json via json.dump."""
        # Case 1: Normal execution
        simulation_path = 'path/to/simulation'
        summary = {'key': 'value'}

        analysis_cproc.export_summary(simulation_path, summary)

        mock_file_open.assert_called_with(f'{simulation_path}/summary.json', 'w')
        # mock_file_open() yields the same handle the code under test wrote to.
        mock_json_dump.assert_called_with(summary, mock_file_open(), indent=4)
229  
230