  1  """Tests for browser_console tool and browser_vision annotate param."""
  2  
  3  import json
  4  import os
  5  import sys
  6  from pathlib import Path
  7  from unittest.mock import patch, MagicMock
  8  
  9  import pytest
 10  
 11  sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
 12  
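
# The mocks below assume _run_browser_command returns a JSON-style envelope.
# A minimal sketch of that assumed shape (inferred from this file's fixtures,
# not from the tool's documented contract):
_ENVELOPE_SKETCH = {
    "success": True,  # False on failure, with an "error" string instead of "data"
    "data": {"messages": [], "errors": []},
}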

# ── browser_console ──────────────────────────────────────────────────


class TestBrowserConsole:
    """browser_console() returns console messages + JS errors in one call."""

    def test_returns_console_messages_and_errors(self):
        from tools.browser_tool import browser_console

        console_response = {
            "success": True,
            "data": {
                "messages": [
                    {"text": "hello", "type": "log", "timestamp": 1},
                    {"text": "oops", "type": "error", "timestamp": 2},
                ]
            },
        }
        errors_response = {
            "success": True,
            "data": {
                "errors": [
                    {"message": "Uncaught TypeError", "timestamp": 3},
                ]
            },
        }

        with patch("tools.browser_tool._run_browser_command") as mock_cmd:
            mock_cmd.side_effect = [console_response, errors_response]
            result = json.loads(browser_console(task_id="test"))

        assert result["success"] is True
        assert result["total_messages"] == 2
        assert result["total_errors"] == 1
        assert result["console_messages"][0]["text"] == "hello"
        assert result["console_messages"][1]["text"] == "oops"
        assert result["js_errors"][0]["message"] == "Uncaught TypeError"

    def test_passes_clear_flag(self):
        from tools.browser_tool import browser_console

        empty = {"success": True, "data": {"messages": [], "errors": []}}
        with patch("tools.browser_tool._run_browser_command", return_value=empty) as mock_cmd:
            browser_console(clear=True, task_id="test")

        calls = mock_cmd.call_args_list
        # Both console and errors should get --clear
        assert calls[0][0] == ("test", "console", ["--clear"])
        assert calls[1][0] == ("test", "errors", ["--clear"])

    def test_no_clear_by_default(self):
        from tools.browser_tool import browser_console

        empty = {"success": True, "data": {"messages": [], "errors": []}}
        with patch("tools.browser_tool._run_browser_command", return_value=empty) as mock_cmd:
            browser_console(task_id="test")

        calls = mock_cmd.call_args_list
        assert calls[0][0] == ("test", "console", [])
        assert calls[1][0] == ("test", "errors", [])

    def test_empty_console_and_errors(self):
        from tools.browser_tool import browser_console

        empty = {"success": True, "data": {"messages": [], "errors": []}}
        with patch("tools.browser_tool._run_browser_command", return_value=empty):
            result = json.loads(browser_console(task_id="test"))

        assert result["total_messages"] == 0
        assert result["total_errors"] == 0
        assert result["console_messages"] == []
        assert result["js_errors"] == []

    def test_handles_failed_commands(self):
        from tools.browser_tool import browser_console

        failed = {"success": False, "error": "No session"}
        with patch("tools.browser_tool._run_browser_command", return_value=failed):
            result = json.loads(browser_console(task_id="test"))

        # Should still return success with empty data
        assert result["success"] is True
        assert result["total_messages"] == 0
        assert result["total_errors"] == 0

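
# For orientation, a minimal sketch of the merge behaviour asserted above.
# This illustrates the expected contract; it is not the actual
# tools.browser_tool implementation:
def _sketch_browser_console(run_cmd, task_id, clear=False):
    """Combine the 'console' and 'errors' subcommands into one payload."""
    flags = ["--clear"] if clear else []
    console = run_cmd(task_id, "console", flags)
    errors = run_cmd(task_id, "errors", flags)
    # Failed subcommands degrade to empty lists; overall success stays True.
    messages = console.get("data", {}).get("messages", []) if console.get("success") else []
    js_errors = errors.get("data", {}).get("errors", []) if errors.get("success") else []
    return {
        "success": True,
        "total_messages": len(messages),
        "total_errors": len(js_errors),
        "console_messages": messages,
        "js_errors": js_errors,
    }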

# ── browser_console schema ───────────────────────────────────────────


class TestBrowserConsoleSchema:
    """browser_console is properly registered in the tool registry."""

    def test_schema_in_browser_schemas(self):
        from tools.browser_tool import BROWSER_TOOL_SCHEMAS

        names = [s["name"] for s in BROWSER_TOOL_SCHEMAS]
        assert "browser_console" in names

    def test_schema_has_clear_param(self):
        from tools.browser_tool import BROWSER_TOOL_SCHEMAS

        schema = next(s for s in BROWSER_TOOL_SCHEMAS if s["name"] == "browser_console")
        props = schema["parameters"]["properties"]
        assert "clear" in props
        assert props["clear"]["type"] == "boolean"

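
# A hypothetical BROWSER_TOOL_SCHEMAS entry consistent with the two tests
# above; any fields beyond "name" and the "clear" property are assumptions:
_CONSOLE_SCHEMA_SKETCH = {
    "name": "browser_console",
    "parameters": {
        "type": "object",
        "properties": {
            "clear": {"type": "boolean"},
        },
    },
}
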

class TestBrowserConsoleToolsetWiring:
    """browser_console must be reachable via toolset resolution."""

    def test_in_browser_toolset(self):
        from toolsets import TOOLSETS
        assert "browser_console" in TOOLSETS["browser"]["tools"]

    def test_in_hermes_core_tools(self):
        from toolsets import _HERMES_CORE_TOOLS
        assert "browser_console" in _HERMES_CORE_TOOLS

    def test_in_legacy_toolset_map(self):
        from model_tools import _LEGACY_TOOLSET_MAP
        assert "browser_console" in _LEGACY_TOOLSET_MAP["browser_tools"]

    def test_in_registry(self):
        from tools.registry import registry
        from tools import browser_tool  # noqa: F401
        assert "browser_console" in registry._tools


# ── browser_vision annotate ──────────────────────────────────────────


class TestBrowserVisionAnnotate:
    """browser_vision supports annotate parameter."""

    def test_schema_has_annotate_param(self):
        from tools.browser_tool import BROWSER_TOOL_SCHEMAS

        schema = next(s for s in BROWSER_TOOL_SCHEMAS if s["name"] == "browser_vision")
        props = schema["parameters"]["properties"]
        assert "annotate" in props
        assert props["annotate"]["type"] == "boolean"

    def test_annotate_false_no_flag(self):
        """Without annotate, the screenshot command has no --annotate flag."""
        from tools.browser_tool import browser_vision

        with (
            patch("tools.browser_tool._run_browser_command") as mock_cmd,
            patch("tools.browser_tool.call_llm"),
            patch("tools.browser_tool._get_vision_model", return_value="test-model"),
        ):
            mock_cmd.return_value = {"success": True, "data": {}}
            # Will fail at the screenshot file read, but we can still inspect
            # the command that was issued.
            try:
                browser_vision("test", annotate=False, task_id="test")
            except Exception:
                pass

            if mock_cmd.called:
                args = mock_cmd.call_args[0]
                cmd_args = args[2] if len(args) > 2 else []
                assert "--annotate" not in cmd_args

    def test_annotate_true_adds_flag(self):
        """With annotate=True, the screenshot command includes --annotate."""
        from tools.browser_tool import browser_vision

        with (
            patch("tools.browser_tool._run_browser_command") as mock_cmd,
            patch("tools.browser_tool.call_llm"),
            patch("tools.browser_tool._get_vision_model", return_value="test-model"),
        ):
            mock_cmd.return_value = {"success": True, "data": {}}
            try:
                browser_vision("test", annotate=True, task_id="test")
            except Exception:
                pass

            if mock_cmd.called:
                args = mock_cmd.call_args[0]
                cmd_args = args[2] if len(args) > 2 else []
                assert "--annotate" in cmd_args

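
# The flag mapping the two tests above pin down, sketched as a helper
# (illustrative only; the real plumbing lives inside browser_vision):
def _sketch_screenshot_flags(annotate):
    return ["--annotate"] if annotate else []
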

class TestBrowserVisionConfig:
    def _setup_screenshot(self, tmp_path):
        shots_dir = tmp_path / "browser_screenshots"
        shots_dir.mkdir()
        screenshot = shots_dir / "shot.png"
        screenshot.write_bytes(b"\x89PNG\r\n\x1a\n" + b"\x00" * 8)
        return shots_dir, screenshot

    def test_browser_vision_uses_configured_temperature_and_timeout(self, tmp_path):
        from tools.browser_tool import browser_vision

        shots_dir, screenshot = self._setup_screenshot(tmp_path)
        mock_response = MagicMock()
        mock_choice = MagicMock()
        mock_choice.message.content = "Annotated screenshot analysis"
        mock_response.choices = [mock_choice]

        with (
            patch("hermes_constants.get_hermes_dir", return_value=shots_dir),
            patch("tools.browser_tool._cleanup_old_screenshots"),
            patch("tools.browser_tool._run_browser_command", return_value={"success": True, "data": {"path": str(screenshot)}}),
            patch("tools.browser_tool._get_vision_model", return_value="test-model"),
            patch("hermes_cli.config.load_config", return_value={"auxiliary": {"vision": {"temperature": 1, "timeout": 45}}}),
            patch("tools.browser_tool.call_llm", return_value=mock_response) as mock_llm,
        ):
            result = json.loads(browser_vision("what is on the page?", task_id="test"))

        assert result["success"] is True
        assert result["analysis"] == "Annotated screenshot analysis"
        assert mock_llm.call_args.kwargs["temperature"] == 1.0
        assert mock_llm.call_args.kwargs["timeout"] == 45.0

    def test_browser_vision_defaults_temperature_when_config_omits_it(self, tmp_path):
        from tools.browser_tool import browser_vision

        shots_dir, screenshot = self._setup_screenshot(tmp_path)
        mock_response = MagicMock()
        mock_choice = MagicMock()
        mock_choice.message.content = "Default screenshot analysis"
        mock_response.choices = [mock_choice]

        with (
            patch("hermes_constants.get_hermes_dir", return_value=shots_dir),
            patch("tools.browser_tool._cleanup_old_screenshots"),
            patch("tools.browser_tool._run_browser_command", return_value={"success": True, "data": {"path": str(screenshot)}}),
            patch("tools.browser_tool._get_vision_model", return_value="test-model"),
            patch("hermes_cli.config.load_config", return_value={"auxiliary": {"vision": {}}}),
            patch("tools.browser_tool.call_llm", return_value=mock_response) as mock_llm,
        ):
            result = json.loads(browser_vision("what is on the page?", task_id="test"))

        assert result["success"] is True
        assert result["analysis"] == "Default screenshot analysis"
        assert mock_llm.call_args.kwargs["temperature"] == 0.1
        assert mock_llm.call_args.kwargs["timeout"] == 120.0

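
# The defaults asserted above, sketched as the config lookup browser_vision
# presumably performs. The nesting mirrors the patched load_config payloads;
# the helper itself is hypothetical:
def _sketch_vision_params(config):
    vision = config.get("auxiliary", {}).get("vision", {})
    return float(vision.get("temperature", 0.1)), float(vision.get("timeout", 120.0))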

# ── auto-recording config ────────────────────────────────────────────


class TestRecordSessionsConfig:
    """browser.record_sessions config option."""

    def test_default_config_has_record_sessions(self):
        from hermes_cli.config import DEFAULT_CONFIG

        browser_cfg = DEFAULT_CONFIG.get("browser", {})
        assert "record_sessions" in browser_cfg
        assert browser_cfg["record_sessions"] is False

    def test_maybe_start_recording_disabled(self):
        """Recording doesn't start when record_sessions resolves to False
        (the default when no config file is readable)."""
        from tools.browser_tool import _maybe_start_recording, _recording_sessions

        with (
            patch("tools.browser_tool._run_browser_command") as mock_cmd,
            patch("builtins.open", side_effect=FileNotFoundError),
        ):
            _maybe_start_recording("test-task")

        mock_cmd.assert_not_called()
        assert "test-task" not in _recording_sessions

    def test_maybe_stop_recording_noop_when_not_recording(self):
        """Stopping when not recording is a no-op."""
        from tools.browser_tool import _maybe_stop_recording, _recording_sessions

        _recording_sessions.discard("test-task")  # ensure not in set
        with patch("tools.browser_tool._run_browser_command") as mock_cmd:
            _maybe_stop_recording("test-task")

        mock_cmd.assert_not_called()

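
# A sketch of the gate these tests rely on. Assumption: _maybe_start_recording
# reads browser.record_sessions from config and no-ops when it is falsy:
def _sketch_should_record(config):
    return bool(config.get("browser", {}).get("record_sessions", False))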

# ── dogfood skill files ──────────────────────────────────────────────


class TestDogfoodSkill:
    """Dogfood skill files exist and have correct structure."""

    @pytest.fixture(autouse=True)
    def _skill_dir(self):
        # Use the actual repo skills dir (not temp)
        self.skill_dir = os.path.join(
            os.path.dirname(__file__), "..", "..", "skills", "dogfood"
        )

    def test_skill_md_exists(self):
        assert os.path.exists(os.path.join(self.skill_dir, "SKILL.md"))

    def test_taxonomy_exists(self):
        assert os.path.exists(
            os.path.join(self.skill_dir, "references", "issue-taxonomy.md")
        )

    def test_report_template_exists(self):
        assert os.path.exists(
            os.path.join(self.skill_dir, "templates", "dogfood-report-template.md")
        )

    def test_skill_md_has_frontmatter(self):
        with open(os.path.join(self.skill_dir, "SKILL.md")) as f:
            content = f.read()
        assert content.startswith("---")
        assert "name: dogfood" in content
        assert "description:" in content

    def test_skill_references_browser_console(self):
        with open(os.path.join(self.skill_dir, "SKILL.md")) as f:
            content = f.read()
        assert "browser_console" in content

    def test_skill_references_annotate(self):
        with open(os.path.join(self.skill_dir, "SKILL.md")) as f:
            content = f.read()
        assert "annotate" in content

    def test_taxonomy_has_severity_levels(self):
        with open(
            os.path.join(self.skill_dir, "references", "issue-taxonomy.md")
        ) as f:
            content = f.read()
        assert "Critical" in content
        assert "High" in content
        assert "Medium" in content
        assert "Low" in content

    def test_taxonomy_has_categories(self):
        with open(
            os.path.join(self.skill_dir, "references", "issue-taxonomy.md")
        ) as f:
            content = f.read()
        assert "Functional" in content
        assert "Visual" in content
        assert "Accessibility" in content
        assert "Console" in content
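

# A minimal SKILL.md frontmatter that would satisfy test_skill_md_has_frontmatter
# (illustrative; the real file likely carries more fields):
_FRONTMATTER_SKETCH = """---
name: dogfood
description: Self-audit the app using browser_console and annotated screenshots.
---
"""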