# tests/agent/test_curator_backup.py
  1  """Tests for agent/curator_backup.py — snapshot + rollback of the skills tree."""
  2  
  3  from __future__ import annotations
  4  
  5  import importlib
  6  import json
  7  import os
  8  import sys
  9  import tarfile
 10  import tempfile
 11  from pathlib import Path
 12  
 13  import pytest
 14  
 15  
 16  @pytest.fixture
 17  def backup_env(monkeypatch, tmp_path):
 18      """Isolate HERMES_HOME + reload modules so every test starts clean."""
 19      home = tmp_path / ".hermes"
 20      home.mkdir()
 21      (home / "skills").mkdir()
 22      monkeypatch.setenv("HERMES_HOME", str(home))
 23      monkeypatch.setattr(Path, "home", lambda: tmp_path)
 24  
 25      # Reload so get_hermes_home picks up the env var fresh.
 26      import hermes_constants
 27      importlib.reload(hermes_constants)
 28      from agent import curator_backup
 29      importlib.reload(curator_backup)
 30      return {"home": home, "skills": home / "skills", "cb": curator_backup}
 31  
 32  
 33  def _write_skill(skills_dir: Path, name: str, body: str = "body") -> Path:
 34      d = skills_dir / name
 35      d.mkdir(parents=True, exist_ok=True)
 36      (d / "SKILL.md").write_text(
 37          f"---\nname: {name}\ndescription: t\nversion: 1.0\n---\n\n{body}\n",
 38          encoding="utf-8",
 39      )
 40      return d
 41  
 42  
 43  # ---------------------------------------------------------------------------
 44  # snapshot_skills
 45  # ---------------------------------------------------------------------------
 46  
 47  def test_snapshot_creates_tarball_and_manifest(backup_env):
 48      cb = backup_env["cb"]
 49      _write_skill(backup_env["skills"], "alpha")
 50      _write_skill(backup_env["skills"], "beta")
 51  
 52      snap = cb.snapshot_skills(reason="test")
 53      assert snap is not None, "snapshot should succeed with a populated skills dir"
 54      assert (snap / "skills.tar.gz").exists()
 55      manifest = json.loads((snap / "manifest.json").read_text())
 56      assert manifest["reason"] == "test"
 57      assert manifest["skill_files"] == 2
 58      assert manifest["archive_bytes"] > 0
 59  
 60  
 61  def test_snapshot_excludes_backups_dir_itself(backup_env):
 62      """The backup must NOT contain .curator_backups/ — that would recurse
 63      with every subsequent snapshot and balloon disk usage."""
 64      cb = backup_env["cb"]
 65      _write_skill(backup_env["skills"], "alpha")
 66      snap1 = cb.snapshot_skills(reason="first")
 67      assert snap1 is not None
 68      snap2 = cb.snapshot_skills(reason="second")
 69      assert snap2 is not None
 70      with tarfile.open(snap2 / "skills.tar.gz") as tf:
 71          names = tf.getnames()
 72      assert not any(n.startswith(".curator_backups") for n in names), (
 73          "second snapshot must not contain the first snapshot recursively"
 74      )
 75  
 76  
 77  def test_snapshot_excludes_hub_dir(backup_env):
 78      """.hub/ is managed by the skills hub. Rolling it back would break
 79      lockfile invariants, so the snapshot omits it entirely."""
 80      cb = backup_env["cb"]
 81      hub = backup_env["skills"] / ".hub"
 82      hub.mkdir()
 83      (hub / "lock.json").write_text("{}")
 84      _write_skill(backup_env["skills"], "alpha")
 85      snap = cb.snapshot_skills(reason="t")
 86      assert snap is not None
 87      with tarfile.open(snap / "skills.tar.gz") as tf:
 88          names = tf.getnames()
 89      assert not any(n.startswith(".hub") for n in names)
 90  
 91  
 92  def test_snapshot_disabled_returns_none(backup_env, monkeypatch):
 93      cb = backup_env["cb"]
 94      monkeypatch.setattr(cb, "is_enabled", lambda: False)
 95      _write_skill(backup_env["skills"], "alpha")
 96      assert cb.snapshot_skills() is None
 97      # And no backup dir should have been created
 98      assert not (backup_env["skills"] / ".curator_backups").exists()
 99  
100  
def test_snapshot_uniquifies_when_same_second(backup_env, monkeypatch):
    """Two snapshots in the same wallclock second must not clobber each
    other. The module appends a counter to the second snapshot's id."""
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")
    stamp = "2026-05-01T12-00-00Z"
    # Freeze the id generator so both snapshots collide on the same second.
    monkeypatch.setattr(cb, "_utc_id", lambda now=None: stamp)
    first = cb.snapshot_skills(reason="a")
    second = cb.snapshot_skills(reason="b")
    assert first is not None and second is not None
    assert first.name == stamp
    assert second.name == f"{stamp}-01"
113  
114  
def test_snapshot_prunes_to_keep_count(backup_env, monkeypatch):
    # With keep=3, taking 5 snapshots must leave only the 3 newest on disk.
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")
    monkeypatch.setattr(cb, "get_keep", lambda: 3)

    # Create 5 snapshots with monotonically increasing fake ids
    ids = [f"2026-05-0{day}T00-00-00Z" for day in range(1, 6)]
    for idx, snap_id in enumerate(ids):
        monkeypatch.setattr(cb, "_utc_id", lambda now=None, _f=snap_id: _f)
        cb.snapshot_skills(reason=f"n{idx}")

    backups_dir = backup_env["skills"] / ".curator_backups"
    remaining = sorted(entry.name for entry in backups_dir.iterdir())
    # Newest 3 kept (lex order == date order for this id format)
    assert remaining == ids[2:], f"expected newest 3, got {remaining}"
129  
130  
131  # ---------------------------------------------------------------------------
132  # list_backups / _resolve_backup
133  # ---------------------------------------------------------------------------
134  
def test_list_backups_empty(backup_env):
    # Never snapshotted → listing is an empty list, not None or an error.
    assert backup_env["cb"].list_backups() == []
138  
139  
def test_list_backups_returns_manifest_data(backup_env):
    # One snapshot → one row, carrying the manifest's reason + file count.
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")
    cb.snapshot_skills(reason="m1")
    listing = cb.list_backups()
    assert len(listing) == 1
    entry = listing[0]
    assert entry["reason"] == "m1"
    assert entry["skill_files"] == 1
148  
149  
def test_resolve_backup_newest_when_no_id(backup_env, monkeypatch):
    # With no explicit id, _resolve_backup picks the most recent snapshot.
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")
    for fid in ("2026-05-01T00-00-00Z", "2026-05-02T00-00-00Z"):
        monkeypatch.setattr(cb, "_utc_id", lambda now=None, _f=fid: _f)
        cb.snapshot_skills()
    newest = cb._resolve_backup(None)
    assert newest is not None
    assert newest.name == "2026-05-02T00-00-00Z", (
        "resolve(None) must return newest regular snapshot"
    )
162  
163  
def test_resolve_backup_unknown_id_returns_none(backup_env):
    # An id matching no snapshot dir resolves to None rather than raising.
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")
    cb.snapshot_skills()
    assert cb._resolve_backup("not-an-id") is None
169  
170  
171  # ---------------------------------------------------------------------------
172  # rollback
173  # ---------------------------------------------------------------------------
174  
def test_rollback_restores_deleted_skill(backup_env):
    """The whole point of this feature: user loses a skill, rollback
    brings it back."""
    cb = backup_env["cb"]
    skills_dir = backup_env["skills"]
    skill_dir = _write_skill(skills_dir, "my-personal-workflow", body="important content")
    cb.snapshot_skills(reason="pre-simulated-curator")

    # Simulate curator archiving it out of existence
    import shutil as _sh
    _sh.rmtree(skill_dir)
    assert not skill_dir.exists()

    ok, msg, _ = cb.rollback()
    assert ok, f"rollback failed: {msg}"
    assert skill_dir.exists(), "my-personal-workflow should be restored"
    assert "important content" in (skill_dir / "SKILL.md").read_text()
192  
193  
def test_rollback_is_itself_undoable(backup_env):
    """A rollback creates its own safety snapshot before replacing the
    tree, so the user can undo a mistaken rollback. The safety snapshot
    is a real tarball with reason='pre-rollback to <id>' — it's
    listed by list_backups() just like any other snapshot and can be
    restored the same way."""
    cb = backup_env["cb"]
    tree = backup_env["skills"]
    _write_skill(tree, "v1")
    cb.snapshot_skills(reason="snapshot-of-v1")

    # Overwrite with a new skill state
    import shutil as _sh
    _sh.rmtree(tree / "v1")
    _write_skill(tree, "v2")

    ok, _, _ = cb.rollback()
    assert ok
    assert (tree / "v1").exists()

    # list_backups should show a safety snapshot tagged "pre-rollback to <target-id>"
    listing = cb.list_backups()
    safety = [entry for entry in listing if "pre-rollback" in (entry.get("reason") or "")]
    assert len(safety) >= 1, (
        f"expected a pre-rollback safety snapshot in list_backups(), got: "
        f"{[(entry.get('id'), entry.get('reason')) for entry in listing]}"
    )
    # And the transient staging dir must be gone (it's implementation detail)
    backups_dir = tree / ".curator_backups"
    staging_dirs = [p for p in backups_dir.iterdir() if p.name.startswith(".rollback-staging-")]
    assert staging_dirs == [], (
        f"staging dir should be cleaned up on success, got: {staging_dirs}"
    )
227  
228  
def test_rollback_no_snapshots_returns_error(backup_env):
    # Nothing to restore from → (False, explanatory message, ...).
    cb = backup_env["cb"]
    ok, msg, _ = cb.rollback()
    assert not ok
    lowered = msg.lower()
    assert "no matching backup" in lowered or "no snapshot" in lowered
234  
235  
def test_rollback_rejects_unsafe_tarball(backup_env, monkeypatch):
    """Tarballs with absolute paths or .. components must be refused even
    if someone crafts a malicious snapshot. Defense in depth — normal
    curator snapshots never produce these."""
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")
    cb.snapshot_skills(reason="legit")

    # Hand-craft a malicious tarball replacing the legit one
    snap_dir = Path(cb.list_backups()[0]["path"])
    tar_path = snap_dir / "skills.tar.gz"
    tar_path.unlink()
    payload = tempfile.NamedTemporaryFile(delete=False, suffix=".md")
    payload.write(b"evil")
    payload.close()
    with tarfile.open(tar_path, "w:gz") as archive:
        # Path traversal via a ../.. arcname — rollback must refuse this.
        archive.add(payload.name, arcname="../../etc/evil.md")
    os.unlink(payload.name)

    ok, msg, _ = cb.rollback()
    assert not ok
    lowered = msg.lower()
    assert "unsafe" in lowered or "refus" in lowered or "extract" in lowered
260  
261  
262  # ---------------------------------------------------------------------------
263  # Integration with run_curator_review
264  # ---------------------------------------------------------------------------
265  
def test_real_run_takes_pre_snapshot(backup_env, monkeypatch):
    """A real (non-dry) curator pass must snapshot the tree before calling
    apply_automatic_transitions. This is the safety net #18373 asked for."""
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")

    # Reload curator module against the freshly-env'd hermes_constants
    from agent import curator
    importlib.reload(curator)

    # Stub out LLM review and auto transitions — we only care about the
    # snapshot side-effect.
    monkeypatch.setattr(
        curator, "_run_llm_review",
        lambda p: {"final": "", "summary": "s", "model": "", "provider": "",
                   "tool_calls": [], "error": None},
    )
    monkeypatch.setattr(
        curator, "apply_automatic_transitions",
        lambda now=None: {"checked": 1, "marked_stale": 0, "archived": 0, "reactivated": 0},
    )

    curator.run_curator_review(synchronous=True)
    # Pre-run snapshot should exist
    rows = cb.list_backups()
    assert any(r.get("reason") == "pre-curator-run" for r in rows), (
        f"expected a pre-curator-run snapshot, got {[r.get('reason') for r in rows]}"
    )
295  
296  
def test_dry_run_skips_snapshot(backup_env, monkeypatch):
    """Dry-run previews must not spend disk on a snapshot — they don't
    mutate anything, so there's nothing to back up."""
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")

    from agent import curator
    importlib.reload(curator)
    monkeypatch.setattr(
        curator, "_run_llm_review",
        lambda p: {"final": "", "summary": "s", "model": "", "provider": "",
                   "tool_calls": [], "error": None},
    )

    curator.run_curator_review(synchronous=True, dry_run=True)
    reasons = [entry.get("reason") for entry in cb.list_backups()]
    assert "pre-curator-run" not in reasons, (
        "dry-run must not create a pre-run snapshot"
    )
317  
318  
319  # ---------------------------------------------------------------------------
320  # cron-jobs backup + rollback (the part issue #18671's follow-up adds)
321  # ---------------------------------------------------------------------------
322  
323  
324  def _write_cron_jobs(home: Path, jobs: list) -> Path:
325      """Write a synthetic cron/jobs.json under HERMES_HOME. Returns the path.
326      Mirrors cron.jobs.save_jobs() wrapper shape: `{"jobs": [...], "updated_at": ...}`.
327      """
328      cron_dir = home / "cron"
329      cron_dir.mkdir(parents=True, exist_ok=True)
330      path = cron_dir / "jobs.json"
331      path.write_text(
332          json.dumps({"jobs": jobs, "updated_at": "2026-05-01T00:00:00Z"}, indent=2),
333          encoding="utf-8",
334      )
335      return path
336  
337  
def _reload_cron_jobs(home: Path):
    """Reload cron.jobs so its module-level HERMES_DIR picks up the tmp HOME.

    Returns the freshly (re)loaded ``cron.jobs`` module. ``home`` is unused
    here but kept for signature parity with the other helpers — the tmp HOME
    is communicated via hermes_constants, which we reload first.
    """
    import hermes_constants
    importlib.reload(hermes_constants)
    # Import-then-reload covers both cases the old sys.modules branch tried
    # to distinguish: if cron.jobs was already loaded, the reload forces it
    # to re-read hermes_constants; if not, the import is fresh and the
    # reload is a harmless re-execution. The old code imported the module
    # three times across two redundant branches — one path suffices.
    import cron.jobs as cj
    importlib.reload(cj)
    return cj
349  
350  
def test_snapshot_includes_cron_jobs(backup_env):
    """With a cron/jobs.json present, snapshot writes cron-jobs.json and records it in manifest."""
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")
    jobs = [
        {"id": "job-a", "name": "a", "schedule": "every 1h", "skills": ["alpha"]},
        {"id": "job-b", "name": "b", "schedule": "every 2h", "skill": "alpha"},
    ]
    _write_cron_jobs(backup_env["home"], jobs)

    snap = cb.snapshot_skills(reason="test")
    assert snap is not None
    assert (snap / cb.CRON_JOBS_FILENAME).exists()

    manifest = json.loads((snap / "manifest.json").read_text(encoding="utf-8"))
    cron_info = manifest["cron_jobs"]
    assert cron_info["backed_up"] is True
    assert cron_info["jobs_count"] == 2
367  
368  
def test_snapshot_without_cron_jobs_file_still_succeeds(backup_env):
    """No cron/jobs.json on disk → snapshot succeeds, manifest records absence."""
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")
    # Deliberately do not create ~/.hermes/cron/jobs.json

    snap = cb.snapshot_skills(reason="test")
    assert snap is not None
    assert not (snap / cb.CRON_JOBS_FILENAME).exists()

    manifest = json.loads((snap / "manifest.json").read_text(encoding="utf-8"))
    cron_info = manifest["cron_jobs"]
    assert cron_info["backed_up"] is False
    assert "cron/jobs.json" in cron_info["reason"]
382  
383  
def test_snapshot_cron_jobs_malformed_json_still_captured(backup_env):
    """Malformed jobs.json is still copied to the snapshot (fidelity over
    validation); the manifest notes the parse warning."""
    cb = backup_env["cb"]
    _write_skill(backup_env["skills"], "alpha")
    cron_dir = backup_env["home"] / "cron"
    cron_dir.mkdir()
    (cron_dir / "jobs.json").write_text("{oh no", encoding="utf-8")

    snap = cb.snapshot_skills(reason="test")
    assert snap is not None
    # Raw file was copied even though we couldn't parse it
    assert (snap / cb.CRON_JOBS_FILENAME).read_text() == "{oh no"

    manifest = json.loads((snap / "manifest.json").read_text(encoding="utf-8"))
    cron_info = manifest["cron_jobs"]
    assert cron_info["backed_up"] is True
    assert cron_info["jobs_count"] == 0
    assert "parse_warning" in cron_info
401  
402  
def test_rollback_restores_cron_skill_links(backup_env):
    """End-to-end: snapshot with job [alpha,beta], curator-style in-place
    rewrite to [umbrella], then rollback → skills restored to [alpha,beta]."""
    cb = backup_env["cb"]
    home = backup_env["home"]
    for skill_name in ("alpha", "beta", "umbrella"):
        _write_skill(backup_env["skills"], skill_name)

    cj = _reload_cron_jobs(home)
    cj.create_job(name="weekly", prompt="p", schedule="every 7d",
                  skills=["alpha", "beta"])

    snap = cb.snapshot_skills(reason="pre-curator-run")
    assert snap is not None

    # Simulate the curator's in-place cron rewrite after consolidation
    cj.rewrite_skill_refs(
        consolidated={"alpha": "umbrella", "beta": "umbrella"},
        pruned=[],
    )
    assert cj.load_jobs()[0]["skills"] == ["umbrella"]

    # Now roll back
    ok, msg, _ = cb.rollback(backup_id=snap.name)
    assert ok, msg
    assert "cron links" in msg

    # skills restored; legacy `skill` mirror follows first element
    assert cj.load_jobs()[0]["skills"] == ["alpha", "beta"]
435  
436  
def test_rollback_only_touches_skill_fields(backup_env):
    """Every field other than skills/skill must remain untouched across rollback.
    Schedule, enabled, prompt, timestamps — all live state, hands off."""
    cb = backup_env["cb"]
    home = backup_env["home"]
    _write_skill(backup_env["skills"], "alpha")

    # Hand-rolled jobs.json with varied fields (no real create_job — we want
    # exact field control).
    _write_cron_jobs(home, [{
        "id": "stable-id",
        "name": "original-name",
        "prompt": "original prompt",
        "schedule": "every 1h",
        "skills": ["alpha"],
        "enabled": True,
        "last_run_at": "2026-04-01T00:00:00Z",
    }])
    snap = cb.snapshot_skills(reason="pre-curator-run")
    assert snap is not None

    # User/scheduler activity AFTER the snapshot: rename the job, change
    # the schedule, update timestamps, and (curator) rewrite the skills list.
    cj = _reload_cron_jobs(home)
    live = cj.load_jobs()
    live[0].update({
        "name": "renamed-since-snapshot",
        "schedule": "every 30m",
        "last_run_at": "2026-05-01T12:00:00Z",
        "skills": ["umbrella"],  # pretend curator did this
    })
    cj.save_jobs(live)

    ok, _, _ = cb.rollback(backup_id=snap.name)
    assert ok

    job = cj.load_jobs()[0]
    # skills: restored
    assert job["skills"] == ["alpha"]
    # everything else: untouched (live state preserved)
    assert job["name"] == "renamed-since-snapshot"
    assert job["schedule"] == "every 30m"
    assert job["last_run_at"] == "2026-05-01T12:00:00Z"
    assert job["prompt"] == "original prompt"
480  
481  
def test_rollback_skips_jobs_the_user_deleted(backup_env):
    """If the user deleted a cron job after the snapshot, rollback must
    NOT resurrect it — the user's delete is a later, explicit choice."""
    cb = backup_env["cb"]
    home = backup_env["home"]
    _write_skill(backup_env["skills"], "alpha")

    _write_cron_jobs(home, [
        {"id": "keep-me", "name": "keep", "schedule": "every 1h", "skills": ["alpha"]},
        {"id": "delete-me", "name": "gone", "schedule": "every 1h", "skills": ["alpha"]},
    ])
    snap = cb.snapshot_skills(reason="pre-curator-run")

    # User deletes one job after the snapshot
    cj = _reload_cron_jobs(home)
    surviving = [job for job in cj.load_jobs() if job["id"] != "delete-me"]
    cj.save_jobs(surviving)

    ok, _, _ = cb.rollback(backup_id=snap.name)
    assert ok

    ids_after = {job["id"] for job in cj.load_jobs()}
    assert "keep-me" in ids_after
    assert "delete-me" not in ids_after  # not resurrected
506  
507  
def test_rollback_leaves_new_jobs_untouched(backup_env):
    """Jobs created AFTER the snapshot must pass through rollback unchanged."""
    cb = backup_env["cb"]
    home = backup_env["home"]
    _write_skill(backup_env["skills"], "alpha")
    _write_cron_jobs(home, [
        {"id": "original", "name": "o", "schedule": "every 1h", "skills": ["alpha"]},
    ])
    snap = cb.snapshot_skills(reason="pre-curator-run")

    cj = _reload_cron_jobs(home)
    live = cj.load_jobs()
    live.append({"id": "new-after-snapshot", "name": "new",
                 "schedule": "every 15m", "skills": ["brand-new-skill"]})
    cj.save_jobs(live)

    ok, _, _ = cb.rollback(backup_id=snap.name)
    assert ok

    by_id = {job["id"]: job for job in cj.load_jobs()}
    assert "new-after-snapshot" in by_id
    # New job's fields completely preserved
    new_job = by_id["new-after-snapshot"]
    assert new_job["skills"] == ["brand-new-skill"]
    assert new_job["schedule"] == "every 15m"
533  
534  
def test_rollback_with_snapshot_missing_cron_succeeds(backup_env):
    """Older snapshots (created before this feature shipped) have no
    cron-jobs.json. Rollback must still restore the skills tree and not
    error out."""
    cb = backup_env["cb"]
    home = backup_env["home"]
    _write_skill(backup_env["skills"], "alpha")

    # No cron/jobs.json at snapshot time — simulates a pre-feature snapshot
    snap = cb.snapshot_skills(reason="test")
    assert snap is not None
    assert not (snap / cb.CRON_JOBS_FILENAME).exists()

    # Later the user created a cron job
    _write_cron_jobs(home, [
        {"id": "later-job", "name": "l", "schedule": "every 1h", "skills": ["x"]},
    ])

    ok, msg, _ = cb.rollback(backup_id=snap.name)
    # Main rollback still succeeds; cron report notes the missing file.
    assert ok, msg
    # Jobs.json untouched (nothing to restore from)
    cj = _reload_cron_jobs(home)
    job = cj.load_jobs()[0]
    assert job["id"] == "later-job"
    assert job["skills"] == ["x"]
561  
562  
def test_restore_cron_skill_links_standalone(backup_env):
    """Unit-level test on _restore_cron_skill_links without the full rollback.
    Verifies the report structure carefully."""
    cb = backup_env["cb"]
    home = backup_env["home"]

    # Prime a snapshot dir manually with cron-jobs.json
    snap_dir = home / "skills" / ".curator_backups" / "fake-id"
    snap_dir.mkdir(parents=True)
    snapshot_jobs = [
        {"id": "job-1", "name": "one", "skills": ["narrow-a", "narrow-b"]},
        {"id": "job-2", "name": "two", "skill": "legacy-single"},
        {"id": "job-gone", "name": "deleted", "skills": ["whatever"]},
    ]
    (snap_dir / cb.CRON_JOBS_FILENAME).write_text(
        json.dumps(snapshot_jobs), encoding="utf-8"
    )

    # Live jobs: job-1 got rewritten, job-2 unchanged, job-gone deleted
    _write_cron_jobs(home, [
        {"id": "job-1", "name": "one", "skills": ["umbrella"], "schedule": "every 1h"},
        {"id": "job-2", "name": "two", "skill": "legacy-single", "schedule": "every 1h"},
        {"id": "job-new", "name": "new", "skills": ["x"], "schedule": "every 1h"},
    ])
    _reload_cron_jobs(home)

    report = cb._restore_cron_skill_links(snap_dir)
    assert report["attempted"] is True
    assert report["error"] is None
    assert report["unchanged"] == 1  # job-2 matched
    restored = report["restored"]
    assert len(restored) == 1  # job-1 got restored
    assert restored[0]["job_id"] == "job-1"
    assert restored[0]["to"]["skills"] == ["narrow-a", "narrow-b"]
    missing = report["skipped_missing"]
    assert len(missing) == 1
    assert missing[0]["job_id"] == "job-gone"