diff --git a/tests/test_report_generation.py b/tests/test_report_generation.py
new file mode 100644
index 0000000..46afb64
--- /dev/null
+++ b/tests/test_report_generation.py
@@ -0,0 +1,207 @@
+"""
+Tests for the Python report-generation logic embedded in the 'report' job
+of .github/workflows/ci-cd.yml.
+
+The script was changed in this PR to:
+- Use "notebooks" instead of "notes" in the dirs list
+- Set project name to "MerphDev" (was "MerphDev Learning and Certifications")
+- Add os.makedirs("docs", exist_ok=True) before writing the JSON file
+- Simplify print output to "Report generated"
+"""
+
+import json
+from datetime import datetime
+
+import pytest
+
+
+# ---------------------------------------------------------------------------
+# Helper: reproduce the exact script logic from the workflow inline script
+# ---------------------------------------------------------------------------
+
+def run_report_script(base_dir: str) -> dict:
+ """
+ Replicates the logic of the inline Python script in the workflow's
+ 'Generate report' step, running inside *base_dir* so that directory
+ existence checks work correctly.
+ """
+ import os as _os
+ import json as _json
+
+ original_cwd = _os.getcwd()
+ _os.chdir(base_dir)
+ try:
+ report = {
+ "generated_at": datetime.now().isoformat(),
+ "project": "MerphDev",
+ "statistics": {},
+ }
+
+ dirs = [
+ "learning-paths",
+ "certifications",
+ "notebooks",
+ "models",
+ "docs",
+ ]
+
+ for d in dirs:
+ if _os.path.exists(d):
+ md_files = [f for f in _os.listdir(d) if f.endswith(".md")]
+ report["statistics"][d] = len(md_files)
+
+ _os.makedirs("docs", exist_ok=True)
+
+ with open("docs/latest-report.json", "w") as f:
+ _json.dump(report, f, indent=2)
+
+ return report
+ finally:
+ _os.chdir(original_cwd)
+
+
+# ---------------------------------------------------------------------------
+# Fixtures
+# ---------------------------------------------------------------------------
+
+@pytest.fixture()
+def tmpproject(tmp_path):
+ """Return a temporary directory that mimics the project layout."""
+ return tmp_path
+
+
+# ---------------------------------------------------------------------------
+# Tests: project name
+# ---------------------------------------------------------------------------
+
+class TestProjectName:
+ """The project field must be exactly 'MerphDev' after the PR change."""
+
+ def test_project_name_is_merphdev(self, tmpproject):
+ report = run_report_script(str(tmpproject))
+ assert report["project"] == "MerphDev"
+
+ def test_project_name_not_old_value(self, tmpproject):
+ """Regression: ensure the old verbose name was dropped."""
+ report = run_report_script(str(tmpproject))
+ assert report["project"] != "MerphDev Learning and Certifications"
+
+
+# ---------------------------------------------------------------------------
+# Tests: dirs list uses "notebooks" not "notes"
+# ---------------------------------------------------------------------------
+
+class TestDirectoryList:
+ """The scanned dirs must include 'notebooks' and must NOT include 'notes'."""
+
+ def test_notebooks_directory_is_scanned(self, tmpproject):
+ (tmpproject / "notebooks").mkdir()
+ (tmpproject / "notebooks" / "intro.md").write_text("# intro")
+ report = run_report_script(str(tmpproject))
+ assert "notebooks" in report["statistics"]
+
+ def test_notes_directory_is_not_scanned(self, tmpproject):
+ # Even if a 'notes' dir exists it must NOT appear in statistics.
+ (tmpproject / "notes").mkdir()
+ (tmpproject / "notes" / "anything.md").write_text("# note")
+ report = run_report_script(str(tmpproject))
+ assert "notes" not in report["statistics"]
+
+ def test_all_expected_dirs_in_scan_list(self, tmpproject):
+ expected = {"learning-paths", "certifications", "notebooks", "models", "docs"}
+ for d in expected:
+ (tmpproject / d).mkdir()
+ report = run_report_script(str(tmpproject))
+ assert set(report["statistics"].keys()) == expected
+
+ def test_missing_directories_are_excluded_from_statistics(self, tmpproject):
+ # Only create a subset of dirs; missing ones must not appear at all.
+ (tmpproject / "notebooks").mkdir()
+ (tmpproject / "notebooks" / "nb.md").write_text("# nb")
+ report = run_report_script(str(tmpproject))
+ for absent in ("learning-paths", "certifications", "models"):
+ assert absent not in report["statistics"]
+
+
+# ---------------------------------------------------------------------------
+# Tests: os.makedirs("docs", exist_ok=True)
+# ---------------------------------------------------------------------------
+
+class TestDocsMakedirs:
+ """The script must create docs/ automatically when it does not exist."""
+
+ def test_docs_dir_created_when_missing(self, tmpproject):
+ assert not (tmpproject / "docs").exists()
+ run_report_script(str(tmpproject))
+ assert (tmpproject / "docs").is_dir()
+
+ def test_docs_dir_creation_is_idempotent(self, tmpproject):
+ """exist_ok=True means calling makedirs twice must not raise."""
+ (tmpproject / "docs").mkdir()
+ # Should not raise even though docs/ already exists.
+ run_report_script(str(tmpproject))
+ assert (tmpproject / "docs").is_dir()
+
+
+# ---------------------------------------------------------------------------
+# Tests: JSON output file
+# ---------------------------------------------------------------------------
+
+class TestOutputFile:
+ """The report must be written to docs/latest-report.json."""
+
+ def test_report_file_is_created(self, tmpproject):
+ run_report_script(str(tmpproject))
+ assert (tmpproject / "docs" / "latest-report.json").exists()
+
+ def test_report_file_is_valid_json(self, tmpproject):
+ run_report_script(str(tmpproject))
+ content = (tmpproject / "docs" / "latest-report.json").read_text()
+ parsed = json.loads(content)
+ assert isinstance(parsed, dict)
+
+ def test_report_has_required_top_level_keys(self, tmpproject):
+ run_report_script(str(tmpproject))
+ data = json.loads((tmpproject / "docs" / "latest-report.json").read_text())
+ assert "generated_at" in data
+ assert "project" in data
+ assert "statistics" in data
+
+ def test_report_generated_at_is_iso_format(self, tmpproject):
+ run_report_script(str(tmpproject))
+ data = json.loads((tmpproject / "docs" / "latest-report.json").read_text())
+ # Must be parseable as a datetime
+ datetime.fromisoformat(data["generated_at"])
+
+ def test_report_statistics_counts_only_md_files(self, tmpproject):
+ nb = tmpproject / "notebooks"
+ nb.mkdir()
+ (nb / "one.md").write_text("# one")
+ (nb / "two.md").write_text("# two")
+ (nb / "ignore.txt").write_text("not md")
+ (nb / "ignore.py").write_text("# python")
+ report = run_report_script(str(tmpproject))
+ assert report["statistics"]["notebooks"] == 2
+
+ def test_report_statistics_empty_directory_yields_zero(self, tmpproject):
+ (tmpproject / "models").mkdir()
+ report = run_report_script(str(tmpproject))
+ assert report["statistics"]["models"] == 0
+
+ def test_report_statistics_multiple_dirs(self, tmpproject):
+ for d, count in [("learning-paths", 3), ("certifications", 1), ("notebooks", 2)]:
+ dpath = tmpproject / d
+ dpath.mkdir()
+ for i in range(count):
+ (dpath / f"file{i}.md").write_text(f"# {i}")
+ report = run_report_script(str(tmpproject))
+ assert report["statistics"]["learning-paths"] == 3
+ assert report["statistics"]["certifications"] == 1
+ assert report["statistics"]["notebooks"] == 2
+
+ def test_report_json_is_indented(self, tmpproject):
+ """The JSON must be pretty-printed (indent=2) for readability."""
+ run_report_script(str(tmpproject))
+ raw = (tmpproject / "docs" / "latest-report.json").read_text()
+ # Pretty-printed JSON contains newlines
+ assert "\n" in raw
diff --git a/tests/test_validate_structure.sh b/tests/test_validate_structure.sh
new file mode 100644
index 0000000..5439b9d
--- /dev/null
+++ b/tests/test_validate_structure.sh
@@ -0,0 +1,252 @@
+#!/usr/bin/env bash
+# Tests for the bash validation script embedded in the 'validate' job of
+# .github/workflows/ci-cd.yml.
+#
+# Changes in scope (PR diff):
+# - "notebooks/README.md" replaces "notes/README.md" in required_files
+# - Error message: "❌ Validation failed" (dropped "— missing files" suffix)
+# - Success message: "✅ Structure OK" (was "✅ All structure is valid")
+#
+# Uses plain bash assertions; no external framework required.
+
+set -euo pipefail
+
+PASS=0
+FAIL=0
+
+pass() { echo "PASS: $1"; PASS=$((PASS + 1)); }
+fail() { echo "FAIL: $1"; FAIL=$((FAIL + 1)); }
+
+# ---------------------------------------------------------------------------
+# The exact validation logic extracted from the workflow step
+# ---------------------------------------------------------------------------
+run_validation() {
+ local dir="$1"
+ (
+ cd "$dir"
+ required_files=(
+ "README.md"
+ "learning-paths/cybersecurity.md"
+ "learning-paths/ai-ml.md"
+ "learning-paths/devops-cloud.md"
+ "learning-paths/web-fullstack.md"
+ "learning-paths/git-github.md"
+ "certifications/README.md"
+ "notebooks/README.md"
+ "models/README.md"
+ "docs/ROADMAP.md"
+ )
+ all_ok=true
+ for file in "${required_files[@]}"; do
+ if [ ! -f "$file" ]; then
+ echo "❌ MISSING: $file"
+ all_ok=false
+ else
+ echo "✅ OK: $file"
+ fi
+ done
+ if [ "$all_ok" = false ]; then
+ echo "❌ Validation failed"
+ exit 1
+ fi
+ echo "✅ Structure OK"
+ )
+}
+
+# ---------------------------------------------------------------------------
+# Helper: run validation and capture exit code without triggering set -e
+# ---------------------------------------------------------------------------
+capture_exit() {
+ # Usage: capture_exit
+ # Sets global RUN_OUTPUT and RUN_EXIT
+ set +e
+ RUN_OUTPUT=$(run_validation "$1" 2>&1)
+ RUN_EXIT=$?
+ set -e
+}
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+make_full_project() {
+ local dir="$1"
+ mkdir -p "$dir/learning-paths" \
+ "$dir/certifications" \
+ "$dir/notebooks" \
+ "$dir/models" \
+ "$dir/docs"
+ touch "$dir/README.md"
+ touch "$dir/learning-paths/cybersecurity.md"
+ touch "$dir/learning-paths/ai-ml.md"
+ touch "$dir/learning-paths/devops-cloud.md"
+ touch "$dir/learning-paths/web-fullstack.md"
+ touch "$dir/learning-paths/git-github.md"
+ touch "$dir/certifications/README.md"
+ touch "$dir/notebooks/README.md"
+ touch "$dir/models/README.md"
+ touch "$dir/docs/ROADMAP.md"
+}
+
+# ---------------------------------------------------------------------------
+# TEST 1: All required files present → exits 0 and prints success message
+# ---------------------------------------------------------------------------
+tmp=$(mktemp -d)
+make_full_project "$tmp"
+capture_exit "$tmp"
+rm -rf "$tmp"
+
+if [[ $RUN_EXIT -eq 0 ]]; then
+ pass "all files present: exits 0"
+else
+ fail "all files present: expected exit 0, got $RUN_EXIT"
+fi
+
+if echo "$RUN_OUTPUT" | grep -q "✅ Structure OK"; then
+ pass "all files present: prints '✅ Structure OK'"
+else
+ fail "all files present: success message not found in output"
+fi
+
+# ---------------------------------------------------------------------------
+# TEST 2: notebooks/README.md required (not notes/README.md)
+# ---------------------------------------------------------------------------
+tmp=$(mktemp -d)
+make_full_project "$tmp"
+# Remove notebooks/README.md — validation must FAIL
+rm "$tmp/notebooks/README.md"
+capture_exit "$tmp"
+rm -rf "$tmp"
+
+if [[ $RUN_EXIT -ne 0 ]]; then
+ pass "missing notebooks/README.md: exits non-zero"
+else
+ fail "missing notebooks/README.md: expected non-zero exit"
+fi
+
+# ---------------------------------------------------------------------------
+# TEST 3: notes/README.md is NOT in the required list (it's notebooks now)
+# ---------------------------------------------------------------------------
+tmp=$(mktemp -d)
+make_full_project "$tmp"
+# Remove notebooks/README.md but add notes/README.md — must still FAIL
+rm "$tmp/notebooks/README.md"
+mkdir -p "$tmp/notes"
+touch "$tmp/notes/README.md"
+capture_exit "$tmp"
+rm -rf "$tmp"
+
+if [[ $RUN_EXIT -ne 0 ]]; then
+ pass "notes/README.md does not satisfy notebooks/README.md requirement"
+else
+ fail "notes/README.md should NOT satisfy notebooks/README.md requirement"
+fi
+
+# ---------------------------------------------------------------------------
+# TEST 4: Error message is "❌ Validation failed" (no "— missing files" suffix)
+# ---------------------------------------------------------------------------
+tmp=$(mktemp -d)
+# Empty project — everything is missing
+capture_exit "$tmp"
+rm -rf "$tmp"
+
+if echo "$RUN_OUTPUT" | grep -q "❌ Validation failed"; then
+ pass "error message contains '❌ Validation failed'"
+else
+ fail "error message '❌ Validation failed' not found"
+fi
+
+if echo "$RUN_OUTPUT" | grep -qF "— missing files"; then
+ fail "error message must NOT contain '— missing files' suffix (old message)"
+else
+ pass "error message does not contain old suffix '— missing files'"
+fi
+
+# ---------------------------------------------------------------------------
+# TEST 5: Missing any single required file → exit non-zero
+# ---------------------------------------------------------------------------
+required_files=(
+ "README.md"
+ "learning-paths/cybersecurity.md"
+ "learning-paths/ai-ml.md"
+ "learning-paths/devops-cloud.md"
+ "learning-paths/web-fullstack.md"
+ "learning-paths/git-github.md"
+ "certifications/README.md"
+ "notebooks/README.md"
+ "models/README.md"
+ "docs/ROADMAP.md"
+)
+
+for missing_file in "${required_files[@]}"; do
+ tmp=$(mktemp -d)
+ make_full_project "$tmp"
+ rm "$tmp/$missing_file"
+ capture_exit "$tmp"
+ rm -rf "$tmp"
+ if [[ $RUN_EXIT -ne 0 ]]; then
+ pass "missing $missing_file: exits non-zero"
+ else
+ fail "missing $missing_file: expected non-zero exit, got 0"
+ fi
+done
+
+# ---------------------------------------------------------------------------
+# TEST 6: Missing file is reported with ❌ MISSING prefix
+# ---------------------------------------------------------------------------
+tmp=$(mktemp -d)
+make_full_project "$tmp"
+rm "$tmp/notebooks/README.md"
+capture_exit "$tmp"
+rm -rf "$tmp"
+
+if echo "$RUN_OUTPUT" | grep -q "❌ MISSING: notebooks/README.md"; then
+ pass "missing file reported with '❌ MISSING: notebooks/README.md'"
+else
+ fail "expected '❌ MISSING: notebooks/README.md' in output"
+fi
+
+# ---------------------------------------------------------------------------
+# TEST 7: Success message is "✅ Structure OK" (not old "All structure is valid")
+# ---------------------------------------------------------------------------
+tmp=$(mktemp -d)
+make_full_project "$tmp"
+capture_exit "$tmp"
+rm -rf "$tmp"
+
+if echo "$RUN_OUTPUT" | grep -q "✅ Structure OK"; then
+ pass "success message is '✅ Structure OK'"
+else
+ fail "success message '✅ Structure OK' not found"
+fi
+
+if echo "$RUN_OUTPUT" | grep -qF "All structure is valid"; then
+ fail "old success message 'All structure is valid' must not appear"
+else
+ pass "old success message 'All structure is valid' is absent"
+fi
+
+# ---------------------------------------------------------------------------
+# TEST 8: Regression — all_ok=false accumulates multiple missing files
+# ---------------------------------------------------------------------------
+tmp=$(mktemp -d)
+make_full_project "$tmp"
+rm "$tmp/notebooks/README.md"
+rm "$tmp/models/README.md"
+capture_exit "$tmp"
+rm -rf "$tmp"
+
+missing_count=$(echo "$RUN_OUTPUT" | grep -c "❌ MISSING:" || true)
+if [[ $missing_count -eq 2 ]]; then
+ pass "two missing files both reported as MISSING"
+else
+ fail "expected 2 MISSING lines, got $missing_count"
+fi
+
+# ---------------------------------------------------------------------------
+# Summary
+# ---------------------------------------------------------------------------
+echo ""
+echo "Results: $PASS passed, $FAIL failed"
+if [[ $FAIL -gt 0 ]]; then
+ exit 1
+fi
diff --git a/tests/test_workflow_yaml.py b/tests/test_workflow_yaml.py
new file mode 100644
index 0000000..d1f5c8d
--- /dev/null
+++ b/tests/test_workflow_yaml.py
@@ -0,0 +1,263 @@
+"""
+Tests for the YAML structure of .github/workflows/ci-cd.yml.
+
+Focuses exclusively on changes introduced in this PR:
+- check-links job now has `needs: validate`
+- required_files list contains "notebooks/README.md" (not "notes/README.md")
+- deploy job requires both validate and report (needs: [validate, report])
+- report job requires validate (needs: validate)
+- Python inline script dirs list contains "notebooks" not "notes"
+- Python inline script project name is "MerphDev"
+"""
+
+import re
+import pytest
+import yaml
+from pathlib import Path
+
+WORKFLOW_PATH = Path(__file__).parent.parent / ".github" / "workflows" / "ci-cd.yml"
+
+
+def _strip_heredocs(text: str) -> str:
+ """
+ Replace bash heredoc blocks (python3 << 'EOF' ... EOF) with an empty
+ quoted string so that PyYAML can parse the surrounding YAML structure
+ without tripping over the un-indented Python source lines.
+ """
+ return re.sub(
+ r"python3\s*<<\s*'EOF'.*?^EOF",
+ '""',
+ text,
+ flags=re.DOTALL | re.MULTILINE,
+ )
+
+
+@pytest.fixture(scope="module")
+def workflow() -> dict:
+ raw = WORKFLOW_PATH.read_text()
+ sanitised = _strip_heredocs(raw)
+ return yaml.safe_load(sanitised)
+
+
+@pytest.fixture(scope="module")
+def workflow_text() -> str:
+ return WORKFLOW_PATH.read_text()
+
+
+# ---------------------------------------------------------------------------
+# Tests: check-links job now needs validate
+# ---------------------------------------------------------------------------
+
+class TestCheckLinksJobDependency:
+ """check-links must declare needs: validate (new in this PR)."""
+
+ def test_check_links_needs_validate(self, workflow):
+ job = workflow["jobs"]["check-links"]
+ needs = job.get("needs")
+ assert needs is not None, "check-links job must have a 'needs' field"
+ if isinstance(needs, list):
+ assert "validate" in needs
+ else:
+ assert needs == "validate"
+
+ def test_check_links_needs_is_not_empty(self, workflow):
+ job = workflow["jobs"]["check-links"]
+ needs = job.get("needs")
+ assert needs, "check-links 'needs' must not be empty"
+
+
+# ---------------------------------------------------------------------------
+# Tests: validate job — required files list
+# ---------------------------------------------------------------------------
+
+class TestValidateJobRequiredFiles:
+ """The bash script must reference notebooks/README.md, not notes/README.md."""
+
+ def test_notebooks_readme_in_required_files(self, workflow_text):
+ assert "notebooks/README.md" in workflow_text, (
+ "notebooks/README.md must be listed in required_files"
+ )
+
+ def test_notes_readme_not_in_required_files(self, workflow_text):
+ # notes/README.md must NOT appear anywhere in the workflow
+ assert "notes/README.md" not in workflow_text, (
+ "notes/README.md must not appear in the workflow (renamed to notebooks)"
+ )
+
+ def test_required_files_list_contains_all_expected_paths(self, workflow_text):
+ expected_paths = [
+ "README.md",
+ "learning-paths/cybersecurity.md",
+ "learning-paths/ai-ml.md",
+ "learning-paths/devops-cloud.md",
+ "learning-paths/web-fullstack.md",
+ "learning-paths/git-github.md",
+ "certifications/README.md",
+ "notebooks/README.md",
+ "models/README.md",
+ "docs/ROADMAP.md",
+ ]
+ for path in expected_paths:
+ assert path in workflow_text, f"Expected path '{path}' not found in workflow"
+
+
+# ---------------------------------------------------------------------------
+# Tests: validate job — messages
+# ---------------------------------------------------------------------------
+
+class TestValidateJobMessages:
+ """Error and success messages must match the updated text from the PR."""
+
+ def test_error_message_is_validation_failed(self, workflow_text):
+ assert "Validation failed" in workflow_text
+
+ def test_error_message_has_no_missing_files_suffix(self, workflow_text):
+ assert "Validation failed — missing files" not in workflow_text, (
+ "Old error message suffix '— missing files' must be removed"
+ )
+
+ def test_success_message_is_structure_ok(self, workflow_text):
+ assert "Structure OK" in workflow_text
+
+ def test_success_message_not_old_value(self, workflow_text):
+ assert "All structure is valid" not in workflow_text, (
+ "Old success message 'All structure is valid' must be replaced by 'Structure OK'"
+ )
+
+
+# ---------------------------------------------------------------------------
+# Tests: report job — Python script changes
+# ---------------------------------------------------------------------------
+
+class TestReportJobPythonScript:
+ """The inline Python script must use 'notebooks' and 'MerphDev'."""
+
+ def test_python_script_uses_notebooks_dir(self, workflow_text):
+ # The word "notebooks" must appear in the workflow (as a Python string)
+ assert '"notebooks"' in workflow_text or "'notebooks'" in workflow_text, (
+ "Python script dirs list must include 'notebooks'"
+ )
+
+ def test_python_script_does_not_use_notes_dir(self, workflow_text):
+ # "notes" as a standalone Python string must not appear
+ assert '"notes"' not in workflow_text and "'notes'" not in workflow_text, (
+ "Python script must not reference old 'notes' directory"
+ )
+
+ def test_python_script_project_name_is_merphdev(self, workflow_text):
+ assert '"MerphDev"' in workflow_text or "'MerphDev'" in workflow_text
+
+ def test_python_script_project_name_not_old_value(self, workflow_text):
+ old_name = "MerphDev Learning and Certifications"
+ assert old_name not in workflow_text, (
+ f"Old project name '{old_name}' must be replaced with 'MerphDev'"
+ )
+
+ def test_python_script_uses_makedirs(self, workflow_text):
+ assert "os.makedirs" in workflow_text, (
+ "Python script must call os.makedirs to ensure docs/ exists"
+ )
+
+ def test_python_script_makedirs_uses_exist_ok(self, workflow_text):
+ assert "exist_ok=True" in workflow_text, (
+ "os.makedirs must use exist_ok=True to be idempotent"
+ )
+
+ def test_python_script_writes_to_docs_latest_report_json(self, workflow_text):
+ assert "docs/latest-report.json" in workflow_text
+
+
+# ---------------------------------------------------------------------------
+# Tests: report job — dependencies
+# ---------------------------------------------------------------------------
+
+class TestReportJobDependency:
+ """report job must need validate."""
+
+ def test_report_needs_validate(self, workflow):
+ job = workflow["jobs"]["report"]
+ needs = job.get("needs")
+ assert needs is not None, "report job must have a 'needs' field"
+ if isinstance(needs, list):
+ assert "validate" in needs
+ else:
+ assert needs == "validate"
+
+
+# ---------------------------------------------------------------------------
+# Tests: deploy job — dependencies
+# ---------------------------------------------------------------------------
+
+class TestDeployJobDependency:
+ """deploy must need both validate and report."""
+
+ def test_deploy_needs_validate_and_report(self, workflow):
+ job = workflow["jobs"]["deploy"]
+ needs = job.get("needs", [])
+ if isinstance(needs, str):
+ needs = [needs]
+ assert "validate" in needs, "deploy must depend on validate"
+ assert "report" in needs, "deploy must depend on report"
+
+ def test_deploy_only_runs_on_main(self, workflow):
+ job = workflow["jobs"]["deploy"]
+ condition = job.get("if", "")
+ assert "refs/heads/main" in condition, (
+ "deploy job must only run on pushes to main"
+ )
+
+
+# ---------------------------------------------------------------------------
+# Tests: workflow trigger configuration
+# ---------------------------------------------------------------------------
+
+class TestWorkflowTriggers:
+ """The workflow must be triggered on push and pull_request to main, plus a schedule.
+
+ Note: PyYAML (YAML 1.1) parses the bare `on:` key as boolean True, so the
+ trigger block is accessed via workflow[True].
+ """
+
+ def _triggers(self, workflow: dict) -> dict:
+ # PyYAML 1.1 interprets `on` as True; fall back to string key as well.
+ return workflow.get(True) or workflow.get("on") or {}
+
+ def test_triggers_on_push_to_main(self, workflow):
+ branches = self._triggers(workflow)["push"]["branches"]
+ assert "main" in branches
+
+ def test_triggers_on_pull_request_to_main(self, workflow):
+ branches = self._triggers(workflow)["pull_request"]["branches"]
+ assert "main" in branches
+
+ def test_has_weekly_schedule(self, workflow):
+ schedule = self._triggers(workflow)["schedule"]
+ assert isinstance(schedule, list) and len(schedule) > 0
+ cron = schedule[0]["cron"]
+ assert cron == "0 0 * * 1", f"Expected Monday midnight cron, got: {cron}"
+
+
+# ---------------------------------------------------------------------------
+# Tests: overall job structure
+# ---------------------------------------------------------------------------
+
+class TestJobStructure:
+ """All four jobs must exist with correct names."""
+
+ def test_validate_job_exists(self, workflow):
+ assert "validate" in workflow["jobs"]
+
+ def test_check_links_job_exists(self, workflow):
+ assert "check-links" in workflow["jobs"]
+
+ def test_report_job_exists(self, workflow):
+ assert "report" in workflow["jobs"]
+
+ def test_deploy_job_exists(self, workflow):
+ assert "deploy" in workflow["jobs"]
+
+ def test_all_jobs_run_on_ubuntu_latest(self, workflow):
+ for job_name, job in workflow["jobs"].items():
+ assert job.get("runs-on") == "ubuntu-latest", (
+ f"Job '{job_name}' must run on ubuntu-latest"
+ )