1018 lines
40 KiB
Python
1018 lines
40 KiB
Python
from __future__ import annotations
|
||
|
||
import argparse
|
||
import re
|
||
from collections import defaultdict
|
||
from dataclasses import dataclass
|
||
from datetime import datetime
|
||
from pathlib import Path
|
||
|
||
|
||
# --- Repository layout -------------------------------------------------------
# The script is expected to live in <docs root>/_tools; when run from elsewhere
# DOC_ROOT falls back to the script's own directory.
SCRIPT_DIR = Path(__file__).resolve().parent
DOC_ROOT = SCRIPT_DIR.parent if SCRIPT_DIR.name == "_tools" else SCRIPT_DIR
# Repo root is two levels above the docs root (e.g. repo/docs/api -> repo).
REPO_ROOT = DOC_ROOT.parents[1]
INCLUDE_ROOT = REPO_ROOT / "engine" / "include"
PUBLIC_INCLUDE_ROOT = INCLUDE_ROOT / "XCEngine"
XCEDITOR_INCLUDE_ROOT = REPO_ROOT / "new_editor" / "include" / "XCEditor"
EDITOR_SOURCE_ROOT = REPO_ROOT / "editor" / "src"
META_ROOT = DOC_ROOT / "_meta"
# Default output location of the generated audit report.
DEFAULT_REPORT = META_ROOT / "rebuild-status.md"

# --- Doc-page patterns --------------------------------------------------------
# The canonical doc template uses Chinese metadata labels; each *_RE below
# matches one "**label**: `value`" metadata line at the start of a line.
HEADER_RE = re.compile(r"^\*\*头文件\*\*:\s*`([^`]+\.h)`", re.MULTILINE)  # header-file field
SOURCE_FILE_RE = re.compile(r"^\*\*源文件\*\*:\s*`([^`]+\.(?:h|hpp))`", re.MULTILINE)  # source-file field
NAMESPACE_RE = re.compile(r"^\*\*命名空间\*\*:\s*`[^`]+`", re.MULTILINE)  # namespace field
TYPE_RE = re.compile(r"^\*\*类型\*\*:\s*`[^`]+`", re.MULTILINE)  # type field
DESCRIPTION_RE = re.compile(r"^\*\*描述\*\*:\s*.+$", re.MULTILINE)  # description field
# Inline Markdown links: captures (text, target).
MD_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
# Section headings that only the legacy doc template used.
LEGACY_SECTION_RE = re.compile(r"^## (Syntax|Remarks|See Also|Examples)$", re.MULTILINE)
# Fenced ``` code blocks; non-greedy + DOTALL so each block matches separately.
FENCED_CODE_BLOCK_RE = re.compile(r"```.*?```", re.DOTALL)
# A C++ access-specifier line such as "public:" (whole line only).
ACCESS_SPECIFIER_RE = re.compile(r"^(public|private|protected)\s*:\s*$")
# The importer-version constant assignment in AssetDatabase.h and in docs.
IMPORTER_VERSION_RE = re.compile(r"kCurrentImporterVersion\s*=\s*(\d+)")
|
||
# --- Known-stale content tables -----------------------------------------------
# Tokens that must no longer appear on any page under XCEngine/Editor/:
# (token, reason) pairs; reasons are reported verbatim in the audit output.
STALE_EDITOR_DOC_TOKENS: tuple[tuple[str, str], ...] = (
    ("MakeMigrateSceneAssetReferencesAction", "已删除的 Editor action helper"),
    ("ExecuteMigrateSceneAssetReferences", "已删除的主菜单动作"),
    ("CanMigrateSceneAssetReferences", "已删除的 ProjectCommands guard"),
    ("MigrateSceneAssetReferences", "已删除的迁移 API"),
    ("SceneAssetReferenceMigrationReport", "已删除的迁移报告类型"),
    ("Migrate Scene AssetRefs", "已删除的主菜单项文案"),
    (
        "ProjectCommandsMigrateSceneAssetReferencesRewritesLegacyScenePayloads",
        "已删除的相关测试锚点",
    ),
)
# Tokens that are stale only on Editor pages under a given path prefix:
# (page prefix, token, reason) triples.
STALE_EDITOR_PAGE_TOKENS: tuple[tuple[str, str, str], ...] = (
    (
        "XCEngine/Editor/Core/EditorConsoleSink/",
        "fallback 实例",
        "与当前 EditorConsoleSink::GetInstance 生命周期不符的旧描述",
    ),
    (
        "XCEngine/Editor/Core/EditorConsoleSink/",
        "不会返回空指针",
        "与当前 EditorConsoleSink::GetInstance 可返回 nullptr 的实现不符",
    ),
    (
        "XCEngine/Editor/Core/EditorConsoleSink/",
        "不会返回 `nullptr`",
        "与当前 EditorConsoleSink::GetInstance 可返回 nullptr 的实现不符",
    ),
    (
        "XCEngine/Editor/Viewport/SceneViewportOverlayBuilder/",
        "static SceneViewportOverlayFrameData Build(",
        "SceneViewportOverlayBuilder 已改为实例方法 Build(...) + provider registry 驱动,不应再写成 static Build。",
    ),
    (
        "XCEngine/Editor/Viewport/SceneViewportOverlayBuilder/",
        "无状态构建器",
        "SceneViewportOverlayBuilder 当前持有 provider registry,不再是无状态 helper。",
    ),
)
# Prefix-scoped stale tokens for the wider canonical XCEngine/ tree:
# (page prefix, token, reason) triples.
STALE_CANONICAL_PAGE_TOKENS: tuple[tuple[str, str, str], ...] = (
    (
        "XCEngine/Rendering/Planning/CameraRenderRequest/",
        "builtinPostProcess",
        "与当前 CameraRenderRequest 已删除该字段的实现不符",
    ),
    (
        "XCEngine/Rendering/Planning/CameraRenderRequest/",
        "BuiltinPostProcessRequest",
        "与当前 CameraRenderRequest 已删除该子请求对象的实现不符",
    ),
    (
        "XCEngine/Rendering/Execution/CameraRenderer/",
        "m_builtinPostProcessBuilder",
        "与当前 CameraRenderer 已删除该成员的实现不符",
    ),
    (
        "XCEngine/Core/Asset/ArtifactFormats/",
        "kMaterialArtifactSchemaVersion = 1",
        "与当前材质 artifact schema 已升级到 v2 的实现不符",
    ),
    (
        "XCEngine/Core/Asset/ArtifactFormats/",
        "XCMAT01",
        "与当前材质 artifact magic `XCMAT02` 的实现不符",
    ),
    (
        "XCEngine/Rendering/Pipelines/BuiltinForwardPipeline/",
        "逐材质常量目前只写入 `baseColorFactor`",
        "与当前 BuiltinForwardPipeline 优先消费 shader schema 常量 payload、仅在缺失时回退到 `baseColorFactor` 的实现不符",
    ),
    (
        "XCEngine/Rendering/RenderMaterialUtility/RenderMaterialUtility.md",
        "BuildBuiltinForwardMaterialData() 当前只打包 `baseColorFactor`;贴图仍由调用方单独通过 [ResolveBuiltinBaseColorTexture](ResolveBuiltinBaseColorTexture.md) 解析。",
        "与当前 BuiltinForwardPipeline 已优先通过 `ResolveSchemaMaterialConstantPayload()` 消费 schema-driven 材质常量 payload 的实现不符",
    ),
)
# Whole canonical pages that document deleted APIs: (page path, reason).
STALE_EDITOR_CANONICAL_PAGES: tuple[tuple[str, str], ...] = (
    (
        "XCEngine/Editor/Managers/ProjectManager/MigrateSceneAssetReferences.md",
        "已删除 API 的残留 canonical 页面",
    ),
)
|
||
|
||
|
||
@dataclass
class EditorRiskEntry:
    """A high-risk editor doc page: a lone page backed by a large source file."""

    doc_page: str  # doc-root-relative Markdown page path
    source_file: str  # repo-relative editor header path (editor/src/...)
    direct_page_count: int  # number of .md files in the page's own directory
    public_method_count: int  # method-like declarations in the header's public sections
    implementation_line_count: int  # non-empty, non-comment lines in the matching .cpp
|
||
|
||
|
||
@dataclass
class ModuleCoverage:
    """Per-module counts of headers vs. headers covered by canonical docs."""

    module: str  # module key, e.g. "XCEngine/Core"
    public_headers: int  # total headers found under the module
    documented_headers: int  # headers referenced by valid canonical doc pages

    @property
    def missing_headers(self) -> int:
        """Headers under the module that still lack canonical documentation."""
        return self.public_headers - self.documented_headers
|
||
|
||
|
||
@dataclass
class StaleDocTokenMatch:
    """One occurrence of a known-stale token found in a doc page."""

    doc_page: str  # doc-root-relative page path
    token: str  # the stale token that matched
    reason: str  # why the token is considered stale (reported verbatim)
    line_number: int  # 1-based line number of the hit
    line_text: str  # stripped text of the matching line
|
||
|
||
|
||
@dataclass
class StaleCanonicalPageMatch:
    """A whole canonical page that is known to be obsolete."""

    doc_page: str  # doc-root-relative page path
    reason: str  # why the page should be removed (reported verbatim)
|
||
|
||
|
||
@dataclass(frozen=True)
class PublicDocRoot:
    """Pairs a public include tree with its mirrored canonical doc tree."""

    doc_root_name: str  # top-level doc directory name (e.g. "XCEngine")
    include_root: Path  # absolute path of the include tree being documented
    doc_root: Path  # absolute path of the mirrored doc tree under DOC_ROOT
|
||
|
||
|
||
# The canonical doc roots audited by this script: each pairs an include tree
# with the doc tree that mirrors it.
PUBLIC_DOC_ROOTS: tuple[PublicDocRoot, ...] = (
    PublicDocRoot(
        doc_root_name="XCEngine",
        include_root=PUBLIC_INCLUDE_ROOT,
        doc_root=DOC_ROOT / "XCEngine",
    ),
    PublicDocRoot(
        doc_root_name="XCEditor",
        include_root=XCEDITOR_INCLUDE_ROOT,
        doc_root=DOC_ROOT / "XCEditor",
    ),
)
# Convenience projections of PUBLIC_DOC_ROOTS.
PUBLIC_DOC_ROOT_NAMES = tuple(root.doc_root_name for root in PUBLIC_DOC_ROOTS)
# Page-path prefixes ("XCEngine/", "XCEditor/") identifying canonical pages.
CANONICAL_PAGE_PREFIXES = tuple(f"{root.doc_root_name}/" for root in PUBLIC_DOC_ROOTS)
# Maps doc-root name -> directory that declared header references resolve against.
PUBLIC_HEADER_ROOT_MAP = {
    root.doc_root_name: root.include_root.parent
    for root in PUBLIC_DOC_ROOTS
}
|
||
|
||
|
||
def normalize_rel_path(path: str) -> str:
    """Normalize a relative path to forward slashes (Windows -> POSIX style)."""
    parts = path.split("\\")
    return "/".join(parts)
|
||
|
||
|
||
def iter_markdown_files() -> list[Path]:
    """Return every Markdown page under DOC_ROOT, sorted, excluding any file
    that shares the generated report's filename."""
    pages = [
        page
        for page in DOC_ROOT.rglob("*.md")
        if page.name != DEFAULT_REPORT.name
    ]
    pages.sort()
    return pages
|
||
|
||
|
||
def iter_canonical_markdown_files() -> list[Path]:
    """Return all Markdown pages under the existing canonical doc roots, sorted."""
    collected: list[Path] = []
    for doc_root in PUBLIC_DOC_ROOTS:
        if doc_root.doc_root.exists():
            collected.extend(doc_root.doc_root.rglob("*.md"))
    return sorted(collected)
|
||
|
||
|
||
def iter_public_headers() -> list[str]:
    """Return doc-root-relative paths of every public ``.h`` file, sorted.

    Paths are rooted at the doc-root name (e.g. ``XCEngine/...``) so they can
    be compared directly against declared header references.
    """
    collected: list[str] = []
    for root in PUBLIC_DOC_ROOTS:
        include_root = root.include_root
        if not include_root.exists():
            continue
        base = include_root.parent
        for header_path in include_root.rglob("*.h"):
            collected.append(
                normalize_rel_path(header_path.relative_to(base).as_posix())
            )
    return sorted(collected)
|
||
|
||
|
||
def iter_editor_source_headers() -> list[str]:
    """Return repo-relative paths of every ``.h`` under editor/src, sorted.

    Guards against a missing EDITOR_SOURCE_ROOT for consistency with the other
    tree iterators (``iter_public_headers`` / ``iter_canonical_markdown_files``),
    which all check ``.exists()`` before globbing; the audit then degrades to
    "no editor headers" instead of depending on rglob's behavior for a
    nonexistent directory.
    """
    if not EDITOR_SOURCE_ROOT.exists():
        return []
    return sorted(
        normalize_rel_path(path.relative_to(REPO_ROOT).as_posix())
        for path in EDITOR_SOURCE_ROOT.rglob("*.h")
    )
|
||
|
||
|
||
def iter_public_include_dirs() -> list[str]:
    """List doc-root-relative directory nodes mirroring each include tree.

    Each existing root contributes its own name plus one entry per
    subdirectory, in sorted traversal order.
    """
    nodes: list[str] = []
    for root in PUBLIC_DOC_ROOTS:
        if not root.include_root.exists():
            continue
        nodes.append(root.doc_root_name)
        for child in sorted(root.include_root.rglob("*")):
            if child.is_dir():
                relative = child.relative_to(root.include_root).as_posix()
                nodes.append(f"{root.doc_root_name}/{relative}")
    return nodes
|
||
|
||
|
||
def find_public_doc_root_by_rel_path(rel_path: str) -> PublicDocRoot | None:
    """Map a doc-relative page path to its owning PublicDocRoot, or None."""
    root_name = rel_path.partition("/")[0]
    return next(
        (root for root in PUBLIC_DOC_ROOTS if root.doc_root_name == root_name),
        None,
    )
|
||
|
||
|
||
def header_ref_exists(header: str) -> bool:
    """Check whether a declared header reference resolves to a real file.

    The first path component selects the include root; unknown roots fail.
    """
    root_name = header.partition("/")[0]
    root_parent = PUBLIC_HEADER_ROOT_MAP.get(root_name)
    return root_parent is not None and (root_parent / header).exists()
|
||
|
||
|
||
def dir_index_name(relative: str) -> str:
    """Return the index page filename for a directory node (``<dirname>.md``)."""
    return Path(relative).name + ".md"
|
||
|
||
|
||
def dir_index_doc_path(relative: str) -> Path:
    """Absolute path where the directory index page for *relative* must live."""
    index_dir = DOC_ROOT / relative
    return index_dir / dir_index_name(relative)
|
||
|
||
|
||
def resolve_md_target(source: Path, target: str) -> Path:
    """Resolve a relative Markdown link *target* (dropping any ``#anchor``)
    against the directory containing *source*."""
    without_anchor = target.partition("#")[0]
    normalized = without_anchor.replace("\\", "/")
    return (source.parent / normalized).resolve()
|
||
|
||
|
||
def strip_fenced_code_blocks(content: str) -> str:
    """Remove fenced ``` code blocks so link scanning skips code samples."""
    without_code = FENCED_CODE_BLOCK_RE.sub("", content)
    return without_code
|
||
|
||
|
||
def count_non_empty_source_lines(path: Path) -> int:
    """Count lines in *path* that are neither blank nor pure ``//`` comments.

    A missing file counts as 0 so callers can treat "no .cpp" and
    "empty .cpp" uniformly.
    """
    if not path.exists():
        return 0

    text = path.read_text(encoding="utf-8", errors="ignore")
    return sum(
        1
        for line in text.splitlines()
        if (stripped := line.strip()) and not stripped.startswith("//")
    )
|
||
|
||
|
||
def count_public_method_like_declarations(path: Path) -> int:
    """Heuristically count method-like declarations in the ``public:``
    sections of a C++ header.

    A missing file counts as 0. Declarations may span multiple physical
    lines; they are accumulated in ``pending`` until a ``;`` or ``{``
    terminator appears, then joined and filtered: anything without a
    parameter list, and common parenthesized non-method constructs
    (``using``/``friend``/``static_assert``/preprocessor lines, control-flow
    statements leaking in from inline bodies), is skipped.

    NOTE(review): members declared before the first access specifier are
    never counted (the scan starts outside a public block), so struct-style
    headers with implicit public members report 0 — confirm this is intended.
    """
    if not path.exists():
        return 0

    in_public_block = False  # True while scanning lines after a "public:" line
    pending: list[str] = []  # physical lines of the declaration being accumulated
    count = 0

    for raw_line in path.read_text(encoding="utf-8", errors="ignore").splitlines():
        line = raw_line.strip()
        if not line or line.startswith("//"):
            continue

        access_match = ACCESS_SPECIFIER_RE.match(line)
        if access_match:
            # Entering a new access section discards any half-read declaration.
            in_public_block = access_match.group(1) == "public"
            pending.clear()
            continue

        if not in_public_block:
            continue

        pending.append(line)
        # Keep accumulating until the declaration terminates with ';' or '{'.
        if not any(token in line for token in (";", "{")):
            continue

        declaration = " ".join(pending)
        pending.clear()

        # Must look like a function: needs a parameter list.
        if "(" not in declaration or ")" not in declaration:
            continue
        # Parenthesized constructs that are not method declarations.
        if declaration.startswith(("using ", "friend ", "static_assert", "#")):
            continue
        # Statements from inline method bodies that slipped through.
        if declaration.startswith(("return ", "if ", "for ", "while ", "switch ")):
            continue

        count += 1

    return count
|
||
|
||
|
||
def is_dir_index_page(page: Path) -> bool:
    """True when *page* is a directory index page (``<dir>/<dir>.md``) whose
    mirrored include directory actually exists."""
    inside_docs = page.is_relative_to(DOC_ROOT)
    if not (inside_docs and page.stem == page.parent.name):
        return False

    rel_page = normalize_rel_path(page.relative_to(DOC_ROOT).as_posix())
    owning_root = find_public_doc_root_by_rel_path(rel_page)
    if owning_root is None:
        return False

    # The page's directory must mirror an existing include-tree directory.
    mirrored_dir = owning_root.include_root / page.parent.relative_to(owning_root.doc_root)
    return mirrored_dir.exists() and mirrored_dir.is_dir()
|
||
|
||
|
||
def is_flat_header_page(page: Path, rel_page: str) -> bool:
    """True for a canonical page that shadows a header path directly
    (``Foo/Bar.md`` with ``Foo/Bar.h`` existing) instead of living in a
    per-header directory; directory index pages are excluded."""
    owning_root = find_public_doc_root_by_rel_path(rel_page)
    if owning_root is None or is_dir_index_page(page):
        return False
    header_candidate = owning_root.include_root.parent / Path(rel_page).with_suffix(".h")
    return header_candidate.exists()
|
||
|
||
|
||
def build_editor_module_coverages(
    editor_headers: list[str],
    documented_sources: set[str],
) -> list[ModuleCoverage]:
    """Aggregate editor header/doc coverage per top-level editor/src module.

    Headers directly under editor/src fall into the synthetic "(root)" module.
    """

    def module_of(source: str) -> str:
        # First path component below editor/src, or "(root)" for flat files.
        relative = Path(source).relative_to("editor/src").as_posix()
        return relative.split("/", 1)[0] if "/" in relative else "(root)"

    headers_per_module: dict[str, list[str]] = defaultdict(list)
    for header in editor_headers:
        headers_per_module[module_of(header)].append(header)

    documented_per_module: dict[str, set[str]] = defaultdict(set)
    for documented in documented_sources:
        documented_per_module[module_of(documented)].add(documented)

    coverages: list[ModuleCoverage] = []
    for module in sorted(headers_per_module):
        coverages.append(
            ModuleCoverage(
                module=module,
                public_headers=len(headers_per_module[module]),
                documented_headers=len(documented_per_module.get(module, set())),
            )
        )
    return coverages
|
||
|
||
|
||
def collect_editor_risk_entries(
    source_pages: dict[str, list[str]],
) -> list[EditorRiskEntry]:
    """Flag editor doc pages that sit alone in their directory while the
    backing source looks substantial.

    A page qualifies when it is the only ``.md`` in its directory AND the
    matching ``.cpp`` has >= 200 effective lines, or >= 50 lines combined
    with >= 8 public method-like declarations in the header.
    """
    risky: list[EditorRiskEntry] = []

    for source_file, doc_pages in sorted(source_pages.items()):
        header_path = REPO_ROOT / source_file
        implementation_path = header_path.with_suffix(".cpp")

        for rel_page in doc_pages:
            page_path = DOC_ROOT / rel_page
            sibling_pages = len(list(page_path.parent.glob("*.md")))
            method_count = count_public_method_like_declarations(header_path)
            impl_lines = count_non_empty_source_lines(implementation_path)

            if sibling_pages > 1:
                continue
            big_implementation = impl_lines >= 200
            dense_api = impl_lines >= 50 and method_count >= 8
            if not big_implementation and not dense_api:
                continue

            risky.append(
                EditorRiskEntry(
                    doc_page=rel_page,
                    source_file=source_file,
                    direct_page_count=sibling_pages,
                    public_method_count=method_count,
                    implementation_line_count=impl_lines,
                )
            )

    # Fewest sibling pages first, then largest implementation, then most
    # public methods, then page path for a stable order.
    risky.sort(
        key=lambda entry: (
            entry.direct_page_count,
            -entry.implementation_line_count,
            -entry.public_method_count,
            entry.doc_page,
        )
    )
    return risky
|
||
|
||
|
||
def collect_stale_editor_doc_token_matches(
    rel_page: str,
    content: str,
) -> list[StaleDocTokenMatch]:
    """Scan an Editor canonical page for tokens known to be stale.

    Every non-blank line is checked against the global editor token list and
    then against page-prefix-scoped tokens; each hit is recorded with its
    1-based line number and the stripped line text.
    """
    if not rel_page.startswith("XCEngine/Editor/"):
        return []

    matches: list[StaleDocTokenMatch] = []

    def record(token: str, reason: str, line_number: int, line_text: str) -> None:
        matches.append(
            StaleDocTokenMatch(
                doc_page=rel_page,
                token=token,
                reason=reason,
                line_number=line_number,
                line_text=line_text,
            )
        )

    for line_number, raw_line in enumerate(content.splitlines(), start=1):
        stripped = raw_line.strip()
        if not stripped:
            continue

        # Globally banned editor tokens.
        for token, reason in STALE_EDITOR_DOC_TOKENS:
            if token in raw_line:
                record(token, reason, line_number, stripped)

        # Tokens banned only on pages under a specific prefix.
        for page_prefix, token, reason in STALE_EDITOR_PAGE_TOKENS:
            if rel_page.startswith(page_prefix) and token in raw_line:
                record(token, reason, line_number, stripped)

    return matches
|
||
|
||
|
||
def collect_stale_canonical_doc_token_matches(
    rel_page: str,
    content: str,
) -> list[StaleDocTokenMatch]:
    """Scan a canonical XCEngine page for page-prefix-scoped stale tokens."""
    if not rel_page.startswith("XCEngine/"):
        return []

    matches: list[StaleDocTokenMatch] = []
    for line_number, raw_line in enumerate(content.splitlines(), start=1):
        stripped = raw_line.strip()
        if not stripped:
            continue

        for page_prefix, token, reason in STALE_CANONICAL_PAGE_TOKENS:
            if not rel_page.startswith(page_prefix) or token not in raw_line:
                continue
            matches.append(
                StaleDocTokenMatch(
                    doc_page=rel_page,
                    token=token,
                    reason=reason,
                    line_number=line_number,
                    line_text=stripped,
                )
            )

    return matches
|
||
|
||
|
||
def collect_dynamic_canonical_doc_token_matches(
    rel_page: str,
    content: str,
) -> list[StaleDocTokenMatch]:
    """Compare the importer version documented on the AssetDatabase page with
    the version in the live header; report the first stale mention only."""
    if rel_page != "XCEngine/Core/Asset/AssetDatabase/AssetDatabase.md":
        return []

    header_path = INCLUDE_ROOT / "XCEngine/Core/Asset/AssetDatabase.h"
    if not header_path.exists():
        return []

    header_text = header_path.read_text(encoding="utf-8", errors="ignore")
    header_hit = IMPORTER_VERSION_RE.search(header_text)
    doc_hit = IMPORTER_VERSION_RE.search(content)
    if header_hit is None or doc_hit is None:
        return []

    current_version = header_hit.group(1)
    documented_version = doc_hit.group(1)
    if documented_version == current_version:
        return []

    # Locate the first doc line that spells out the outdated version.
    for line_number, raw_line in enumerate(content.splitlines(), start=1):
        if "kCurrentImporterVersion" in raw_line and documented_version in raw_line:
            return [
                StaleDocTokenMatch(
                    doc_page=rel_page,
                    token=f"kCurrentImporterVersion = {documented_version}",
                    reason=f"与当前 AssetDatabase 头文件中的 importer 版本 `{current_version}` 不符",
                    line_number=line_number,
                    line_text=raw_line.strip(),
                )
            ]
    return []
|
||
|
||
|
||
def collect_doc_state(report_path: Path) -> dict[str, object]:
    """Scan the doc and source trees and return every audit metric as one dict.

    *report_path* is excluded from scanning so a previously generated report
    cannot pollute its own statistics.
    """
    markdown_files = iter_markdown_files()
    canonical_markdown_files = iter_canonical_markdown_files()
    public_headers = iter_public_headers()
    editor_headers = iter_editor_source_headers()
    public_include_dirs = iter_public_include_dirs()
    public_root_counts: dict[str, int] = defaultdict(int)

    # Reference tracking: "declared" = mentioned on a page, "valid" =
    # resolves to a real file, "canonical_valid" = valid AND found on a
    # canonical page. Invalid lists pair (page, bad reference).
    declared_header_refs: set[str] = set()
    valid_header_refs: set[str] = set()
    canonical_valid_header_refs: set[str] = set()
    invalid_header_refs: list[tuple[str, str]] = []
    declared_source_refs: set[str] = set()
    valid_source_refs: set[str] = set()
    canonical_valid_source_refs: set[str] = set()
    invalid_source_refs: list[tuple[str, str]] = []
    broken_md_links: list[tuple[str, str]] = []
    non_md_relative_links: list[tuple[str, str]] = []
    old_template_pages: list[str] = []
    flat_header_pages: list[str] = []
    # Maps editor source file -> the doc pages referencing it.
    editor_source_pages: dict[str, list[str]] = defaultdict(list)
    stale_canonical_doc_token_matches: list[StaleDocTokenMatch] = []
    stale_editor_doc_token_matches: list[StaleDocTokenMatch] = []
    stale_editor_canonical_page_matches: list[StaleCanonicalPageMatch] = []

    # How many canonical pages carry each metadata field.
    metadata_counts = {
        "namespace": 0,
        "type": 0,
        "description": 0,
        "header": 0,
        "source_file": 0,
    }

    for page in markdown_files:
        if page.resolve() == report_path.resolve():
            continue  # never audit the report we are about to (re)write
        rel_page = normalize_rel_path(page.relative_to(DOC_ROOT).as_posix())
        content = page.read_text(encoding="utf-8")
        is_canonical_page = rel_page.startswith(CANONICAL_PAGE_PREFIXES)

        # Metadata-field coverage (counted on canonical pages only).
        if is_canonical_page and NAMESPACE_RE.search(content):
            metadata_counts["namespace"] += 1
        if is_canonical_page and TYPE_RE.search(content):
            metadata_counts["type"] += 1
        if is_canonical_page and DESCRIPTION_RE.search(content):
            metadata_counts["description"] += 1
        if is_canonical_page and HEADER_RE.search(content):
            metadata_counts["header"] += 1
        if is_canonical_page and SOURCE_FILE_RE.search(content):
            metadata_counts["source_file"] += 1

        # Pages still using the legacy template's section headings.
        if is_canonical_page and LEGACY_SECTION_RE.search(content):
            old_template_pages.append(rel_page)

        # Pages shadowing a header path directly instead of a per-header dir.
        if is_flat_header_page(page, rel_page):
            flat_header_pages.append(rel_page)

        # Whole pages known to be obsolete.
        for stale_page, reason in STALE_EDITOR_CANONICAL_PAGES:
            if rel_page == stale_page:
                stale_editor_canonical_page_matches.append(
                    StaleCanonicalPageMatch(
                        doc_page=rel_page,
                        reason=reason,
                    )
                )

        # Stale-token scans: editor-specific, canonical-wide, and the
        # dynamic importer-version check.
        stale_editor_doc_token_matches.extend(
            collect_stale_editor_doc_token_matches(rel_page, content)
        )
        stale_canonical_doc_token_matches.extend(
            collect_stale_canonical_doc_token_matches(rel_page, content)
        )
        stale_canonical_doc_token_matches.extend(
            collect_dynamic_canonical_doc_token_matches(rel_page, content)
        )

        # Header references: validated against the include trees; invalid
        # refs are only reported for canonical pages.
        for match in HEADER_RE.finditer(content):
            header = normalize_rel_path(match.group(1))
            declared_header_refs.add(header)
            if header_ref_exists(header):
                valid_header_refs.add(header)
                if is_canonical_page:
                    canonical_valid_header_refs.add(header)
            elif is_canonical_page:
                invalid_header_refs.append((rel_page, header))

        # Source-file references: only editor/src paths that exist count as
        # valid; Editor canonical pages also feed editor_source_pages.
        for match in SOURCE_FILE_RE.finditer(content):
            source_file = normalize_rel_path(match.group(1))
            declared_source_refs.add(source_file)
            if source_file.startswith("editor/src/") and (REPO_ROOT / source_file).exists():
                valid_source_refs.add(source_file)
                if rel_page.startswith("XCEngine/Editor/"):
                    canonical_valid_source_refs.add(source_file)
                    editor_source_pages[source_file].append(rel_page)
            elif rel_page.startswith("XCEngine/Editor/"):
                invalid_source_refs.append((rel_page, source_file))

        # Relative-link validation (canonical + support pages); fenced code
        # blocks are stripped first so code samples are not treated as links.
        if is_canonical_page or rel_page.startswith(("_meta/", "_tools/")):
            link_scan_content = strip_fenced_code_blocks(content)
            for _, target in MD_LINK_RE.findall(link_scan_content):
                if target.startswith(("http://", "https://", "mailto:", "#")):
                    continue

                normalized = target.replace("\\", "/")
                if normalized.endswith(".md") or ".md#" in normalized:
                    resolved = resolve_md_target(page, normalized)
                    # Links to the (possibly not-yet-written) report are OK.
                    if not resolved.exists() and resolved != report_path.resolve():
                        broken_md_links.append((rel_page, target))
                    continue

                non_md_relative_links.append((rel_page, target))

    # Per-root totals (e.g. XCEngine vs XCEditor).
    for header in public_headers:
        public_root_counts[header.split("/", 1)[0]] += 1

    documented_root_counts: dict[str, int] = defaultdict(int)
    for header in canonical_valid_header_refs:
        documented_root_counts[header.split("/", 1)[0]] += 1

    # Per-module coverage, keyed by the first two path components.
    public_by_module: dict[str, list[str]] = defaultdict(list)
    documented_by_module: dict[str, set[str]] = defaultdict(set)

    for header in public_headers:
        module_parts = header.split("/", 2)
        module = "/".join(module_parts[:2]) if len(module_parts) >= 2 else module_parts[0]
        public_by_module[module].append(header)

    for header in canonical_valid_header_refs:
        module_parts = header.split("/", 2)
        module = "/".join(module_parts[:2]) if len(module_parts) >= 2 else module_parts[0]
        documented_by_module[module].add(header)

    module_coverages = [
        ModuleCoverage(
            module=module,
            public_headers=len(headers),
            documented_headers=len(documented_by_module.get(module, set())),
        )
        for module, headers in sorted(public_by_module.items())
    ]
    editor_module_coverages = build_editor_module_coverages(
        editor_headers,
        canonical_valid_source_refs,
    )

    missing_headers = [
        header for header in public_headers if header not in canonical_valid_header_refs
    ]
    missing_editor_headers = [
        header for header in editor_headers if header not in canonical_valid_source_refs
    ]
    missing_parallel_indexes = [
        relative
        for relative in public_include_dirs
        if not dir_index_doc_path(relative).exists()
    ]
    editor_risk_entries = collect_editor_risk_entries(editor_source_pages)
    support_top_dirs = sorted(
        path.name
        for path in DOC_ROOT.iterdir()
        if path.is_dir() and path.name in {"_meta", "_tools"}
    )

    # Single state dict consumed by build_report() and main()'s summary.
    return {
        "generated_at": datetime.now(),
        "markdown_files": markdown_files,
        "canonical_markdown_files": canonical_markdown_files,
        "public_headers": public_headers,
        "public_root_counts": dict(sorted(public_root_counts.items())),
        "documented_root_counts": dict(sorted(documented_root_counts.items())),
        "public_include_dirs": public_include_dirs,
        "declared_header_refs": sorted(declared_header_refs),
        "valid_header_refs": sorted(valid_header_refs),
        "canonical_valid_header_refs": sorted(canonical_valid_header_refs),
        "invalid_header_refs": invalid_header_refs,
        "declared_source_refs": sorted(declared_source_refs),
        "valid_source_refs": sorted(valid_source_refs),
        "canonical_valid_source_refs": sorted(canonical_valid_source_refs),
        "invalid_source_refs": invalid_source_refs,
        "broken_md_links": broken_md_links,
        "non_md_relative_links": non_md_relative_links,
        "old_template_pages": sorted(old_template_pages),
        "flat_header_pages": sorted(flat_header_pages),
        "stale_canonical_doc_token_matches": stale_canonical_doc_token_matches,
        "stale_editor_doc_token_matches": stale_editor_doc_token_matches,
        "stale_editor_canonical_page_matches": stale_editor_canonical_page_matches,
        "missing_headers": missing_headers,
        "editor_headers": editor_headers,
        "editor_module_coverages": editor_module_coverages,
        "missing_editor_headers": missing_editor_headers,
        "editor_risk_entries": editor_risk_entries,
        "module_coverages": module_coverages,
        "metadata_counts": metadata_counts,
        "support_top_dirs": support_top_dirs,
        "missing_parallel_indexes": missing_parallel_indexes,
    }
|
||
|
||
|
||
def format_pairs_table(headers: tuple[str, str], rows: list[tuple[str, str]]) -> list[str]:
    """Render a two-column Markdown table; every cell value is code-formatted."""
    table = [
        f"| {headers[0]} | {headers[1]} |",
        "|------|------|",
    ]
    table.extend(f"| `{left}` | `{right}` |" for left, right in rows)
    return table
|
||
|
||
|
||
def build_report(state: dict[str, object]) -> str:
    """Render the audit *state* (from collect_doc_state) into Markdown.

    Produces the summary section unconditionally, then one detail section
    per non-empty finding list; large lists are truncated with a hint line.
    """
    # Unpack the state dict into typed locals (values are stored as object).
    generated_at: datetime = state["generated_at"]  # type: ignore[assignment]
    markdown_files: list[Path] = state["markdown_files"]  # type: ignore[assignment]
    canonical_markdown_files: list[Path] = state["canonical_markdown_files"]  # type: ignore[assignment]
    public_headers: list[str] = state["public_headers"]  # type: ignore[assignment]
    public_root_counts: dict[str, int] = state["public_root_counts"]  # type: ignore[assignment]
    documented_root_counts: dict[str, int] = state["documented_root_counts"]  # type: ignore[assignment]
    editor_headers: list[str] = state["editor_headers"]  # type: ignore[assignment]
    public_include_dirs: list[str] = state["public_include_dirs"]  # type: ignore[assignment]
    valid_header_refs: list[str] = state["valid_header_refs"]  # type: ignore[assignment]
    canonical_valid_header_refs: list[str] = state["canonical_valid_header_refs"]  # type: ignore[assignment]
    invalid_header_refs: list[tuple[str, str]] = state["invalid_header_refs"]  # type: ignore[assignment]
    valid_source_refs: list[str] = state["valid_source_refs"]  # type: ignore[assignment]
    canonical_valid_source_refs: list[str] = state["canonical_valid_source_refs"]  # type: ignore[assignment]
    invalid_source_refs: list[tuple[str, str]] = state["invalid_source_refs"]  # type: ignore[assignment]
    broken_md_links: list[tuple[str, str]] = state["broken_md_links"]  # type: ignore[assignment]
    non_md_relative_links: list[tuple[str, str]] = state["non_md_relative_links"]  # type: ignore[assignment]
    old_template_pages: list[str] = state["old_template_pages"]  # type: ignore[assignment]
    flat_header_pages: list[str] = state["flat_header_pages"]  # type: ignore[assignment]
    stale_canonical_doc_token_matches: list[StaleDocTokenMatch] = state["stale_canonical_doc_token_matches"]  # type: ignore[assignment]
    stale_editor_doc_token_matches: list[StaleDocTokenMatch] = state["stale_editor_doc_token_matches"]  # type: ignore[assignment]
    stale_editor_canonical_page_matches: list[StaleCanonicalPageMatch] = state["stale_editor_canonical_page_matches"]  # type: ignore[assignment]
    missing_headers: list[str] = state["missing_headers"]  # type: ignore[assignment]
    missing_editor_headers: list[str] = state["missing_editor_headers"]  # type: ignore[assignment]
    editor_module_coverages: list[ModuleCoverage] = state["editor_module_coverages"]  # type: ignore[assignment]
    editor_risk_entries: list[EditorRiskEntry] = state["editor_risk_entries"]  # type: ignore[assignment]
    module_coverages: list[ModuleCoverage] = state["module_coverages"]  # type: ignore[assignment]
    metadata_counts: dict[str, int] = state["metadata_counts"]  # type: ignore[assignment]
    support_top_dirs: list[str] = state["support_top_dirs"]  # type: ignore[assignment]
    missing_parallel_indexes: list[str] = state["missing_parallel_indexes"]  # type: ignore[assignment]

    # Report header + summary counters.
    lines: list[str] = []
    lines.append("# API 文档重构状态")
    lines.append("")
    lines.append(f"**生成时间**: `{generated_at.strftime('%Y-%m-%d %H:%M:%S')}`")
    lines.append("")
    lines.append("**来源**: `docs/api/_tools/audit_api_docs.py`")
    lines.append("")
    lines.append("## 摘要")
    lines.append("")
    lines.append(f"- Markdown 页面数(全部): `{len(markdown_files)}`")
    lines.append(f"- Markdown 页面数(canonical): `{len(canonical_markdown_files)}`")
    lines.append(f"- Public headers 数: `{len(public_headers)}`")
    for root_name, count in public_root_counts.items():
        documented_count = documented_root_counts.get(root_name, 0)
        lines.append(
            f"- `{root_name}` public headers 数: `{count}`(canonical 已覆盖 `{documented_count}`)"
        )
    lines.append(f"- Editor source headers 数: `{len(editor_headers)}`")
    lines.append(f"- 有效头文件引用数(全部): `{len(valid_header_refs)}`")
    lines.append(f"- 有效头文件引用数(canonical): `{len(canonical_valid_header_refs)}`")
    lines.append(f"- 无效头文件引用数: `{len(invalid_header_refs)}`")
    lines.append(f"- 有效源文件引用数(全部): `{len(valid_source_refs)}`")
    lines.append(f"- 有效源文件引用数(Editor canonical): `{len(canonical_valid_source_refs)}`")
    lines.append(f"- 无效源文件引用数: `{len(invalid_source_refs)}`")
    lines.append(f"- 失效 `.md` 链接数: `{len(broken_md_links)}`")
    lines.append(f"- 非 `.md` 相对链接数: `{len(non_md_relative_links)}`")
    lines.append(f"- 旧模板页面数: `{len(old_template_pages)}`")
    lines.append(f"- 扁平 header 页面数: `{len(flat_header_pages)}`")
    lines.append(f"- Canonical 显式过期符号残留数: `{len(stale_canonical_doc_token_matches)}`")
    lines.append(f"- Editor 显式过期符号残留数: `{len(stale_editor_doc_token_matches)}`")
    lines.append(f"- Editor 残留 canonical 旧页面数: `{len(stale_editor_canonical_page_matches)}`")
    lines.append(f"- Editor 高风险单页目录数: `{len(editor_risk_entries)}`")
    lines.append("")
    # Parallel-directory (include tree vs doc tree) statistics.
    lines.append("## 平行目录")
    lines.append("")
    lines.append(
        "- Canonical 根目录: `"
        + ", ".join(root.doc_root_name for root in PUBLIC_DOC_ROOTS)
        + "`"
    )
    lines.append(f"- 源码目录节点数: `{len(public_include_dirs)}`")
    lines.append(
        f"- 已生成目录总览页节点数: `{len(public_include_dirs) - len(missing_parallel_indexes)}`"
    )
    lines.append(f"- 缺失目录总览页节点数: `{len(missing_parallel_indexes)}`")
    if support_top_dirs:
        lines.append(f"- 支撑目录: `{', '.join(support_top_dirs)}`")
    lines.append("")
    # Per-module coverage tables (public headers, then editor sources).
    lines.append("## 模块覆盖")
    lines.append("")
    lines.append("| 模块 | Public headers | 已覆盖 | 未覆盖 |")
    lines.append("|------|----------------|--------|--------|")
    for coverage in module_coverages:
        lines.append(
            f"| `{coverage.module}` | `{coverage.public_headers}` | "
            f"`{coverage.documented_headers}` | `{coverage.missing_headers}` |"
        )
    lines.append("")
    lines.append("## Editor 源文件页覆盖")
    lines.append("")
    lines.append("| 模块 | Source headers | 已覆盖 | 未覆盖 |")
    lines.append("|------|----------------|--------|--------|")
    for coverage in editor_module_coverages:
        lines.append(
            f"| `{coverage.module}` | `{coverage.public_headers}` | "
            f"`{coverage.documented_headers}` | `{coverage.missing_headers}` |"
        )
    lines.append("")
    # Metadata-field coverage table.
    lines.append("## 元信息覆盖")
    lines.append("")
    lines.append("| 字段 | 页面数 |")
    lines.append("|------|--------|")
    lines.append(f"| `命名空间` | `{metadata_counts['namespace']}` |")
    lines.append(f"| `类型` | `{metadata_counts['type']}` |")
    lines.append(f"| `描述` | `{metadata_counts['description']}` |")
    lines.append(f"| `头文件` | `{metadata_counts['header']}` |")
    lines.append(f"| `源文件` | `{metadata_counts['source_file']}` |")
    lines.append("")

    # Detail sections below are emitted only when there is something to show;
    # long lists are capped (50/80/120/40 entries depending on the section).
    if missing_parallel_indexes:
        lines.append("## 缺失的平行目录总览页")
        lines.append("")
        for relative in missing_parallel_indexes:
            lines.append(f"- `{relative}`")
        lines.append("")

    if invalid_header_refs:
        lines.append("## 无效头文件引用")
        lines.append("")
        lines.extend(format_pairs_table(("文档", "头文件"), invalid_header_refs[:50]))
        lines.append("")

    if invalid_source_refs:
        lines.append("## 无效源文件引用")
        lines.append("")
        lines.extend(format_pairs_table(("文档", "源文件"), invalid_source_refs[:50]))
        lines.append("")

    if broken_md_links:
        lines.append("## 失效 Markdown 链接")
        lines.append("")
        lines.extend(format_pairs_table(("文档", "目标"), broken_md_links[:50]))
        lines.append("")

    if non_md_relative_links:
        lines.append("## 非 `.md` 相对链接")
        lines.append("")
        lines.extend(format_pairs_table(("文档", "目标"), non_md_relative_links[:50]))
        lines.append("")

    if old_template_pages:
        lines.append("## 旧模板页面")
        lines.append("")
        for page in old_template_pages[:80]:
            lines.append(f"- `{page}`")
        if len(old_template_pages) > 80:
            lines.append(f"- 其余 `{len(old_template_pages) - 80}` 个页面请直接运行脚本查看。")
        lines.append("")

    if flat_header_pages:
        lines.append("## 扁平 header 页面")
        lines.append("")
        for page in flat_header_pages[:120]:
            lines.append(f"- `{page}`")
        if len(flat_header_pages) > 120:
            lines.append(f"- 其余 `{len(flat_header_pages) - 120}` 个页面请直接运行脚本查看。")
        lines.append("")

    if stale_canonical_doc_token_matches:
        lines.append("## Canonical 显式过期符号残留")
        lines.append("")
        lines.append("| 文档页 | 过期符号 | 原因 | 行号 | 行内容 |")
        lines.append("|------|----------|------|------|--------|")
        for entry in stale_canonical_doc_token_matches[:80]:
            # Escape pipes so the line text cannot break the Markdown table.
            safe_line_text = entry.line_text.replace("|", "\\|")
            lines.append(
                f"| `{entry.doc_page}` | `{entry.token}` | `{entry.reason}` | "
                f"`{entry.line_number}` | `{safe_line_text}` |"
            )
        if len(stale_canonical_doc_token_matches) > 80:
            lines.append(
                f"| `...` | `...` | `...` | `...` | 其余 `{len(stale_canonical_doc_token_matches) - 80}` 项请直接运行脚本查看 |"
            )
        lines.append("")

    if stale_editor_doc_token_matches:
        lines.append("## Editor 显式过期符号残留")
        lines.append("")
        lines.append("| 文档页 | 过期符号 | 原因 | 行号 | 行内容 |")
        lines.append("|------|----------|------|------|--------|")
        for entry in stale_editor_doc_token_matches[:80]:
            # Escape pipes so the line text cannot break the Markdown table.
            safe_line_text = entry.line_text.replace("|", "\\|")
            lines.append(
                f"| `{entry.doc_page}` | `{entry.token}` | `{entry.reason}` | "
                f"`{entry.line_number}` | `{safe_line_text}` |"
            )
        if len(stale_editor_doc_token_matches) > 80:
            lines.append(
                f"| `...` | `...` | `...` | `...` | 其余 `{len(stale_editor_doc_token_matches) - 80}` 项请直接运行脚本查看 |"
            )
        lines.append("")

    if stale_editor_canonical_page_matches:
        lines.append("## Editor 残留 canonical 旧页面")
        lines.append("")
        lines.append("| 文档页 | 原因 |")
        lines.append("|------|------|")
        for entry in stale_editor_canonical_page_matches:
            lines.append(f"| `{entry.doc_page}` | `{entry.reason}` |")
        lines.append("")

    if missing_headers:
        lines.append("## 未覆盖的 public headers")
        lines.append("")
        for header in missing_headers[:120]:
            lines.append(f"- `{header}`")
        if len(missing_headers) > 120:
            lines.append(f"- 其余 `{len(missing_headers) - 120}` 个 header 请直接运行脚本查看。")
        lines.append("")

    if missing_editor_headers:
        lines.append("## 未覆盖的 Editor 源文件页")
        lines.append("")
        for header in missing_editor_headers[:120]:
            lines.append(f"- `{header}`")
        if len(missing_editor_headers) > 120:
            lines.append(f"- 其余 `{len(missing_editor_headers) - 120}` 个源文件请直接运行脚本查看。")
        lines.append("")

    if editor_risk_entries:
        lines.append("## Editor 高风险单页目录")
        lines.append("")
        lines.append("| 文档页 | 源文件 | 目录内直系页面数 | public 成员函数数 | 对应 `.cpp` 有效行数 |")
        lines.append("|------|--------|------------------|-------------------|----------------------|")
        for entry in editor_risk_entries[:40]:
            lines.append(
                f"| `{entry.doc_page}` | `{entry.source_file}` | "
                f"`{entry.direct_page_count}` | `{entry.public_method_count}` | "
                f"`{entry.implementation_line_count}` |"
            )
        if len(editor_risk_entries) > 40:
            lines.append(
                f"| `...` | `...` | `...` | `...` | 其余 `{len(editor_risk_entries) - 40}` 项请直接运行脚本查看 |"
            )
        lines.append("")

    # Single trailing newline regardless of how the last section ended.
    return "\n".join(lines).rstrip() + "\n"
|
||
|
||
|
||
def main() -> int:
    """CLI entry point for the documentation audit.

    Parses the ``--report`` option (Markdown output path), collects the
    current documentation state, writes the Markdown report, prints a
    summary counter for each audited category, and returns ``0``.
    """
    parser = argparse.ArgumentParser(description="Audit XCEngine API documentation.")
    parser.add_argument(
        "--report",
        default=str(DEFAULT_REPORT),
        help="Markdown report output path.",
    )
    args = parser.parse_args()

    report_path = Path(args.report)
    report_path.parent.mkdir(parents=True, exist_ok=True)
    state = collect_doc_state(report_path)
    report_path.write_text(build_report(state), encoding="utf-8")

    # Console summary: one "<label>: <count>" line per audited collection,
    # printed in this fixed order.
    summary_counters = (
        ("Markdown pages (all)", "markdown_files"),
        ("Markdown pages (canonical)", "canonical_markdown_files"),
        ("Public headers", "public_headers"),
        ("Editor source headers", "editor_headers"),
        ("Valid header refs (all)", "valid_header_refs"),
        ("Valid header refs (canonical)", "canonical_valid_header_refs"),
        ("Invalid header refs", "invalid_header_refs"),
        ("Valid source refs (all)", "valid_source_refs"),
        ("Valid source refs (Editor canonical)", "canonical_valid_source_refs"),
        ("Invalid source refs", "invalid_source_refs"),
        ("Broken .md links", "broken_md_links"),
        ("Non-.md relative links", "non_md_relative_links"),
        ("Old template pages", "old_template_pages"),
        ("Flat header pages", "flat_header_pages"),
        ("Stale canonical doc tokens", "stale_canonical_doc_token_matches"),
        ("Stale editor doc tokens", "stale_editor_doc_token_matches"),
        ("Stale editor canonical pages", "stale_editor_canonical_page_matches"),
        ("Missing directory index pages", "missing_parallel_indexes"),
        ("Editor high-risk single-page dirs", "editor_risk_entries"),
    )
    for label, key in summary_counters:
        print(f"{label}: {len(state[key])}")
    print(f"Report written to: {report_path}")
    return 0
|
||
|
||
|
||
# Script entry: exit with main()'s return code (SystemExit carries it
# through to the process exit status).
if __name__ == "__main__":
    raise SystemExit(main())
|