docs: sync api and planning docs
This commit is contained in:
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -13,17 +13,112 @@ DOC_ROOT = SCRIPT_DIR.parent if SCRIPT_DIR.name == "_tools" else SCRIPT_DIR
|
||||
REPO_ROOT = DOC_ROOT.parents[1]
|
||||
INCLUDE_ROOT = REPO_ROOT / "engine" / "include"
|
||||
PUBLIC_INCLUDE_ROOT = INCLUDE_ROOT / "XCEngine"
|
||||
EDITOR_SOURCE_ROOT = REPO_ROOT / "editor" / "src"
|
||||
PARALLEL_ROOT = DOC_ROOT / "XCEngine"
|
||||
META_ROOT = DOC_ROOT / "_meta"
|
||||
DEFAULT_REPORT = META_ROOT / "rebuild-status.md"
|
||||
|
||||
HEADER_RE = re.compile(r"^\*\*头文件\*\*:\s*`([^`]+\.h)`", re.MULTILINE)
|
||||
SOURCE_FILE_RE = re.compile(r"^\*\*源文件\*\*:\s*`([^`]+\.(?:h|hpp))`", re.MULTILINE)
|
||||
NAMESPACE_RE = re.compile(r"^\*\*命名空间\*\*:\s*`[^`]+`", re.MULTILINE)
|
||||
TYPE_RE = re.compile(r"^\*\*类型\*\*:\s*`[^`]+`", re.MULTILINE)
|
||||
DESCRIPTION_RE = re.compile(r"^\*\*描述\*\*:\s*.+$", re.MULTILINE)
|
||||
MD_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
|
||||
LEGACY_SECTION_RE = re.compile(r"^## (Syntax|Remarks|See Also|Examples)$", re.MULTILINE)
|
||||
FENCED_CODE_BLOCK_RE = re.compile(r"```.*?```", re.DOTALL)
|
||||
ACCESS_SPECIFIER_RE = re.compile(r"^(public|private|protected)\s*:\s*$")
|
||||
IMPORTER_VERSION_RE = re.compile(r"kCurrentImporterVersion\s*=\s*(\d+)")
|
||||
STALE_EDITOR_DOC_TOKENS: tuple[tuple[str, str], ...] = (
|
||||
("MakeMigrateSceneAssetReferencesAction", "已删除的 Editor action helper"),
|
||||
("ExecuteMigrateSceneAssetReferences", "已删除的主菜单动作"),
|
||||
("CanMigrateSceneAssetReferences", "已删除的 ProjectCommands guard"),
|
||||
("MigrateSceneAssetReferences", "已删除的迁移 API"),
|
||||
("SceneAssetReferenceMigrationReport", "已删除的迁移报告类型"),
|
||||
("Migrate Scene AssetRefs", "已删除的主菜单项文案"),
|
||||
(
|
||||
"ProjectCommandsMigrateSceneAssetReferencesRewritesLegacyScenePayloads",
|
||||
"已删除的相关测试锚点",
|
||||
),
|
||||
)
|
||||
STALE_EDITOR_PAGE_TOKENS: tuple[tuple[str, str, str], ...] = (
|
||||
(
|
||||
"XCEngine/Editor/Core/EditorConsoleSink/",
|
||||
"fallback 实例",
|
||||
"与当前 EditorConsoleSink::GetInstance 生命周期不符的旧描述",
|
||||
),
|
||||
(
|
||||
"XCEngine/Editor/Core/EditorConsoleSink/",
|
||||
"不会返回空指针",
|
||||
"与当前 EditorConsoleSink::GetInstance 可返回 nullptr 的实现不符",
|
||||
),
|
||||
(
|
||||
"XCEngine/Editor/Core/EditorConsoleSink/",
|
||||
"不会返回 `nullptr`",
|
||||
"与当前 EditorConsoleSink::GetInstance 可返回 nullptr 的实现不符",
|
||||
),
|
||||
(
|
||||
"XCEngine/Editor/Viewport/SceneViewportOverlayBuilder/",
|
||||
"static SceneViewportOverlayFrameData Build(",
|
||||
"SceneViewportOverlayBuilder 已改为实例方法 Build(...) + provider registry 驱动,不应再写成 static Build。",
|
||||
),
|
||||
(
|
||||
"XCEngine/Editor/Viewport/SceneViewportOverlayBuilder/",
|
||||
"无状态构建器",
|
||||
"SceneViewportOverlayBuilder 当前持有 provider registry,不再是无状态 helper。",
|
||||
),
|
||||
)
|
||||
STALE_CANONICAL_PAGE_TOKENS: tuple[tuple[str, str, str], ...] = (
|
||||
(
|
||||
"XCEngine/Rendering/CameraRenderRequest/",
|
||||
"builtinPostProcess",
|
||||
"与当前 CameraRenderRequest 已删除该字段的实现不符",
|
||||
),
|
||||
(
|
||||
"XCEngine/Rendering/CameraRenderRequest/",
|
||||
"BuiltinPostProcessRequest",
|
||||
"与当前 CameraRenderRequest 已删除该子请求对象的实现不符",
|
||||
),
|
||||
(
|
||||
"XCEngine/Rendering/CameraRenderer/",
|
||||
"m_builtinPostProcessBuilder",
|
||||
"与当前 CameraRenderer 已删除该成员的实现不符",
|
||||
),
|
||||
(
|
||||
"XCEngine/Core/Asset/ArtifactFormats/",
|
||||
"kMaterialArtifactSchemaVersion = 1",
|
||||
"与当前材质 artifact schema 已升级到 v2 的实现不符",
|
||||
),
|
||||
(
|
||||
"XCEngine/Core/Asset/ArtifactFormats/",
|
||||
"XCMAT01",
|
||||
"与当前材质 artifact magic `XCMAT02` 的实现不符",
|
||||
),
|
||||
(
|
||||
"XCEngine/Rendering/Pipelines/BuiltinForwardPipeline/",
|
||||
"逐材质常量目前只写入 `baseColorFactor`",
|
||||
"与当前 BuiltinForwardPipeline 优先消费 shader schema 常量 payload、仅在缺失时回退到 `baseColorFactor` 的实现不符",
|
||||
),
|
||||
(
|
||||
"XCEngine/Rendering/RenderMaterialUtility/RenderMaterialUtility.md",
|
||||
"BuildBuiltinForwardMaterialData() 当前只打包 `baseColorFactor`;贴图仍由调用方单独通过 [ResolveBuiltinBaseColorTexture](ResolveBuiltinBaseColorTexture.md) 解析。",
|
||||
"与当前 BuiltinForwardPipeline 已优先通过 `ResolveSchemaMaterialConstantPayload()` 消费 schema-driven 材质常量 payload 的实现不符",
|
||||
),
|
||||
)
|
||||
STALE_EDITOR_CANONICAL_PAGES: tuple[tuple[str, str], ...] = (
|
||||
(
|
||||
"XCEngine/Editor/Managers/ProjectManager/MigrateSceneAssetReferences.md",
|
||||
"已删除 API 的残留 canonical 页面",
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class EditorRiskEntry:
|
||||
doc_page: str
|
||||
source_file: str
|
||||
direct_page_count: int
|
||||
public_method_count: int
|
||||
implementation_line_count: int
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -37,6 +132,21 @@ class ModuleCoverage:
|
||||
return self.public_headers - self.documented_headers
|
||||
|
||||
|
||||
@dataclass
|
||||
class StaleDocTokenMatch:
|
||||
doc_page: str
|
||||
token: str
|
||||
reason: str
|
||||
line_number: int
|
||||
line_text: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class StaleCanonicalPageMatch:
|
||||
doc_page: str
|
||||
reason: str
|
||||
|
||||
|
||||
def normalize_rel_path(path: str) -> str:
|
||||
return path.replace("\\", "/")
|
||||
|
||||
@@ -63,6 +173,13 @@ def iter_public_headers() -> list[str]:
|
||||
)
|
||||
|
||||
|
||||
def iter_editor_source_headers() -> list[str]:
|
||||
return sorted(
|
||||
normalize_rel_path(path.relative_to(REPO_ROOT).as_posix())
|
||||
for path in EDITOR_SOURCE_ROOT.rglob("*.h")
|
||||
)
|
||||
|
||||
|
||||
def iter_public_include_dirs() -> list[str]:
|
||||
dirs = ["XCEngine"]
|
||||
dirs.extend(
|
||||
@@ -90,6 +207,60 @@ def strip_fenced_code_blocks(content: str) -> str:
|
||||
return FENCED_CODE_BLOCK_RE.sub("", content)
|
||||
|
||||
|
||||
def count_non_empty_source_lines(path: Path) -> int:
|
||||
if not path.exists():
|
||||
return 0
|
||||
|
||||
count = 0
|
||||
for line in path.read_text(encoding="utf-8", errors="ignore").splitlines():
|
||||
stripped = line.strip()
|
||||
if not stripped or stripped.startswith("//"):
|
||||
continue
|
||||
count += 1
|
||||
return count
|
||||
|
||||
|
||||
def count_public_method_like_declarations(path: Path) -> int:
|
||||
if not path.exists():
|
||||
return 0
|
||||
|
||||
in_public_block = False
|
||||
pending: list[str] = []
|
||||
count = 0
|
||||
|
||||
for raw_line in path.read_text(encoding="utf-8", errors="ignore").splitlines():
|
||||
line = raw_line.strip()
|
||||
if not line or line.startswith("//"):
|
||||
continue
|
||||
|
||||
access_match = ACCESS_SPECIFIER_RE.match(line)
|
||||
if access_match:
|
||||
in_public_block = access_match.group(1) == "public"
|
||||
pending.clear()
|
||||
continue
|
||||
|
||||
if not in_public_block:
|
||||
continue
|
||||
|
||||
pending.append(line)
|
||||
if not any(token in line for token in (";", "{")):
|
||||
continue
|
||||
|
||||
declaration = " ".join(pending)
|
||||
pending.clear()
|
||||
|
||||
if "(" not in declaration or ")" not in declaration:
|
||||
continue
|
||||
if declaration.startswith(("using ", "friend ", "static_assert", "#")):
|
||||
continue
|
||||
if declaration.startswith(("return ", "if ", "for ", "while ", "switch ")):
|
||||
continue
|
||||
|
||||
count += 1
|
||||
|
||||
return count
|
||||
|
||||
|
||||
def is_dir_index_page(page: Path) -> bool:
|
||||
if not page.is_relative_to(PARALLEL_ROOT):
|
||||
return False
|
||||
@@ -109,29 +280,229 @@ def is_flat_header_page(page: Path, rel_page: str) -> bool:
|
||||
return (INCLUDE_ROOT / Path(rel_page).with_suffix(".h")).exists()
|
||||
|
||||
|
||||
def build_editor_module_coverages(
|
||||
editor_headers: list[str],
|
||||
documented_sources: set[str],
|
||||
) -> list[ModuleCoverage]:
|
||||
source_by_module: dict[str, list[str]] = defaultdict(list)
|
||||
documented_by_module: dict[str, set[str]] = defaultdict(set)
|
||||
|
||||
for source in editor_headers:
|
||||
relative = Path(source).relative_to("editor/src").as_posix()
|
||||
module = relative.split("/", 1)[0] if "/" in relative else "(root)"
|
||||
source_by_module[module].append(source)
|
||||
|
||||
for source in documented_sources:
|
||||
relative = Path(source).relative_to("editor/src").as_posix()
|
||||
module = relative.split("/", 1)[0] if "/" in relative else "(root)"
|
||||
documented_by_module[module].add(source)
|
||||
|
||||
return [
|
||||
ModuleCoverage(
|
||||
module=module,
|
||||
public_headers=len(headers),
|
||||
documented_headers=len(documented_by_module.get(module, set())),
|
||||
)
|
||||
for module, headers in sorted(source_by_module.items())
|
||||
]
|
||||
|
||||
|
||||
def collect_editor_risk_entries(
|
||||
source_pages: dict[str, list[str]],
|
||||
) -> list[EditorRiskEntry]:
|
||||
entries: list[EditorRiskEntry] = []
|
||||
|
||||
for source_file, doc_pages in sorted(source_pages.items()):
|
||||
for rel_page in doc_pages:
|
||||
page_path = DOC_ROOT / rel_page
|
||||
direct_page_count = len(list(page_path.parent.glob("*.md")))
|
||||
header_path = REPO_ROOT / source_file
|
||||
implementation_path = header_path.with_suffix(".cpp")
|
||||
public_method_count = count_public_method_like_declarations(header_path)
|
||||
implementation_line_count = count_non_empty_source_lines(implementation_path)
|
||||
|
||||
if direct_page_count > 1:
|
||||
continue
|
||||
if implementation_line_count < 200 and not (
|
||||
implementation_line_count >= 50 and public_method_count >= 8
|
||||
):
|
||||
continue
|
||||
|
||||
entries.append(
|
||||
EditorRiskEntry(
|
||||
doc_page=rel_page,
|
||||
source_file=source_file,
|
||||
direct_page_count=direct_page_count,
|
||||
public_method_count=public_method_count,
|
||||
implementation_line_count=implementation_line_count,
|
||||
)
|
||||
)
|
||||
|
||||
entries.sort(
|
||||
key=lambda entry: (
|
||||
entry.direct_page_count,
|
||||
-entry.implementation_line_count,
|
||||
-entry.public_method_count,
|
||||
entry.doc_page,
|
||||
)
|
||||
)
|
||||
return entries
|
||||
|
||||
|
||||
def collect_stale_editor_doc_token_matches(
|
||||
rel_page: str,
|
||||
content: str,
|
||||
) -> list[StaleDocTokenMatch]:
|
||||
if not rel_page.startswith("XCEngine/Editor/"):
|
||||
return []
|
||||
|
||||
matches: list[StaleDocTokenMatch] = []
|
||||
for line_number, raw_line in enumerate(content.splitlines(), start=1):
|
||||
line = raw_line.strip()
|
||||
if not line:
|
||||
continue
|
||||
|
||||
for token, reason in STALE_EDITOR_DOC_TOKENS:
|
||||
if token not in raw_line:
|
||||
continue
|
||||
|
||||
matches.append(
|
||||
StaleDocTokenMatch(
|
||||
doc_page=rel_page,
|
||||
token=token,
|
||||
reason=reason,
|
||||
line_number=line_number,
|
||||
line_text=line,
|
||||
)
|
||||
)
|
||||
|
||||
for page_prefix, token, reason in STALE_EDITOR_PAGE_TOKENS:
|
||||
if not rel_page.startswith(page_prefix):
|
||||
continue
|
||||
if token not in raw_line:
|
||||
continue
|
||||
|
||||
matches.append(
|
||||
StaleDocTokenMatch(
|
||||
doc_page=rel_page,
|
||||
token=token,
|
||||
reason=reason,
|
||||
line_number=line_number,
|
||||
line_text=line,
|
||||
)
|
||||
)
|
||||
|
||||
return matches
|
||||
|
||||
|
||||
def collect_stale_canonical_doc_token_matches(
|
||||
rel_page: str,
|
||||
content: str,
|
||||
) -> list[StaleDocTokenMatch]:
|
||||
if not rel_page.startswith("XCEngine/"):
|
||||
return []
|
||||
|
||||
matches: list[StaleDocTokenMatch] = []
|
||||
for line_number, raw_line in enumerate(content.splitlines(), start=1):
|
||||
line = raw_line.strip()
|
||||
if not line:
|
||||
continue
|
||||
|
||||
for page_prefix, token, reason in STALE_CANONICAL_PAGE_TOKENS:
|
||||
if not rel_page.startswith(page_prefix):
|
||||
continue
|
||||
if token not in raw_line:
|
||||
continue
|
||||
|
||||
matches.append(
|
||||
StaleDocTokenMatch(
|
||||
doc_page=rel_page,
|
||||
token=token,
|
||||
reason=reason,
|
||||
line_number=line_number,
|
||||
line_text=line,
|
||||
)
|
||||
)
|
||||
|
||||
return matches
|
||||
|
||||
|
||||
def collect_dynamic_canonical_doc_token_matches(
|
||||
rel_page: str,
|
||||
content: str,
|
||||
) -> list[StaleDocTokenMatch]:
|
||||
if rel_page != "XCEngine/Core/Asset/AssetDatabase/AssetDatabase.md":
|
||||
return []
|
||||
|
||||
header_path = INCLUDE_ROOT / "XCEngine/Core/Asset/AssetDatabase.h"
|
||||
if not header_path.exists():
|
||||
return []
|
||||
|
||||
header_content = header_path.read_text(encoding="utf-8", errors="ignore")
|
||||
header_match = IMPORTER_VERSION_RE.search(header_content)
|
||||
doc_match = IMPORTER_VERSION_RE.search(content)
|
||||
if not header_match or not doc_match:
|
||||
return []
|
||||
|
||||
current_version = header_match.group(1)
|
||||
documented_version = doc_match.group(1)
|
||||
if current_version == documented_version:
|
||||
return []
|
||||
|
||||
matches: list[StaleDocTokenMatch] = []
|
||||
for line_number, raw_line in enumerate(content.splitlines(), start=1):
|
||||
if documented_version not in raw_line or "kCurrentImporterVersion" not in raw_line:
|
||||
continue
|
||||
|
||||
matches.append(
|
||||
StaleDocTokenMatch(
|
||||
doc_page=rel_page,
|
||||
token=f"kCurrentImporterVersion = {documented_version}",
|
||||
reason=f"与当前 AssetDatabase 头文件中的 importer 版本 `{current_version}` 不符",
|
||||
line_number=line_number,
|
||||
line_text=raw_line.strip(),
|
||||
)
|
||||
)
|
||||
break
|
||||
|
||||
return matches
|
||||
|
||||
|
||||
def collect_doc_state(report_path: Path) -> dict[str, object]:
|
||||
markdown_files = iter_markdown_files()
|
||||
canonical_markdown_files = iter_canonical_markdown_files()
|
||||
public_headers = iter_public_headers()
|
||||
editor_headers = iter_editor_source_headers()
|
||||
public_include_dirs = iter_public_include_dirs()
|
||||
|
||||
declared_header_refs: set[str] = set()
|
||||
valid_header_refs: set[str] = set()
|
||||
canonical_valid_header_refs: set[str] = set()
|
||||
invalid_header_refs: list[tuple[str, str]] = []
|
||||
declared_source_refs: set[str] = set()
|
||||
valid_source_refs: set[str] = set()
|
||||
canonical_valid_source_refs: set[str] = set()
|
||||
invalid_source_refs: list[tuple[str, str]] = []
|
||||
broken_md_links: list[tuple[str, str]] = []
|
||||
non_md_relative_links: list[tuple[str, str]] = []
|
||||
old_template_pages: list[str] = []
|
||||
flat_header_pages: list[str] = []
|
||||
editor_source_pages: dict[str, list[str]] = defaultdict(list)
|
||||
stale_canonical_doc_token_matches: list[StaleDocTokenMatch] = []
|
||||
stale_editor_doc_token_matches: list[StaleDocTokenMatch] = []
|
||||
stale_editor_canonical_page_matches: list[StaleCanonicalPageMatch] = []
|
||||
|
||||
metadata_counts = {
|
||||
"namespace": 0,
|
||||
"type": 0,
|
||||
"description": 0,
|
||||
"header": 0,
|
||||
"source_file": 0,
|
||||
}
|
||||
|
||||
for page in markdown_files:
|
||||
if page.resolve() == report_path.resolve():
|
||||
continue
|
||||
rel_page = normalize_rel_path(page.relative_to(DOC_ROOT).as_posix())
|
||||
content = page.read_text(encoding="utf-8")
|
||||
is_canonical_page = rel_page.startswith("XCEngine/")
|
||||
@@ -144,6 +515,8 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
|
||||
metadata_counts["description"] += 1
|
||||
if is_canonical_page and HEADER_RE.search(content):
|
||||
metadata_counts["header"] += 1
|
||||
if is_canonical_page and SOURCE_FILE_RE.search(content):
|
||||
metadata_counts["source_file"] += 1
|
||||
|
||||
if is_canonical_page and LEGACY_SECTION_RE.search(content):
|
||||
old_template_pages.append(rel_page)
|
||||
@@ -151,6 +524,25 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
|
||||
if is_flat_header_page(page, rel_page):
|
||||
flat_header_pages.append(rel_page)
|
||||
|
||||
for stale_page, reason in STALE_EDITOR_CANONICAL_PAGES:
|
||||
if rel_page == stale_page:
|
||||
stale_editor_canonical_page_matches.append(
|
||||
StaleCanonicalPageMatch(
|
||||
doc_page=rel_page,
|
||||
reason=reason,
|
||||
)
|
||||
)
|
||||
|
||||
stale_editor_doc_token_matches.extend(
|
||||
collect_stale_editor_doc_token_matches(rel_page, content)
|
||||
)
|
||||
stale_canonical_doc_token_matches.extend(
|
||||
collect_stale_canonical_doc_token_matches(rel_page, content)
|
||||
)
|
||||
stale_canonical_doc_token_matches.extend(
|
||||
collect_dynamic_canonical_doc_token_matches(rel_page, content)
|
||||
)
|
||||
|
||||
for match in HEADER_RE.finditer(content):
|
||||
header = normalize_rel_path(match.group(1))
|
||||
declared_header_refs.add(header)
|
||||
@@ -161,6 +553,17 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
|
||||
elif is_canonical_page:
|
||||
invalid_header_refs.append((rel_page, header))
|
||||
|
||||
for match in SOURCE_FILE_RE.finditer(content):
|
||||
source_file = normalize_rel_path(match.group(1))
|
||||
declared_source_refs.add(source_file)
|
||||
if source_file.startswith("editor/src/") and (REPO_ROOT / source_file).exists():
|
||||
valid_source_refs.add(source_file)
|
||||
if rel_page.startswith("XCEngine/Editor/"):
|
||||
canonical_valid_source_refs.add(source_file)
|
||||
editor_source_pages[source_file].append(rel_page)
|
||||
elif rel_page.startswith("XCEngine/Editor/"):
|
||||
invalid_source_refs.append((rel_page, source_file))
|
||||
|
||||
if is_canonical_page or rel_page.startswith(("_meta/", "_tools/")):
|
||||
link_scan_content = strip_fenced_code_blocks(content)
|
||||
for _, target in MD_LINK_RE.findall(link_scan_content):
|
||||
@@ -197,15 +600,23 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
|
||||
)
|
||||
for module, headers in sorted(public_by_module.items())
|
||||
]
|
||||
editor_module_coverages = build_editor_module_coverages(
|
||||
editor_headers,
|
||||
canonical_valid_source_refs,
|
||||
)
|
||||
|
||||
missing_headers = [
|
||||
header for header in public_headers if header not in canonical_valid_header_refs
|
||||
]
|
||||
missing_editor_headers = [
|
||||
header for header in editor_headers if header not in canonical_valid_source_refs
|
||||
]
|
||||
missing_parallel_indexes = [
|
||||
relative
|
||||
for relative in public_include_dirs
|
||||
if not dir_index_doc_path(relative).exists()
|
||||
]
|
||||
editor_risk_entries = collect_editor_risk_entries(editor_source_pages)
|
||||
support_top_dirs = sorted(
|
||||
path.name
|
||||
for path in DOC_ROOT.iterdir()
|
||||
@@ -222,11 +633,22 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
|
||||
"valid_header_refs": sorted(valid_header_refs),
|
||||
"canonical_valid_header_refs": sorted(canonical_valid_header_refs),
|
||||
"invalid_header_refs": invalid_header_refs,
|
||||
"declared_source_refs": sorted(declared_source_refs),
|
||||
"valid_source_refs": sorted(valid_source_refs),
|
||||
"canonical_valid_source_refs": sorted(canonical_valid_source_refs),
|
||||
"invalid_source_refs": invalid_source_refs,
|
||||
"broken_md_links": broken_md_links,
|
||||
"non_md_relative_links": non_md_relative_links,
|
||||
"old_template_pages": sorted(old_template_pages),
|
||||
"flat_header_pages": sorted(flat_header_pages),
|
||||
"stale_canonical_doc_token_matches": stale_canonical_doc_token_matches,
|
||||
"stale_editor_doc_token_matches": stale_editor_doc_token_matches,
|
||||
"stale_editor_canonical_page_matches": stale_editor_canonical_page_matches,
|
||||
"missing_headers": missing_headers,
|
||||
"editor_headers": editor_headers,
|
||||
"editor_module_coverages": editor_module_coverages,
|
||||
"missing_editor_headers": missing_editor_headers,
|
||||
"editor_risk_entries": editor_risk_entries,
|
||||
"module_coverages": module_coverages,
|
||||
"metadata_counts": metadata_counts,
|
||||
"support_top_dirs": support_top_dirs,
|
||||
@@ -249,15 +671,25 @@ def build_report(state: dict[str, object]) -> str:
|
||||
markdown_files: list[Path] = state["markdown_files"] # type: ignore[assignment]
|
||||
canonical_markdown_files: list[Path] = state["canonical_markdown_files"] # type: ignore[assignment]
|
||||
public_headers: list[str] = state["public_headers"] # type: ignore[assignment]
|
||||
editor_headers: list[str] = state["editor_headers"] # type: ignore[assignment]
|
||||
public_include_dirs: list[str] = state["public_include_dirs"] # type: ignore[assignment]
|
||||
valid_header_refs: list[str] = state["valid_header_refs"] # type: ignore[assignment]
|
||||
canonical_valid_header_refs: list[str] = state["canonical_valid_header_refs"] # type: ignore[assignment]
|
||||
invalid_header_refs: list[tuple[str, str]] = state["invalid_header_refs"] # type: ignore[assignment]
|
||||
valid_source_refs: list[str] = state["valid_source_refs"] # type: ignore[assignment]
|
||||
canonical_valid_source_refs: list[str] = state["canonical_valid_source_refs"] # type: ignore[assignment]
|
||||
invalid_source_refs: list[tuple[str, str]] = state["invalid_source_refs"] # type: ignore[assignment]
|
||||
broken_md_links: list[tuple[str, str]] = state["broken_md_links"] # type: ignore[assignment]
|
||||
non_md_relative_links: list[tuple[str, str]] = state["non_md_relative_links"] # type: ignore[assignment]
|
||||
old_template_pages: list[str] = state["old_template_pages"] # type: ignore[assignment]
|
||||
flat_header_pages: list[str] = state["flat_header_pages"] # type: ignore[assignment]
|
||||
stale_canonical_doc_token_matches: list[StaleDocTokenMatch] = state["stale_canonical_doc_token_matches"] # type: ignore[assignment]
|
||||
stale_editor_doc_token_matches: list[StaleDocTokenMatch] = state["stale_editor_doc_token_matches"] # type: ignore[assignment]
|
||||
stale_editor_canonical_page_matches: list[StaleCanonicalPageMatch] = state["stale_editor_canonical_page_matches"] # type: ignore[assignment]
|
||||
missing_headers: list[str] = state["missing_headers"] # type: ignore[assignment]
|
||||
missing_editor_headers: list[str] = state["missing_editor_headers"] # type: ignore[assignment]
|
||||
editor_module_coverages: list[ModuleCoverage] = state["editor_module_coverages"] # type: ignore[assignment]
|
||||
editor_risk_entries: list[EditorRiskEntry] = state["editor_risk_entries"] # type: ignore[assignment]
|
||||
module_coverages: list[ModuleCoverage] = state["module_coverages"] # type: ignore[assignment]
|
||||
metadata_counts: dict[str, int] = state["metadata_counts"] # type: ignore[assignment]
|
||||
support_top_dirs: list[str] = state["support_top_dirs"] # type: ignore[assignment]
|
||||
@@ -275,13 +707,21 @@ def build_report(state: dict[str, object]) -> str:
|
||||
lines.append(f"- Markdown 页面数(全部): `{len(markdown_files)}`")
|
||||
lines.append(f"- Markdown 页面数(canonical): `{len(canonical_markdown_files)}`")
|
||||
lines.append(f"- Public headers 数: `{len(public_headers)}`")
|
||||
lines.append(f"- Editor source headers 数: `{len(editor_headers)}`")
|
||||
lines.append(f"- 有效头文件引用数(全部): `{len(valid_header_refs)}`")
|
||||
lines.append(f"- 有效头文件引用数(canonical): `{len(canonical_valid_header_refs)}`")
|
||||
lines.append(f"- 无效头文件引用数: `{len(invalid_header_refs)}`")
|
||||
lines.append(f"- 有效源文件引用数(全部): `{len(valid_source_refs)}`")
|
||||
lines.append(f"- 有效源文件引用数(Editor canonical): `{len(canonical_valid_source_refs)}`")
|
||||
lines.append(f"- 无效源文件引用数: `{len(invalid_source_refs)}`")
|
||||
lines.append(f"- 失效 `.md` 链接数: `{len(broken_md_links)}`")
|
||||
lines.append(f"- 非 `.md` 相对链接数: `{len(non_md_relative_links)}`")
|
||||
lines.append(f"- 旧模板页面数: `{len(old_template_pages)}`")
|
||||
lines.append(f"- 扁平 header 页面数: `{len(flat_header_pages)}`")
|
||||
lines.append(f"- Canonical 显式过期符号残留数: `{len(stale_canonical_doc_token_matches)}`")
|
||||
lines.append(f"- Editor 显式过期符号残留数: `{len(stale_editor_doc_token_matches)}`")
|
||||
lines.append(f"- Editor 残留 canonical 旧页面数: `{len(stale_editor_canonical_page_matches)}`")
|
||||
lines.append(f"- Editor 高风险单页目录数: `{len(editor_risk_entries)}`")
|
||||
lines.append("")
|
||||
lines.append("## 平行目录")
|
||||
lines.append("")
|
||||
@@ -304,6 +744,16 @@ def build_report(state: dict[str, object]) -> str:
|
||||
f"`{coverage.documented_headers}` | `{coverage.missing_headers}` |"
|
||||
)
|
||||
lines.append("")
|
||||
lines.append("## Editor 源文件页覆盖")
|
||||
lines.append("")
|
||||
lines.append("| 模块 | Source headers | 已覆盖 | 未覆盖 |")
|
||||
lines.append("|------|----------------|--------|--------|")
|
||||
for coverage in editor_module_coverages:
|
||||
lines.append(
|
||||
f"| `{coverage.module}` | `{coverage.public_headers}` | "
|
||||
f"`{coverage.documented_headers}` | `{coverage.missing_headers}` |"
|
||||
)
|
||||
lines.append("")
|
||||
lines.append("## 元信息覆盖")
|
||||
lines.append("")
|
||||
lines.append("| 字段 | 页面数 |")
|
||||
@@ -312,6 +762,7 @@ def build_report(state: dict[str, object]) -> str:
|
||||
lines.append(f"| `类型` | `{metadata_counts['type']}` |")
|
||||
lines.append(f"| `描述` | `{metadata_counts['description']}` |")
|
||||
lines.append(f"| `头文件` | `{metadata_counts['header']}` |")
|
||||
lines.append(f"| `源文件` | `{metadata_counts['source_file']}` |")
|
||||
lines.append("")
|
||||
|
||||
if missing_parallel_indexes:
|
||||
@@ -327,6 +778,12 @@ def build_report(state: dict[str, object]) -> str:
|
||||
lines.extend(format_pairs_table(("文档", "头文件"), invalid_header_refs[:50]))
|
||||
lines.append("")
|
||||
|
||||
if invalid_source_refs:
|
||||
lines.append("## 无效源文件引用")
|
||||
lines.append("")
|
||||
lines.extend(format_pairs_table(("文档", "源文件"), invalid_source_refs[:50]))
|
||||
lines.append("")
|
||||
|
||||
if broken_md_links:
|
||||
lines.append("## 失效 Markdown 链接")
|
||||
lines.append("")
|
||||
@@ -357,6 +814,49 @@ def build_report(state: dict[str, object]) -> str:
|
||||
lines.append(f"- 其余 `{len(flat_header_pages) - 120}` 个页面请直接运行脚本查看。")
|
||||
lines.append("")
|
||||
|
||||
if stale_canonical_doc_token_matches:
|
||||
lines.append("## Canonical 显式过期符号残留")
|
||||
lines.append("")
|
||||
lines.append("| 文档页 | 过期符号 | 原因 | 行号 | 行内容 |")
|
||||
lines.append("|------|----------|------|------|--------|")
|
||||
for entry in stale_canonical_doc_token_matches[:80]:
|
||||
safe_line_text = entry.line_text.replace("|", "\\|")
|
||||
lines.append(
|
||||
f"| `{entry.doc_page}` | `{entry.token}` | `{entry.reason}` | "
|
||||
f"`{entry.line_number}` | `{safe_line_text}` |"
|
||||
)
|
||||
if len(stale_canonical_doc_token_matches) > 80:
|
||||
lines.append(
|
||||
f"| `...` | `...` | `...` | `...` | 其余 `{len(stale_canonical_doc_token_matches) - 80}` 项请直接运行脚本查看 |"
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
if stale_editor_doc_token_matches:
|
||||
lines.append("## Editor 显式过期符号残留")
|
||||
lines.append("")
|
||||
lines.append("| 文档页 | 过期符号 | 原因 | 行号 | 行内容 |")
|
||||
lines.append("|------|----------|------|------|--------|")
|
||||
for entry in stale_editor_doc_token_matches[:80]:
|
||||
safe_line_text = entry.line_text.replace("|", "\\|")
|
||||
lines.append(
|
||||
f"| `{entry.doc_page}` | `{entry.token}` | `{entry.reason}` | "
|
||||
f"`{entry.line_number}` | `{safe_line_text}` |"
|
||||
)
|
||||
if len(stale_editor_doc_token_matches) > 80:
|
||||
lines.append(
|
||||
f"| `...` | `...` | `...` | `...` | 其余 `{len(stale_editor_doc_token_matches) - 80}` 项请直接运行脚本查看 |"
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
if stale_editor_canonical_page_matches:
|
||||
lines.append("## Editor 残留 canonical 旧页面")
|
||||
lines.append("")
|
||||
lines.append("| 文档页 | 原因 |")
|
||||
lines.append("|------|------|")
|
||||
for entry in stale_editor_canonical_page_matches:
|
||||
lines.append(f"| `{entry.doc_page}` | `{entry.reason}` |")
|
||||
lines.append("")
|
||||
|
||||
if missing_headers:
|
||||
lines.append("## 未覆盖的 public headers")
|
||||
lines.append("")
|
||||
@@ -366,6 +866,32 @@ def build_report(state: dict[str, object]) -> str:
|
||||
lines.append(f"- 其余 `{len(missing_headers) - 120}` 个 header 请直接运行脚本查看。")
|
||||
lines.append("")
|
||||
|
||||
if missing_editor_headers:
|
||||
lines.append("## 未覆盖的 Editor 源文件页")
|
||||
lines.append("")
|
||||
for header in missing_editor_headers[:120]:
|
||||
lines.append(f"- `{header}`")
|
||||
if len(missing_editor_headers) > 120:
|
||||
lines.append(f"- 其余 `{len(missing_editor_headers) - 120}` 个源文件请直接运行脚本查看。")
|
||||
lines.append("")
|
||||
|
||||
if editor_risk_entries:
|
||||
lines.append("## Editor 高风险单页目录")
|
||||
lines.append("")
|
||||
lines.append("| 文档页 | 源文件 | 目录内直系页面数 | public 成员函数数 | 对应 `.cpp` 有效行数 |")
|
||||
lines.append("|------|--------|------------------|-------------------|----------------------|")
|
||||
for entry in editor_risk_entries[:40]:
|
||||
lines.append(
|
||||
f"| `{entry.doc_page}` | `{entry.source_file}` | "
|
||||
f"`{entry.direct_page_count}` | `{entry.public_method_count}` | "
|
||||
f"`{entry.implementation_line_count}` |"
|
||||
)
|
||||
if len(editor_risk_entries) > 40:
|
||||
lines.append(
|
||||
f"| `...` | `...` | `...` | `...` | 其余 `{len(editor_risk_entries) - 40}` 项请直接运行脚本查看 |"
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
return "\n".join(lines).rstrip() + "\n"
|
||||
|
||||
|
||||
@@ -386,14 +912,22 @@ def main() -> int:
|
||||
print(f"Markdown pages (all): {len(state['markdown_files'])}")
|
||||
print(f"Markdown pages (canonical): {len(state['canonical_markdown_files'])}")
|
||||
print(f"Public headers: {len(state['public_headers'])}")
|
||||
print(f"Editor source headers: {len(state['editor_headers'])}")
|
||||
print(f"Valid header refs (all): {len(state['valid_header_refs'])}")
|
||||
print(f"Valid header refs (canonical): {len(state['canonical_valid_header_refs'])}")
|
||||
print(f"Invalid header refs: {len(state['invalid_header_refs'])}")
|
||||
print(f"Valid source refs (all): {len(state['valid_source_refs'])}")
|
||||
print(f"Valid source refs (Editor canonical): {len(state['canonical_valid_source_refs'])}")
|
||||
print(f"Invalid source refs: {len(state['invalid_source_refs'])}")
|
||||
print(f"Broken .md links: {len(state['broken_md_links'])}")
|
||||
print(f"Non-.md relative links: {len(state['non_md_relative_links'])}")
|
||||
print(f"Old template pages: {len(state['old_template_pages'])}")
|
||||
print(f"Flat header pages: {len(state['flat_header_pages'])}")
|
||||
print(f"Stale canonical doc tokens: {len(state['stale_canonical_doc_token_matches'])}")
|
||||
print(f"Stale editor doc tokens: {len(state['stale_editor_doc_token_matches'])}")
|
||||
print(f"Stale editor canonical pages: {len(state['stale_editor_canonical_page_matches'])}")
|
||||
print(f"Missing directory index pages: {len(state['missing_parallel_indexes'])}")
|
||||
print(f"Editor high-risk single-page dirs: {len(state['editor_risk_entries'])}")
|
||||
print(f"Report written to: {report_path}")
|
||||
return 0
|
||||
|
||||
|
||||
641
docs/api/_tools/cleanup_template_api_docs.py
Normal file
641
docs/api/_tools/cleanup_template_api_docs.py
Normal file
@@ -0,0 +1,641 @@
|
||||
#!/usr/bin/env python3
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
|
||||
from generate_core_resources_canonical_pages import (
|
||||
DOC_ROOT,
|
||||
INCLUDE_ROOT,
|
||||
REPO_ROOT,
|
||||
build_namespace_map,
|
||||
find_declarations,
|
||||
group_methods,
|
||||
select_primary,
|
||||
)
|
||||
|
||||
|
||||
# Placeholder phrases emitted by the page-generation templates.  A method
# page containing any of these still carries boilerplate instead of a real,
# implementation-derived description.
TEMPLATE_METHOD_TOKENS = (
    "当前页面用于固定",
    "获取相关状态或对象。",
    "设置相关状态或配置。",
    "加载资源或数据。",
    "公开方法,详见头文件声明。",
    "参数语义详见头文件声明。",
    "返回值语义详见头文件声明。",
)
# Subset of the placeholder phrases that appear in class-overview method tables.
TEMPLATE_TABLE_TOKENS = (
    "获取相关状态或对象。",
    "设置相关状态或配置。",
    "加载资源或数据。",
    "公开方法,详见头文件声明。",
)
# Generic name-derived fallback sentences; their presence also marks a page
# as needing a rewrite.
GENERIC_FALLBACK_RE = re.compile(
    r"执行该公开方法对应的当前实现。|"
    r"返回 `[^`]+` 相关结果。|"
    r"更新 `[^`]+` 相关状态。|"
    r"判断 `[^`]+` 条件是否成立。|"
    r"判断是否具备 `[^`]+`。|"
    r"判断当前是否可以执行 `[^`]+`。"
)
# Matches the whole "## 公共方法" markdown section up to (but not including)
# the next H2 heading, or to end of file.
METHOD_SECTION_RE = re.compile(r"(?ms)^## 公共方法\n.*?(?=\n## |\Z)")
# C/C++ comment stripping helpers.
BLOCK_COMMENT_RE = re.compile(r"/\*.*?\*/", re.DOTALL)
LINE_COMMENT_RE = re.compile(r"//.*")
# Single-statement body shapes recognized by analyze_simple_statement.
RETURN_MEMBER_RE = re.compile(r"^return\s+(m_[A-Za-z_]\w*)\s*;?$")
RETURN_MEMBER_METHOD_RE = re.compile(
    r"^return\s+(m_[A-Za-z_]\w*)\.(Data|data|Size|size|CStr|c_str|Get)\s*\((.*?)\)\s*;?$"
)
RETURN_SIMPLE_CALL_RE = re.compile(r"^return\s+([A-Za-z_]\w*(?:::[A-Za-z_]\w*)*)\s*\((.*)\)\s*;?$")
RETURN_CONST_RE = re.compile(r"^return\s+([^;]+?)\s*;?$")
ASSIGN_MEMBER_RE = re.compile(r"^(m_[A-Za-z_]\w*)\s*=\s*([^;]+?)\s*;?$")
# Heuristics for spotting member-field (m_*) mutation and call sites.
MEMBER_WRITE_RE = re.compile(r"\b(m_[A-Za-z_]\w*)\b\s*(?:=|\+=|-=|\*=|/=|%=|>>=|<<=|\+\+|--)")
MEMBER_CALL_RE = re.compile(r"\b(m_[A-Za-z_]\w*)\.(\w+)\s*\(")
DIRECT_CALL_RE = re.compile(r"\b([A-Za-z_]\w*(?:::[A-Za-z_]\w*)*)\s*\(")
ARROW_CALL_RE = re.compile(r"->\s*([A-Za-z_]\w*)\s*\(")
DOT_CALL_RE = re.compile(r"\.\s*([A-Za-z_]\w*)\s*\(")
FIELD_NAME_RE = re.compile(r"\bm_[A-Za-z_]\w*\b")

# C++ keywords that DIRECT_CALL_RE would otherwise report as call targets.
KEYWORDS = {
    "if",
    "for",
    "while",
    "switch",
    "return",
    "sizeof",
    "static_cast",
    "reinterpret_cast",
    "const_cast",
    "dynamic_cast",
    "catch",
    "new",
    "delete",
}
# Member-method names treated as mutating the field they are called on
# (project naming convention — see collect_member_writes).
MUTATING_MEMBER_CALLS = {
    "Append",
    "Assign",
    "Clear",
    "ClearDirty",
    "Emplace",
    "Erase",
    "Insert",
    "PopBack",
    "PushBack",
    "Release",
    "Remove",
    "Reset",
    "Resize",
    "Set",
    "SetInvalid",
    "Shrink",
}
|
||||
|
||||
|
||||
@dataclass
class ImplementationFact:
    """Human-readable conclusions drawn from analyzing one method's implementation."""

    # One-sentence summary used in method tables and the "作用" section;
    # None when no summary could be derived.
    summary: str | None
    # Bullet lines rendered under the "当前实现" section of a method page.
    details: list[str]
|
||||
|
||||
|
||||
@dataclass
class ImplementationBlock:
    """One located definition of a method in header or source text."""

    # "body" for a braced definition, "default" for `= default`,
    # "delete" for `= delete`.
    kind: str
    # Brace-delimited body text with the outer braces removed; empty string
    # for the "default"/"delete" kinds.
    body: str
|
||||
|
||||
|
||||
def has_any_token(content: str, tokens: tuple[str, ...]) -> bool:
    """Return True when *content* contains at least one of *tokens*."""
    for needle in tokens:
        if needle in content:
            return True
    return False
|
||||
|
||||
|
||||
def has_generic_fallback(content: str) -> bool:
    """Return True when *content* still carries a generic auto-generated description."""
    match = GENERIC_FALLBACK_RE.search(content)
    return bool(match)
|
||||
|
||||
|
||||
def strip_comments(text: str) -> str:
    """Remove C/C++ block (/* */) and line (//) comments from *text*."""
    without_blocks = BLOCK_COMMENT_RE.sub("", text)
    return LINE_COMMENT_RE.sub("", without_blocks)
|
||||
|
||||
|
||||
def normalize_whitespace(text: str) -> str:
    """Collapse every run of whitespace in *text* to a single space and trim the ends."""
    # str.split() with no arguments already discards leading/trailing
    # whitespace, so the previous explicit .strip() was redundant.
    return " ".join(text.split())
|
||||
|
||||
|
||||
def split_statements(body: str) -> list[str]:
    """Split *body* into top-level C++ statements at unnested semicolons.

    Nesting inside (), {} and <> suppresses the split.  Note the angle
    depth is adjusted for *every* '<'/'>' character, so plain comparison
    operators are (heuristically) treated like template brackets.
    """
    statements: list[str] = []
    buffer: list[str] = []
    # Current nesting depth, keyed by opening character.
    depths = {"(": 0, "{": 0, "<": 0}
    closer_to_opener = {")": "(", "}": "{", ">": "<"}

    for ch in body:
        if ch in depths:
            depths[ch] += 1
        elif ch in closer_to_opener:
            opener = closer_to_opener[ch]
            depths[opener] = max(0, depths[opener] - 1)

        if ch == ";" and not any(depths.values()):
            piece = "".join(buffer).strip()
            if piece:
                statements.append(piece)
            buffer = []
        else:
            buffer.append(ch)

    trailing = "".join(buffer).strip()
    if trailing:
        statements.append(trailing)
    return statements
|
||||
|
||||
|
||||
def camel_tail(name: str, prefix_len: int) -> str:
    """Return *name* with its first *prefix_len* characters removed.

    Falls back to the full *name* when the remainder would be empty.
    """
    remainder = name[prefix_len:]
    if remainder:
        return remainder
    return name
|
||||
|
||||
|
||||
def format_identifier_list(items: list[str]) -> str:
    """Join *items* as backtick-quoted identifiers separated by '、'.

    Duplicates are dropped, keeping the first occurrence order.
    """
    # dict.fromkeys preserves insertion order, giving an ordered dedup.
    ordered = dict.fromkeys(items)
    return "、".join(f"`{name}`" for name in ordered)
|
||||
|
||||
|
||||
def extract_balanced(text: str, start: int, open_char: str, close_char: str) -> tuple[str, int] | None:
    """Extract the balanced open_char...close_char span of *text* starting at *start*.

    Returns a (span, index_after_span) pair, or None when the span never
    closes before the end of *text*.
    """
    depth = 0
    for position, symbol in enumerate(text[start:], start):
        if symbol == open_char:
            depth += 1
        elif symbol == close_char:
            depth -= 1
            if depth == 0:
                end = position + 1
                return text[start:end], end
    return None
|
||||
|
||||
|
||||
def scan_definition_suffix(text: str, start: int) -> tuple[str, int]:
    """Classify what follows a method's closing parameter parenthesis.

    Starting at *start*, skips trailing qualifiers — ``const``, ``override``,
    ``final``, ``constexpr``, ``noexcept`` (optionally with a parenthesized
    condition), and ``&``/``*`` characters — then returns a (kind, index)
    pair: ``"default"`` for ``= default``, ``"delete"`` for ``= delete``,
    ``"body"`` when an opening ``{`` follows (index points at that brace),
    or ``""`` when none of these match (e.g. a plain declaration or ``= 0``).
    """
    index = start
    while True:
        # Skip whitespace before each qualifier candidate.
        while index < len(text) and text[index].isspace():
            index += 1

        # NOTE: startswith checks are prefix-only, so e.g. an identifier
        # beginning with "const" would also be consumed — acceptable for
        # the headers this tool targets.
        if text.startswith("const", index):
            index += len("const")
            continue
        if text.startswith("override", index):
            index += len("override")
            continue
        if text.startswith("final", index):
            index += len("final")
            continue
        if text.startswith("constexpr", index):
            index += len("constexpr")
            continue
        if text.startswith("noexcept", index):
            index += len("noexcept")
            # noexcept may carry a parenthesized condition; skip it too.
            while index < len(text) and text[index].isspace():
                index += 1
            if index < len(text) and text[index] == "(":
                extracted = extract_balanced(text, index, "(", ")")
                if extracted:
                    _, index = extracted
            continue
        # Stray reference/pointer characters between qualifiers.
        if index < len(text) and text[index] in {"&", "*"}:
            index += 1
            continue
        break

    # After all qualifiers: decide how the definition terminates.
    if text.startswith("= default", index):
        return "default", index + len("= default")
    if text.startswith("= delete", index):
        return "delete", index + len("= delete")
    if index < len(text) and text[index] == "{":
        return "body", index
    return "", index
|
||||
|
||||
|
||||
def extract_qualified_blocks(text: str, class_name: str, method_name: str) -> list[ImplementationBlock]:
    """Collect out-of-line ``Class::Method(...)`` definitions found in *text*."""
    qualified = rf"{re.escape(class_name)}\s*::\s*{re.escape(method_name)}\s*\("
    found: list[ImplementationBlock] = []

    for hit in re.finditer(qualified, text):
        open_paren = text.find("(", hit.start())
        params = extract_balanced(text, open_paren, "(", ")")
        if params is None:
            continue
        kind, position = scan_definition_suffix(text, params[1])
        if kind in {"default", "delete"}:
            found.append(ImplementationBlock(kind=kind, body=""))
        elif kind == "body":
            braced = extract_balanced(text, position, "{", "}")
            if braced is not None:
                # Keep the body without its surrounding braces.
                found.append(ImplementationBlock(kind="body", body=braced[0][1:-1]))

    return found
|
||||
|
||||
|
||||
def extract_inline_blocks(text: str, method_name: str) -> list[ImplementationBlock]:
    """Collect in-class (inline) definitions of *method_name* found in *text*.

    Occurrences preceded by an alphanumeric character or by ':', '.' or '>'
    are call sites or qualified names, not definitions, and are skipped.
    """
    found: list[ImplementationBlock] = []

    for hit in re.finditer(rf"{re.escape(method_name)}\s*\(", text):
        before = text[hit.start() - 1] if hit.start() > 0 else ""
        if before.isalnum() or before in {":", ".", ">"}:
            continue

        open_paren = text.find("(", hit.start())
        params = extract_balanced(text, open_paren, "(", ")")
        if params is None:
            continue
        kind, position = scan_definition_suffix(text, params[1])
        if kind in {"default", "delete"}:
            found.append(ImplementationBlock(kind=kind, body=""))
        elif kind == "body":
            braced = extract_balanced(text, position, "{", "}")
            if braced is not None:
                # Keep the body without its surrounding braces.
                found.append(ImplementationBlock(kind="body", body=braced[0][1:-1]))

    return found
|
||||
|
||||
|
||||
def summarize_return_value(expr: str) -> str:
    """Produce a one-sentence Chinese description of a C++ return expression."""
    value = normalize_whitespace(expr)
    if value in {"true", "false", "nullptr"}:
        return f"固定返回 `{value}`。"
    if FIELD_NAME_RE.fullmatch(value) is not None:
        return f"返回 `{value}` 当前值。"
    if value.endswith("()"):
        return f"返回 `{value}` 的结果。"
    return f"返回 `{value}`。"
|
||||
|
||||
|
||||
def analyze_simple_statement(statement: str) -> ImplementationFact | None:
    """Analyze a single-statement method body.

    Recognizes, in order: a plain member return, member-method forwarding
    (Data/Size/CStr/...), a member assignment, forwarding to a free or
    qualified call, and a constant/expression return.  Returns None when
    the statement matches none of the known shapes.
    """
    normalized = normalize_whitespace(statement)
    if not normalized:
        return None

    match = RETURN_MEMBER_RE.match(normalized)
    if match:
        field = match.group(1)
        return ImplementationFact(
            summary=f"返回 `{field}` 当前值。",
            details=[f"内联返回 `{field}`。"],
        )

    match = RETURN_MEMBER_METHOD_RE.match(normalized)
    if match:
        field, method, args = match.groups()
        if method.lower() == "data":
            summary = f"返回 `{field}` 暴露的首地址。"
        elif method.lower() == "size":
            summary = f"返回 `{field}` 当前大小。"
        elif method in {"CStr", "c_str"}:
            summary = f"返回 `{field}` 的 C 风格字符串视图。"
        else:
            summary = f"返回 `{field}.{method}()` 的结果。"
        # Fix: the previous version appended a no-op .replace("()", "()")
        # to this f-string; removed as dead code (output is unchanged).
        detail = f"当前实现直接调用 `{field}.{method}({args})`。"
        return ImplementationFact(summary=summary, details=[detail])

    match = ASSIGN_MEMBER_RE.match(normalized)
    if match:
        field, value = match.groups()
        return ImplementationFact(
            summary=f"写入 `{field}`。",
            details=[f"当前实现把 `{value}` 写入 `{field}`。"],
        )

    match = RETURN_SIMPLE_CALL_RE.match(normalized)
    if match:
        call_name = match.group(1)
        return ImplementationFact(
            summary=f"返回 `{call_name}(...)` 的结果。",
            details=[f"当前实现直接转发到 `{call_name}(...)`。"],
        )

    match = RETURN_CONST_RE.match(normalized)
    if match:
        # Summary and the single detail line are identical here; compute
        # the sentence once instead of calling summarize_return_value twice.
        described = summarize_return_value(match.group(1))
        return ImplementationFact(summary=described, details=[described])

    return None
|
||||
|
||||
|
||||
def collect_calls(body: str) -> list[str]:
    """List unqualified callee names appearing in *body*.

    Scans direct, '->' and '.' call patterns in that order; C++ keywords
    that merely look like calls are filtered out.  Duplicates are kept.
    """
    names: list[str] = []
    for pattern in (DIRECT_CALL_RE, ARROW_CALL_RE, DOT_CALL_RE):
        for found in pattern.finditer(body):
            # Strip any namespace/class qualification, keeping the last part.
            unqualified = found.group(1).rsplit("::", 1)[-1]
            if unqualified not in KEYWORDS:
                names.append(unqualified)
    return names
|
||||
|
||||
|
||||
def collect_member_writes(body: str) -> list[str]:
    """List member fields (m_*) that *body* writes.

    A write is an assignment/compound/increment operator on the field, or a
    call of a known mutating method (or any Set* method) on it.  The result
    is deduplicated, keeping first-seen order.
    """
    written = list(MEMBER_WRITE_RE.findall(body))
    written.extend(
        field
        for field, method in MEMBER_CALL_RE.findall(body)
        if method in MUTATING_MEMBER_CALLS or method.startswith("Set")
    )
    # dict.fromkeys preserves insertion order, giving an ordered dedup.
    return list(dict.fromkeys(written))
|
||||
|
||||
|
||||
def analyze_complex_body(body: str) -> ImplementationFact:
    """Heuristically summarize a (possibly multi-statement) method body.

    Tries a precise single-statement analysis first; otherwise derives a
    summary and detail bullets from member writes, call sites and return
    statements.  Always returns a fact — falls back to a generic sentence
    when nothing more specific can be said.
    """
    stripped = strip_comments(body)
    normalized = normalize_whitespace(stripped)
    statements = [normalize_whitespace(item) for item in split_statements(stripped)]

    if not normalized:
        # Comment-only or genuinely empty body.
        return ImplementationFact(
            summary="当前实现为空。",
            details=["当前函数体为空。"],
        )

    if len(statements) == 1:
        # Prefer the precise single-statement patterns when they apply.
        simple = analyze_simple_statement(statements[0])
        if simple:
            return simple

    details: list[str] = []
    summary: str | None = None

    # First summary candidate: a single mutated member field.
    writes = collect_member_writes(stripped)
    if writes:
        details.append(f"会更新 {format_identifier_list(writes[:4])}。")
        if len(writes) == 1 and summary is None:
            summary = f"更新 `{writes[0]}`。"

    # Second candidate: the called helpers (trivial accessors filtered out).
    calls = collect_calls(stripped)
    filtered_calls = [call for call in calls if call not in {"Get", "Set", "Data", "Size"}]
    if filtered_calls:
        details.append(f"当前实现会调用 {format_identifier_list(filtered_calls[:5])}。")
        if summary is None and len(filtered_calls) == 1:
            summary = f"执行 `{filtered_calls[0]}(...)` 相关流程。"
        elif summary is None:
            summary = f"执行 {format_identifier_list(filtered_calls[:3])} 协同流程。"

    # Third candidate: describe the return value when there is exactly one
    # unconditional return.  NOTE: the branch test looks for "if (" with a
    # space, so "if(" would be missed — heuristic only.
    return_values = re.findall(r"\breturn\s+([^;]+);", stripped)
    if len(return_values) > 1 or ("if (" in stripped and return_values):
        details.append("包含条件分支,并可能提前返回。")
    elif return_values and summary is None:
        summary = summarize_return_value(return_values[0])

    if "nullptr" in stripped:
        details.append("包含 `nullptr` 相关分支。")
    if "not implemented" in stripped or "未实现" in stripped:
        details.append("当前实现仍带有未完成分支。")

    if summary is None:
        # Generic fallback — intentionally matches GENERIC_FALLBACK_RE so
        # the page is revisited on the next cleanup run.
        summary = "执行该公开方法对应的当前实现。"

    return ImplementationFact(summary=summary, details=details or [summary])
|
||||
|
||||
|
||||
def dedupe_lines(lines: list[str]) -> list[str]:
    """Return *lines* with duplicates removed, keeping first occurrences in order."""
    # dict.fromkeys preserves insertion order, giving an ordered dedup.
    return list(dict.fromkeys(lines))
|
||||
|
||||
|
||||
def analyze_method_group(
    class_name: str,
    method_name: str,
    overloads: list[dict[str, object]],
    header_text: str,
    source_text: str,
    source_rel: str | None,
) -> ImplementationFact:
    """Analyze all overloads of one method and merge the findings.

    Looks for out-of-line Class::Method definitions in *source_text* first,
    falling back to inline definitions in *header_text*.  Returns a fact
    whose summary is the first non-empty per-block summary, or a
    name-prefix-based fallback sentence when nothing was found.
    """
    implementation_blocks = extract_qualified_blocks(source_text, class_name, method_name)
    if not implementation_blocks:
        implementation_blocks = extract_inline_blocks(header_text, method_name)

    details: list[str] = []
    summaries: list[str] = []

    # Pure-virtual declarations are detected from the declaration suffix
    # metadata, not from any implementation block.
    for overload in overloads:
        suffix = str(overload.get("suffix", "")).strip()
        if "= 0" in suffix:
            summaries.append("纯虚接口。")
            details.append("该声明是纯虚接口,基类不提供实现。")

    for block in implementation_blocks:
        if block.kind == "default":
            # `= default` reads differently for constructors/destructors.
            if method_name == class_name:
                summaries.append(f"构造 `{class_name}` 实例。")
                details.append("当前为默认构造实现。")
            elif method_name == f"~{class_name}":
                summaries.append(f"销毁 `{class_name}` 实例。")
                details.append("当前为默认析构实现。")
            else:
                details.append("当前为 `= default` 实现。")
            continue

        if block.kind == "delete":
            details.append("当前声明为 `= delete`。")
            continue

        # Braced body: run the heuristic body analysis.
        fact = analyze_complex_body(block.body)
        if fact.summary:
            summaries.append(fact.summary)
        details.extend(fact.details)

    if not details and source_rel:
        # No implementation located at all; at least point at the .cpp file.
        details.append(f"具体定义位于 `{source_rel}`。")

    # First non-empty summary wins; otherwise fall back to a sentence
    # derived from the method-name prefix convention (Get/Set/Is/...).
    short_desc = next((item for item in summaries if item), None)
    if short_desc is None:
        if method_name == class_name:
            short_desc = f"构造 `{class_name}` 实例。"
        elif method_name == f"~{class_name}":
            short_desc = f"销毁 `{class_name}` 实例。"
        elif method_name.startswith("Get"):
            short_desc = f"返回 `{camel_tail(method_name, 3)}` 相关结果。"
        elif method_name.startswith("Set"):
            short_desc = f"更新 `{camel_tail(method_name, 3)}` 相关状态。"
        elif method_name.startswith("Is"):
            short_desc = f"判断 `{camel_tail(method_name, 2)}` 条件是否成立。"
        elif method_name.startswith("Has"):
            short_desc = f"判断是否具备 `{camel_tail(method_name, 3)}`。"
        elif method_name.startswith("Can"):
            short_desc = f"判断当前是否可以执行 `{camel_tail(method_name, 3)}`。"
        elif method_name.startswith("Load"):
            short_desc = f"执行 `{method_name}` 加载流程。"
        elif method_name.startswith("Create"):
            short_desc = f"执行 `{method_name}` 创建流程。"
        elif method_name.startswith("Update"):
            short_desc = f"执行一次 `{method_name}` 更新。"
        else:
            short_desc = f"执行 `{method_name}` 对应的公开操作。"

    if not details:
        details.append(short_desc)

    return ImplementationFact(summary=short_desc, details=dedupe_lines(details))
|
||||
|
||||
|
||||
def build_method_page(
    class_name: str,
    namespace: str,
    relative_header: str,
    group: dict[str, object],
    analysis: ImplementationFact,
) -> str:
    """Render the full markdown page for one method group.

    Cross-links the class overview page and, when a matching Get/Set
    counterpart exists among the sibling groups, links that page too.
    """
    label = str(group["label"])
    method_name = str(group["method_name"])
    overloads: list[dict[str, object]] = group["overloads"]  # type: ignore[assignment]

    signature_lines = [f"{overload['signature']};" for overload in overloads]
    detail_lines = [f"- {detail}" for detail in analysis.details]

    page: list[str] = [
        f"# {class_name}::{label}",
        "",
        f"**命名空间**: `{namespace}`",
        "",
        "**类型**: `method`",
        "",
        f"**头文件**: `{relative_header}`",
        "",
        "## 签名",
        "",
        "```cpp",
        *signature_lines,
        "```",
        "",
        "## 作用",
        "",
        analysis.summary,
        "",
        "## 当前实现",
        "",
        *detail_lines,
        "",
        "## 相关文档",
        "",
        f"- [{class_name}]({class_name}.md)",
    ]

    siblings = group.get("siblings", [])
    if method_name.startswith("Get"):
        partner = f"Set{method_name[3:]}"
        if any(str(item["file_name"]) == partner for item in siblings):  # type: ignore[operator]
            page.append(f"- [{partner}]({partner}.md)")
    elif method_name.startswith("Set"):
        partner = f"Get{method_name[3:]}"
        if any(str(item["file_name"]) == partner for item in siblings):  # type: ignore[operator]
            page.append(f"- [{partner}]({partner}.md)")

    return "\n".join(page).rstrip() + "\n"
|
||||
|
||||
|
||||
def rebuild_method_table(
    content: str,
    method_groups: list[dict[str, object]],
    analyses: dict[str, ImplementationFact],
) -> str:
    """Replace the '## 公共方法' table in *content* with one rebuilt from *analyses*.

    Returns *content* unchanged when it has no such section.
    """
    if "## 公共方法" not in content:
        return content

    rows = ["## 公共方法", "", "| 方法 | 描述 |", "|------|------|"]
    for group in method_groups:
        page_name = str(group["file_name"])
        caption = str(group["label"])
        rows.append(f"| [{caption}]({page_name}.md) | {analyses[page_name].summary} |")

    rebuilt_section = "\n".join(rows) + "\n"
    return METHOD_SECTION_RE.sub(rebuilt_section, content)
|
||||
|
||||
|
||||
def main() -> int:
    """Rewrite template-flavored API doc pages from real header/source analysis.

    Walks every public header under ``engine/include``, reuses the canonical
    page generator's declaration parsing, and for each documented class:
    rebuilds the overview's method table and regenerates per-method pages —
    but only for pages that still contain template/placeholder phrases.
    Returns 0 (the process exit code).
    """
    rewritten_method_pages = 0
    rewritten_overviews = 0

    for header_path in sorted(INCLUDE_ROOT.rglob("*.h")):
        # Header path relative to engine/include (keeps the XCEngine/ prefix).
        relative_header = header_path.relative_to(INCLUDE_ROOT.parent).as_posix()
        # The implementation is assumed to mirror the header layout under
        # engine/src with a .cpp suffix; missing sources are tolerated.
        relative_source = header_path.relative_to(INCLUDE_ROOT).with_suffix(".cpp")
        source_path = REPO_ROOT / "engine" / "src" / relative_source
        source_text = source_path.read_text(encoding="utf-8", errors="ignore") if source_path.exists() else ""
        source_rel = source_path.relative_to(REPO_ROOT).as_posix() if source_path.exists() else None

        header_text = header_path.read_text(encoding="utf-8", errors="ignore")
        lines = header_text.splitlines()
        declarations = find_declarations(lines, build_namespace_map(lines))
        primary = select_primary(header_path.stem, declarations)
        if primary is None or not primary.methods:
            # No documented primary type (or it has no methods) — skip header.
            continue

        # Docs live in a directory tree parallel to the header tree.
        doc_dir = DOC_ROOT / "XCEngine" / header_path.parent.relative_to(INCLUDE_ROOT) / header_path.stem
        if not doc_dir.exists():
            continue

        method_groups = group_methods(primary.methods, primary.name)
        analyses: dict[str, ImplementationFact] = {}
        for group in method_groups:
            # Each group sees its siblings so Get/Set cross-links can be built.
            group["siblings"] = method_groups
            analyses[str(group["file_name"])] = analyze_method_group(
                primary.name,
                str(group["method_name"]),
                group["overloads"],  # type: ignore[arg-type]
                header_text,
                source_text,
                source_rel,
            )

        # Rebuild the class overview's method table only when it still
        # carries template/placeholder wording.
        overview_path = doc_dir / f"{header_path.stem}.md"
        if overview_path.exists():
            overview_content = overview_path.read_text(encoding="utf-8")
            if has_any_token(overview_content, TEMPLATE_TABLE_TOKENS) or has_generic_fallback(overview_content):
                updated = rebuild_method_table(overview_content, method_groups, analyses)
                if updated != overview_content:
                    overview_path.write_text(updated, encoding="utf-8")
                    rewritten_overviews += 1

        # Regenerate individual method pages under the same condition.
        for group in method_groups:
            file_name = str(group["file_name"])
            page_path = doc_dir / f"{file_name}.md"
            if not page_path.exists():
                continue

            content = page_path.read_text(encoding="utf-8")
            if not has_any_token(content, TEMPLATE_METHOD_TOKENS) and not has_generic_fallback(content):
                # Page already hand-written / previously rewritten — keep it.
                continue

            updated = build_method_page(
                primary.name,
                primary.namespace,
                relative_header,
                group,
                analyses[file_name],
            )
            if updated != content:
                page_path.write_text(updated, encoding="utf-8")
                rewritten_method_pages += 1

    print(f"Rewritten overview pages: {rewritten_overviews}")
    print(f"Rewritten method pages: {rewritten_method_pages}")
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point: propagate main()'s return value as the exit code.
    raise SystemExit(main())
|
||||
Reference in New Issue
Block a user