docs: add xceditor api tree and new resource docs

This commit is contained in:
2026-04-10 17:10:42 +08:00
parent 6b90c2f6c3
commit 66ae9ec919
189 changed files with 6613 additions and 30 deletions

View File

@@ -13,8 +13,8 @@ DOC_ROOT = SCRIPT_DIR.parent if SCRIPT_DIR.name == "_tools" else SCRIPT_DIR
REPO_ROOT = DOC_ROOT.parents[1]
INCLUDE_ROOT = REPO_ROOT / "engine" / "include"
PUBLIC_INCLUDE_ROOT = INCLUDE_ROOT / "XCEngine"
XCEDITOR_INCLUDE_ROOT = REPO_ROOT / "new_editor" / "include" / "XCEditor"
EDITOR_SOURCE_ROOT = REPO_ROOT / "editor" / "src"
PARALLEL_ROOT = DOC_ROOT / "XCEngine"
META_ROOT = DOC_ROOT / "_meta"
DEFAULT_REPORT = META_ROOT / "rebuild-status.md"
@@ -69,17 +69,17 @@ STALE_EDITOR_PAGE_TOKENS: tuple[tuple[str, str, str], ...] = (
)
STALE_CANONICAL_PAGE_TOKENS: tuple[tuple[str, str, str], ...] = (
(
"XCEngine/Rendering/CameraRenderRequest/",
"XCEngine/Rendering/Planning/CameraRenderRequest/",
"builtinPostProcess",
"与当前 CameraRenderRequest 已删除该字段的实现不符",
),
(
"XCEngine/Rendering/CameraRenderRequest/",
"XCEngine/Rendering/Planning/CameraRenderRequest/",
"BuiltinPostProcessRequest",
"与当前 CameraRenderRequest 已删除该子请求对象的实现不符",
),
(
"XCEngine/Rendering/CameraRenderer/",
"XCEngine/Rendering/Execution/CameraRenderer/",
"m_builtinPostProcessBuilder",
"与当前 CameraRenderer 已删除该成员的实现不符",
),
@@ -147,6 +147,33 @@ class StaleCanonicalPageMatch:
reason: str
@dataclass(frozen=True)
class PublicDocRoot:
    """Pairing of a public header tree with its canonical doc tree."""
    # First path segment used in doc-relative paths (e.g. "XCEngine").
    doc_root_name: str
    # Filesystem root of the public headers for this API surface.
    include_root: Path
    # Filesystem root of the canonical Markdown pages mirroring those headers.
    doc_root: Path
# All documented public API surfaces; iteration order is also report order.
PUBLIC_DOC_ROOTS: tuple[PublicDocRoot, ...] = (
    PublicDocRoot(
        doc_root_name="XCEngine",
        include_root=PUBLIC_INCLUDE_ROOT,
        doc_root=DOC_ROOT / "XCEngine",
    ),
    PublicDocRoot(
        doc_root_name="XCEditor",
        include_root=XCEDITOR_INCLUDE_ROOT,
        doc_root=DOC_ROOT / "XCEditor",
    ),
)
# Root names, e.g. ("XCEngine", "XCEditor").
PUBLIC_DOC_ROOT_NAMES = tuple(root.doc_root_name for root in PUBLIC_DOC_ROOTS)
# Prefixes ("XCEngine/", ...) used with str.startswith to spot canonical pages.
CANONICAL_PAGE_PREFIXES = tuple(f"{root.doc_root_name}/" for root in PUBLIC_DOC_ROOTS)
# Maps a root name to the directory that "<RootName>/..." header refs resolve
# against (the PARENT of the include root, so the ref's own prefix is reused).
PUBLIC_HEADER_ROOT_MAP = {
    root.doc_root_name: root.include_root.parent
    for root in PUBLIC_DOC_ROOTS
}
def normalize_rel_path(path: str) -> str:
    """Normalize a relative path to forward-slash form (Windows backslashes -> "/")."""
    return "/".join(path.split("\\"))
@@ -160,17 +187,24 @@ def iter_markdown_files() -> list[Path]:
def iter_canonical_markdown_files() -> list[Path]:
return sorted(
path
for path in PARALLEL_ROOT.rglob("*.md")
)
files: list[Path] = []
for root in PUBLIC_DOC_ROOTS:
if not root.doc_root.exists():
continue
files.extend(root.doc_root.rglob("*.md"))
return sorted(files)
def iter_public_headers() -> list[str]:
return sorted(
normalize_rel_path(path.relative_to(INCLUDE_ROOT).as_posix())
for path in PUBLIC_INCLUDE_ROOT.rglob("*.h")
)
headers: list[str] = []
for root in PUBLIC_DOC_ROOTS:
if not root.include_root.exists():
continue
headers.extend(
normalize_rel_path(path.relative_to(root.include_root.parent).as_posix())
for path in root.include_root.rglob("*.h")
)
return sorted(headers)
def iter_editor_source_headers() -> list[str]:
@@ -181,15 +215,35 @@ def iter_editor_source_headers() -> list[str]:
def iter_public_include_dirs() -> list[str]:
dirs = ["XCEngine"]
dirs.extend(
f"XCEngine/{path.relative_to(PUBLIC_INCLUDE_ROOT).as_posix()}"
for path in sorted(PUBLIC_INCLUDE_ROOT.rglob("*"))
if path.is_dir()
)
dirs: list[str] = []
for root in PUBLIC_DOC_ROOTS:
if not root.include_root.exists():
continue
dirs.append(root.doc_root_name)
dirs.extend(
f"{root.doc_root_name}/{path.relative_to(root.include_root).as_posix()}"
for path in sorted(root.include_root.rglob("*"))
if path.is_dir()
)
return dirs
def find_public_doc_root_by_rel_path(rel_path: str) -> PublicDocRoot | None:
    """Look up the PublicDocRoot whose name matches the first path segment.

    Returns None when the leading segment of *rel_path* is not a known root.
    """
    prefix = rel_path.split("/", 1)[0]
    return next(
        (candidate for candidate in PUBLIC_DOC_ROOTS if candidate.doc_root_name == prefix),
        None,
    )
def header_ref_exists(header: str) -> bool:
    """Check that a declared header reference resolves to a real file.

    *header* is a forward-slash path whose first segment names a doc root;
    it is joined onto that root's mapped parent directory and probed on disk.
    Unknown root names yield False.
    """
    base = PUBLIC_HEADER_ROOT_MAP.get(header.split("/", 1)[0])
    return base is not None and (base / header).exists()
def dir_index_name(relative: str) -> str:
    """Return the expected index-page filename for a directory: "<dirname>.md"."""
    leaf = Path(relative).name
    return leaf + ".md"
@@ -262,22 +316,28 @@ def count_public_method_like_declarations(path: Path) -> int:
def is_dir_index_page(page: Path) -> bool:
if not page.is_relative_to(PARALLEL_ROOT):
if not page.is_relative_to(DOC_ROOT):
return False
if page.stem != page.parent.name:
return False
relative_dir = page.parent.relative_to(PARALLEL_ROOT)
source_dir = PUBLIC_INCLUDE_ROOT / relative_dir
rel_page = normalize_rel_path(page.relative_to(DOC_ROOT).as_posix())
root = find_public_doc_root_by_rel_path(rel_page)
if root is None:
return False
relative_dir = page.parent.relative_to(root.doc_root)
source_dir = root.include_root / relative_dir
return source_dir.exists() and source_dir.is_dir()
def is_flat_header_page(page: Path, rel_page: str) -> bool:
if not rel_page.startswith("XCEngine/"):
root = find_public_doc_root_by_rel_path(rel_page)
if root is None:
return False
if is_dir_index_page(page):
return False
return (INCLUDE_ROOT / Path(rel_page).with_suffix(".h")).exists()
return (root.include_root.parent / Path(rel_page).with_suffix(".h")).exists()
def build_editor_module_coverages(
@@ -474,6 +534,7 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
public_headers = iter_public_headers()
editor_headers = iter_editor_source_headers()
public_include_dirs = iter_public_include_dirs()
public_root_counts: dict[str, int] = defaultdict(int)
declared_header_refs: set[str] = set()
valid_header_refs: set[str] = set()
@@ -505,7 +566,7 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
continue
rel_page = normalize_rel_path(page.relative_to(DOC_ROOT).as_posix())
content = page.read_text(encoding="utf-8")
is_canonical_page = rel_page.startswith("XCEngine/")
is_canonical_page = rel_page.startswith(CANONICAL_PAGE_PREFIXES)
if is_canonical_page and NAMESPACE_RE.search(content):
metadata_counts["namespace"] += 1
@@ -546,7 +607,7 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
for match in HEADER_RE.finditer(content):
header = normalize_rel_path(match.group(1))
declared_header_refs.add(header)
if (INCLUDE_ROOT / header).exists():
if header_ref_exists(header):
valid_header_refs.add(header)
if is_canonical_page:
canonical_valid_header_refs.add(header)
@@ -579,17 +640,24 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
non_md_relative_links.append((rel_page, target))
for header in public_headers:
public_root_counts[header.split("/", 1)[0]] += 1
documented_root_counts: dict[str, int] = defaultdict(int)
for header in canonical_valid_header_refs:
documented_root_counts[header.split("/", 1)[0]] += 1
public_by_module: dict[str, list[str]] = defaultdict(list)
documented_by_module: dict[str, set[str]] = defaultdict(set)
for header in public_headers:
module = header.split("/", 2)[1]
module_parts = header.split("/", 2)
module = "/".join(module_parts[:2]) if len(module_parts) >= 2 else module_parts[0]
public_by_module[module].append(header)
for header in canonical_valid_header_refs:
if not header.startswith("XCEngine/"):
continue
module = header.split("/", 2)[1]
module_parts = header.split("/", 2)
module = "/".join(module_parts[:2]) if len(module_parts) >= 2 else module_parts[0]
documented_by_module[module].add(header)
module_coverages = [
@@ -628,6 +696,8 @@ def collect_doc_state(report_path: Path) -> dict[str, object]:
"markdown_files": markdown_files,
"canonical_markdown_files": canonical_markdown_files,
"public_headers": public_headers,
"public_root_counts": dict(sorted(public_root_counts.items())),
"documented_root_counts": dict(sorted(documented_root_counts.items())),
"public_include_dirs": public_include_dirs,
"declared_header_refs": sorted(declared_header_refs),
"valid_header_refs": sorted(valid_header_refs),
@@ -671,6 +741,8 @@ def build_report(state: dict[str, object]) -> str:
markdown_files: list[Path] = state["markdown_files"] # type: ignore[assignment]
canonical_markdown_files: list[Path] = state["canonical_markdown_files"] # type: ignore[assignment]
public_headers: list[str] = state["public_headers"] # type: ignore[assignment]
public_root_counts: dict[str, int] = state["public_root_counts"] # type: ignore[assignment]
documented_root_counts: dict[str, int] = state["documented_root_counts"] # type: ignore[assignment]
editor_headers: list[str] = state["editor_headers"] # type: ignore[assignment]
public_include_dirs: list[str] = state["public_include_dirs"] # type: ignore[assignment]
valid_header_refs: list[str] = state["valid_header_refs"] # type: ignore[assignment]
@@ -707,6 +779,11 @@ def build_report(state: dict[str, object]) -> str:
lines.append(f"- Markdown 页面数(全部): `{len(markdown_files)}`")
lines.append(f"- Markdown 页面数canonical: `{len(canonical_markdown_files)}`")
lines.append(f"- Public headers 数: `{len(public_headers)}`")
for root_name, count in public_root_counts.items():
documented_count = documented_root_counts.get(root_name, 0)
lines.append(
f"- `{root_name}` public headers 数: `{count}`canonical 已覆盖 `{documented_count}`"
)
lines.append(f"- Editor source headers 数: `{len(editor_headers)}`")
lines.append(f"- 有效头文件引用数(全部): `{len(valid_header_refs)}`")
lines.append(f"- 有效头文件引用数canonical: `{len(canonical_valid_header_refs)}`")
@@ -725,7 +802,11 @@ def build_report(state: dict[str, object]) -> str:
lines.append("")
lines.append("## 平行目录")
lines.append("")
lines.append(f"- Canonical 根目录: `{PARALLEL_ROOT.relative_to(DOC_ROOT).as_posix()}`")
lines.append(
"- Canonical 根目录: `"
+ ", ".join(root.doc_root_name for root in PUBLIC_DOC_ROOTS)
+ "`"
)
lines.append(f"- 源码目录节点数: `{len(public_include_dirs)}`")
lines.append(
f"- 已生成目录总览页节点数: `{len(public_include_dirs) - len(missing_parallel_indexes)}`"