8 changes: 8 additions & 0 deletions .github/workflows/build-docs.yml
@@ -24,6 +24,14 @@ jobs:
      - name: Build docs
        run: pdm docs

      - name: Verify AI-friendly outputs
        run: |
          echo "Checking for AI-friendly documentation outputs..."
          test -f docs/build/llms.txt && echo "✓ llms.txt found" || (echo "✗ llms.txt missing" && exit 1)
          test -f docs/build/llms-full.txt && echo "✓ llms-full.txt found" || (echo "✗ llms-full.txt missing" && exit 1)
          test -f docs/build/docs-index.json && echo "✓ docs-index.json found" || (echo "✗ docs-index.json missing" && exit 1)
          echo "All AI-friendly outputs generated successfully"

      - name: Upload docs artifact
        uses: actions/upload-artifact@v4
        with:
10 changes: 10 additions & 0 deletions docs/source/conf.py
@@ -72,6 +72,9 @@
    'sphinxext.rediraffe',
    'autoapi.extension',
    'sphinx_design',
    'sphinx_llm.txt',
    'sphinx_json_index',
    'sphinx_llms_enhancements',
]

rst_prolog = """
@@ -135,6 +138,13 @@
"amaranth-soc/cover.rst",
]

# -- llms.txt configuration (AI-friendly documentation)
llms_txt_description = """
ChipFlow is an open-source platform for designing, testing, and manufacturing
custom silicon using Python and the Amaranth HDL. This documentation covers
the ChipFlow library, Digital IP library, Amaranth language, and Amaranth SoC toolkit.
"""
llms_txt_build_parallel = True

intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
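The diff does not show it, but Sphinx can only import sphinx_json_index and sphinx_llms_enhancements if the tools/ directory is on sys.path. A minimal sketch of the setup conf.py would need, assuming conf.py lives at docs/source/ and the extensions at tools/ as in this PR:

import sys
from pathlib import Path

# Hypothetical path setup: make the local extensions under tools/ importable.
# parents[2] climbs from docs/source/conf.py to the repository root.
sys.path.insert(0, str(Path(__file__).resolve().parents[2] / "tools"))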
9 changes: 9 additions & 0 deletions docs/source/platform-api.rst
@@ -0,0 +1,9 @@
Platform API Reference
======================

This section provides the API reference for the ChipFlow platform library.

.. toctree::
   :maxdepth: 3

   chipflow-lib/autoapi/chipflow/index
253 changes: 220 additions & 33 deletions pdm.lock

Large diffs are not rendered by default.

5 changes: 3 additions & 2 deletions pyproject.toml
@@ -6,7 +6,7 @@ authors = [
]
readme = "README.md"

requires-python = "<3.13"
requires-python = ">=3.12,<3.14"
dependencies = [
"sphinx>=7.1",
"sphinxcontrib-platformpicker~=1.4",
@@ -19,10 +19,11 @@ dependencies = [
"sphinx-autobuild>=2024.10.3",
"sphinx-multiproject>=1.0.0",
"sphinx-design>=0.6.0",
"sphinx_design_elements @ git+https://github.com/panodata/sphinx-design-elements@linktree",
"sphinx_design_elements @ git+https://github.com/tech-writing/sphinx-design-elements@origin/linktree",
"sphinxext-rediraffe>=0.2.7",
"sphinx-autoapi>=3.6.0",
"jschon>=0.11.1",
"sphinx-llm>=0.2.0",
]

[build-system]
80 changes: 80 additions & 0 deletions tools/sphinx_json_index.py
@@ -0,0 +1,80 @@
"""
Sphinx extension to generate a structured JSON index of documentation.

This extension generates a docs-index.json file that provides machine-readable
metadata about all documentation pages, including titles, paths, and navigation
structure. This is useful for AI agents and tools that need to understand the
documentation structure programmatically.
"""
import json
from pathlib import Path

from docutils import nodes
from sphinx.application import Sphinx
from sphinx.util import logging

logger = logging.getLogger(__name__)


def build_json_index(app: Sphinx, exception):
    """Generate docs-index.json after build completes."""
    if exception:
        return

    # Only run for HTML builders
    if app.builder.name not in ('html', 'dirhtml'):
        logger.info("Skipping JSON index generation (not an HTML builder)")
        return

    outdir = Path(app.outdir)

    index = {
        "project": app.config.project,
        "version": app.config.version,
        "description": getattr(app.config, 'llms_txt_description', '').strip(),
        "pages": []
    }

    # Iterate through all documents
    for docname in sorted(app.env.found_docs):
        try:
            doc = app.env.get_doctree(docname)
            title = ""
            for node in doc.findall(nodes.title):
                title = node.astext()
                break

            # Determine the HTML path based on builder type
            if app.builder.name == 'dirhtml':
                html_path = f"{docname}/index.html" if docname != 'index' else "index.html"
            else:
                html_path = f"{docname}.html"

            page_info = {
                "path": html_path,
                "title": title or docname,
                "docname": docname,
            }

            # Add toctree children if available
            if hasattr(app.env, 'toctree_includes') and docname in app.env.toctree_includes:
                page_info["children"] = app.env.toctree_includes[docname]

            index["pages"].append(page_info)
        except Exception as e:
            logger.warning(f"Could not process {docname}: {e}")

    # Write JSON index
    output_path = outdir / "docs-index.json"
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(index, f, indent=2, ensure_ascii=False)

    logger.info(f"Generated JSON index with {len(index['pages'])} pages at {output_path}")


def setup(app: Sphinx):
    app.connect('build-finished', build_json_index)
    return {
        'version': '0.1',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
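For orientation, a generated docs-index.json would look roughly like this; the field layout follows the code above, while the concrete titles, paths, and children are hypothetical:

{
  "project": "ChipFlow",
  "version": "0.1",
  "description": "ChipFlow is an open-source platform...",
  "pages": [
    {
      "path": "chipflow-lib/index.html",
      "title": "ChipFlow Library",
      "docname": "chipflow-lib/index",
      "children": ["chipflow-lib/getting-started"]
    }
  ]
}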
164 changes: 164 additions & 0 deletions tools/sphinx_llms_enhancements.py
@@ -0,0 +1,164 @@
"""
Sphinx extension to enhance LLM discoverability.

This extension adds:
1. <script type="text/llms.txt"> tags to HTML pages for inline LLM discovery
   (following Vercel's proposal)
2. Post-processing of llms.txt to organize pages into logical sections
"""
import re
from pathlib import Path
from sphinx.application import Sphinx
from sphinx.util import logging

logger = logging.getLogger(__name__)

# Default section mappings - can be overridden in conf.py
DEFAULT_SECTION_MAPPINGS = {
    "Getting Started": [
        r"^index\.html\.md$",
        r"^chipflow-lib/getting-started",
        r"^chipflow-lib/index",
        r"^tutorial",
        r"^examples/getting-started",
    ],
    "User Guide": [
        r"^chipflow-lib/(?!autoapi)",
        r"^examples/",
        r"^configurator/",
    ],
    "API Reference": [
        r"^chipflow-lib/autoapi/",
        r"^platform-api",
    ],
    "Digital IP Library": [
        r"^chipflow-digital-ip/",
    ],
    "Amaranth Language": [
        r"^amaranth/(?!.*soc)",
    ],
    "Amaranth SoC": [
        r"^amaranth-soc/",
    ],
    "Optional": [
        r"^amaranth/changes",
        r"^support",
    ],
}


def add_llms_script_tag(app: Sphinx, pagename: str, templatename: str,
                        context: dict, doctree) -> None:
    """Add <script type="text/llms.txt"> to HTML pages."""

    description = getattr(app.config, 'llms_txt_description', '')
    if not description:
        description = "Documentation for this project."
    description = description.strip()

    project = app.config.project or "Documentation"

    # Create the inline llms.txt content
    llms_script = f'''<script type="text/llms.txt">
# {project}

> {description}

For complete documentation in LLM-friendly format:
- [Documentation Index](/llms.txt) - Sitemap of all pages
- [Full Documentation](/llms-full.txt) - Complete docs in one file
- [JSON Index](/docs-index.json) - Structured metadata
</script>'''

    # Append to metatags, which Sphinx/Furo include in <head>
    context['metatags'] = context.get('metatags', '') + llms_script


def reorganize_llms_txt(app: Sphinx, exception) -> None:
    """Post-process llms.txt to organize pages into logical sections."""
    if exception:
        return

    outdir = Path(app.outdir)
    llms_txt_path = outdir / "llms.txt"

    if not llms_txt_path.exists():
        logger.warning("llms.txt not found, skipping reorganization")
        return

    # Read current llms.txt
    content = llms_txt_path.read_text(encoding='utf-8')

    # Parse the header (everything before ## Pages)
    header_match = re.match(r'^(.*?)(?=^## Pages|\Z)', content, re.MULTILINE | re.DOTALL)
    if not header_match:
        logger.warning("Could not parse llms.txt header")
        return

    header = header_match.group(1).strip()

    # Extract all page links
    page_pattern = re.compile(r'^- \[([^\]]+)\]\(([^)]+)\)(?::\s*(.*))?$', re.MULTILINE)
    pages = [(m.group(1), m.group(2), m.group(3) or '') for m in page_pattern.finditer(content)]

    if not pages:
        logger.warning("No pages found in llms.txt")
        return

    # Get section mappings from config or use defaults
    section_mappings = getattr(app.config, 'llms_sections', DEFAULT_SECTION_MAPPINGS)

    # Categorize pages into sections
    sections = {name: [] for name in section_mappings.keys()}
    sections["Other"] = []  # Catch-all

    for title, path, description in pages:
        categorized = False
        for section_name, patterns in section_mappings.items():
            for pattern in patterns:
                if re.search(pattern, path):
                    sections[section_name].append((title, path, description))
                    categorized = True
                    break
            if categorized:
                break
        if not categorized:
            sections["Other"].append((title, path, description))

    # Build new llms.txt with sections
    new_content = header + "\n\n"

    for section_name, section_pages in sections.items():
        if not section_pages:
            continue

        new_content += f"## {section_name}\n\n"
        for title, path, description in section_pages:
            if description:
                new_content += f"- [{title}]({path}): {description}\n"
            else:
                new_content += f"- [{title}]({path})\n"
        new_content += "\n"

    # Write reorganized llms.txt
    llms_txt_path.write_text(new_content.strip() + "\n", encoding='utf-8')
    logger.info(f"Reorganized llms.txt with {len(sections)} sections")


def setup(app: Sphinx):
    # Config value for custom section mappings
    app.add_config_value('llms_sections', DEFAULT_SECTION_MAPPINGS, 'html')

    # Add script tag to each HTML page
    app.connect('html-page-context', add_llms_script_tag)

    # Reorganize llms.txt after build (run after sphinx-llm)
    app.connect('build-finished', reorganize_llms_txt, priority=900)

    return {
        'version': '0.1',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
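Because setup() registers llms_sections as a config value, a project can override the default grouping from conf.py. A minimal sketch, with made-up section names and patterns:

# Hypothetical override in docs/source/conf.py. Keys become "## <name>"
# headings in llms.txt; values are regexes matched against each page's path.
llms_sections = {
    "Tutorials": [r"^tutorial"],
    "API Reference": [r"/autoapi/"],
}

Pages that match none of the patterns land in the catch-all "Other" section.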