Folds three previously-separate pieces into one preliminary-example repo for the HAHACS thesis: - thesis/ (submodule) → gitea Thesis.git — the PhD proposal - fret-pipeline/ — FRET requirements to AIGER controller (was ~/Documents/fret_processing/; prior single-commit history abandoned per user decision) - plant-model/ — 10-state PKE + lumped T/H PWR model (was ~/Documents/PKE_Playground/; never version-controlled before) - presentations/2026DICE/ (submodule) → gitea 2026DICE.git - reachability/, hardware/ — empty placeholders for Thrust 3 and HIL - docs/architecture.md — how the discrete and continuous layers compose - claude_memory/ — session notes and scratch knowledge pattern Plant model refactored to thesis naming (x, plant, u, ref); pke_th_rhs now takes u as an explicit arg instead of reading rho_ext from the params struct. First two controllers built to the contract u = ctrl_<mode>(t, x, plant, ref): ctrl_null (baseline) and ctrl_operation (stabilizing, proportional on T_avg). Validated under a 100% -> 80% Q_sg step: ctrl_operation reduces steady-state T_avg drift ~47% vs. the unforced plant. Root CLAUDE.md emphasizes that CLAUDE.md files are living documents and that any knowledge not captured before a session ends is lost forever; claude_memory/ holds the session-level notes that haven't stabilized enough to graduate into a CLAUDE.md. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
285 lines
10 KiB
Python
#!/usr/bin/env python3
|
|
"""Parse FRET SMV exports and extract requirements, variables, and LTL formulas.
|
|
|
|
Reads .smv files from a directory (e.g., specs/DRC/) and produces a JSON
|
|
config file with extracted requirements and inferred input/output roles.
|
|
The user can review and adjust the config before synthesis.
|
|
"""
|
|
|
|
import argparse
|
|
import json
|
|
import re
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
|
|
def parse_smv_file(path: Path) -> dict:
    """Parse a single FRET-exported SMV file.

    Extracts boolean variable declarations from the VAR section, the
    requirement-text / LTLSPEC pairs that FRET embeds as comments, and a
    requirement ID inferred from the filename.

    Args:
        path: Path to a FRET-exported ``.smv`` file.

    Returns:
        Dict with keys ``file`` (filename), ``req_id``, ``req_texts``
        (sorted, de-duplicated), ``variables`` (declared booleans, in
        declaration order), and ``ltlspecs`` (list of name/formula dicts).
    """
    # FRET exports are UTF-8; be explicit so parsing doesn't depend on the
    # platform's locale encoding.
    text = path.read_text(encoding='utf-8')

    # Extract variables. The VAR section ends at DEFINE when present, but
    # exports without a DEFINE section must also be handled — the previous
    # pattern (r'VAR\s*(.*?)\s*DEFINE') required DEFINE to follow and
    # silently returned no variables for such files.
    var_section = re.search(r'VAR\s*(.*?)\s*(?=DEFINE|LTLSPEC|\Z)', text, re.DOTALL)
    variables = []
    if var_section:
        for match in re.finditer(r'(\w+)\s*:\s*boolean', var_section.group(1)):
            variables.append(match.group(1))

    # Extract requirement text and LTLSPEC formulas. FRET writes the
    # original FRETish text as a '-- Req text:' comment immediately before
    # each named LTLSPEC.
    req_texts = set()
    specs = []
    for match in re.finditer(
        r'--\s*Req text:\s*(.*?)\n\s*LTLSPEC\s+NAME\s+([\w-]+)\s*:=\s*(.*?)\s*;',
        text
    ):
        req_text = match.group(1).strip()
        spec_name = match.group(2)
        formula = match.group(3).strip()
        req_texts.add(req_text)
        specs.append({
            'name': spec_name,
            'formula': formula,
        })

    # Extract requirement ID from filename (e.g. 'DRC-3' from 'DRC-3.smv');
    # fall back to the whole stem when no <word>-<number> pattern is found.
    req_id_match = re.search(r'(\w+-\d+)', path.stem)
    req_id = req_id_match.group(1) if req_id_match else path.stem

    return {
        'file': path.name,
        'req_id': req_id,
        'req_texts': sorted(req_texts),
        'variables': variables,
        'ltlspecs': specs,
    }
|
|
|
|
|
|
# FRETish pattern → LTL conversion.
# Ordered list of (regex, converter) pairs tried in sequence by
# fretish_to_ltl(); the FIRST match wins, so more specific templates
# ("While ... If ...") must stay ahead of the more general ones
# ("If ..." alone, bare "shall always satisfy"). Do not reorder.
# The bare `\w+` in several patterns swallows the component name that
# FRETish places between the condition and the "shall" keyword.
FRETISH_PATTERNS = [
    # "shall initially satisfy P" → P (evaluated at the initial timepoint)
    (r'shall initially satisfy (.+)',
     lambda m: m.group(1).strip()),

    # "While S If C shall immediately satisfy P" → G((S & C) -> X P)
    # NOTE(review): "immediately" maps to X (next step), matching the
    # "at the next timepoint" template below — confirm this is the
    # intended FRET semantics rather than same-step satisfaction.
    (r'[Ww]hile (.+?) [Ii]f (.+?) \w+ shall immediately satisfy (.+)',
     lambda m: f'G((({m.group(1)}) & ({m.group(2)})) -> X({m.group(3).strip()}))'),

    # "While S If C shall at the next timepoint satisfy P" → G((S & C) -> X P)
    (r'[Ww]hile (.+?) [Ii]f (.+?) \w+ shall at the next timepoint satisfy (.+)',
     lambda m: f'G((({m.group(1)}) & ({m.group(2)})) -> X({m.group(3).strip()}))'),

    # "While S If C shall always satisfy P" → G((S & C) -> P)
    (r'[Ww]hile (.+?) [Ii]f (.+?) \w+ shall always satisfy (.+)',
     lambda m: f'G((({m.group(1)}) & ({m.group(2)})) -> ({m.group(3).strip()}))'),

    # "If C shall immediately satisfy P" → G(C -> X P)
    (r'[Ii]f (.+?) \w+ shall immediately satisfy (.+)',
     lambda m: f'G(({m.group(1)}) -> X({m.group(2).strip()}))'),

    # "if C shall at the next timepoint satisfy P" → G(C -> X P)
    (r'[Ii]f (.+?) \w+ shall at the next timepoint satisfy (.+)',
     lambda m: f'G(({m.group(1)}) -> X({m.group(2).strip()}))'),

    # "When C shall always satisfy P" → G(C -> X G(P))
    # ("when" triggers once; P must then hold from the next step onward)
    (r'[Ww]hen (.+?) \w+ shall always satisfy (.+)',
     lambda m: f'G(({m.group(1)}) -> X(G({m.group(2).strip()})))'),

    # "While C shall always satisfy P" → G(C -> P)
    # ("while" is a scope: P holds only as long as C does)
    (r'[Ww]hile (.+?) \w+ shall always satisfy (.+)',
     lambda m: f'G(({m.group(1)}) -> ({m.group(2).strip()}))'),

    # "shall always satisfy P" (no condition) → G(P)
    (r'shall always satisfy (.+)',
     lambda m: f'G({m.group(1).strip()})'),
]
|
|
|
|
|
|
def fretish_to_ltl(text: str) -> tuple[str | None, str | None, str | None]:
    """Convert FRETish requirement text to LTL.

    Tries each template in FRETISH_PATTERNS in order and uses the first
    one that matches.

    Returns (ltl_formula, condition_var, satisfaction_expr) or (None, None, None).
    """
    for regex, build in FRETISH_PATTERNS:
        if (m := re.search(regex, text)) is None:
            continue
        formula = build(m)
        # First variable following an If/When/While keyword, if any.
        cond = re.search(r'(?:[Ii]f|[Ww]hen|[Ww]hile)\s+(!?\w+)', text)
        # Everything after the (last-matching) "satisfy" keyword.
        sat = re.search(r'satisfy\s+(.+)', text)
        return (
            formula,
            None if cond is None else cond.group(1),
            None if sat is None else sat.group(1).strip(),
        )
    return None, None, None
|
|
|
|
|
|
def extract_variables_from_expr(expr: str) -> set[str]:
    """Extract variable names from an LTL/boolean expression.

    Strips LTL temporal operators, SMV boolean literals, and boolean
    punctuation, then treats every remaining whitespace-separated token
    as a variable name.

    Args:
        expr: An LTL or boolean expression string.

    Returns:
        Set of identifier tokens appearing in the expression.
    """
    # Remove temporal operators and boolean literals. TRUE/FALSE are SMV
    # constants, not variables — previously they leaked into the result.
    cleaned = re.sub(r'\b(G|F|X|U|R|W|TRUE|FALSE)\b', ' ', expr)
    # Remove boolean punctuation ('->' falls out via '-' and '>'). Since
    # '!' is stripped here, no surviving token can start with it, so the
    # old `not w.startswith('!')` guard was dead code and is dropped.
    cleaned = re.sub(r'[()!&|>\-]', ' ', cleaned)
    return set(cleaned.split())
|
|
|
|
|
|
def infer_io_roles(requirements: list[dict]) -> dict[str, dict]:
    """Infer input/output roles from FRETish patterns.

    Variables appearing in condition, trigger ("If ..."), or scope
    ("While ...") clauses are evidence of an input role; variables in the
    satisfaction expression are evidence of an output role. A variable
    with both kinds of evidence is flagged as a conflict for the user.

    Returns {var_name: {'role': 'input'|'output'|'conflict', 'reasons': [...]}}.
    """
    evidence: dict[str, dict] = {}

    def _note(name: str, kind: str, reason: str) -> None:
        # Record one piece of role evidence for a variable.
        entry = evidence.setdefault(name, {'input_reasons': [], 'output_reasons': []})
        entry[kind].append(reason)

    for req in requirements:
        for text in req['req_texts']:
            _, cond_var, sat_expr = fretish_to_ltl(text)

            # Gather likely-input variables: the parsed condition variable,
            # plus anything inside "While <scope>" and "If <trigger>".
            candidates: set[str] = set()
            if cond_var:
                candidates |= extract_variables_from_expr(cond_var)
            scope = re.search(r'[Ww]hile\s+(.+?)\s+(?:[Ii]f|shall)', text)
            if scope:
                candidates |= extract_variables_from_expr(scope.group(1))
            trigger = re.search(r'[Ii]f\s+(.+?)\s+\w+\s+shall', text)
            if trigger:
                candidates |= extract_variables_from_expr(trigger.group(1))

            for name in candidates:
                _note(name, 'input_reasons', f'{req["req_id"]}: condition in "{text}"')

            # Variables in the satisfaction expression → likely outputs.
            if sat_expr:
                for name in extract_variables_from_expr(sat_expr):
                    _note(name, 'output_reasons', f'{req["req_id"]}: satisfaction in "{text}"')

    # Classify each variable from its accumulated evidence.
    result = {}
    for name, info in evidence.items():
        if info['input_reasons'] and info['output_reasons']:
            role = 'conflict'
        elif info['input_reasons']:
            role = 'input'
        else:
            role = 'output'
        result[name] = {
            'role': role,
            'input_reasons': info['input_reasons'],
            'output_reasons': info['output_reasons'],
        }

    return result
|
|
|
|
|
|
def main():
    """CLI entry point: parse a directory of FRET SMV exports into a JSON config.

    Reads every .smv file in the given directory, converts FRETish text to
    LTL, infers input/output roles, writes a config JSON next to the specs
    (or at --output), and prints a human-readable summary including any
    unconverted requirements or role conflicts. Exits with status 1 on a
    missing directory or when no .smv files are found.
    """
    parser = argparse.ArgumentParser(description='Parse FRET SMV exports')
    parser.add_argument('spec_dir', type=Path, help='Directory containing .smv files')
    parser.add_argument('-o', '--output', type=Path, default=None,
                        help='Output JSON config file (default: <spec_dir>/config.json)')
    args = parser.parse_args()

    # Validate inputs before doing any work.
    if not args.spec_dir.is_dir():
        print(f'Error: {args.spec_dir} is not a directory', file=sys.stderr)
        sys.exit(1)

    # sorted() gives a deterministic parse order across filesystems.
    smv_files = sorted(args.spec_dir.glob('*.smv'))
    if not smv_files:
        print(f'Error: no .smv files found in {args.spec_dir}', file=sys.stderr)
        sys.exit(1)

    # Parse all SMV files
    requirements = [parse_smv_file(f) for f in smv_files]

    # Convert FRETish to LTL. Unconvertible requirements are kept with
    # ltl=None plus an 'error' note so the user can fill them in manually.
    ltl_formulas = []
    for req in requirements:
        for text in req['req_texts']:
            ltl, _, _ = fretish_to_ltl(text)
            if ltl:
                ltl_formulas.append({
                    'req_id': req['req_id'],
                    'fretish': text,
                    'ltl': ltl,
                })
            else:
                ltl_formulas.append({
                    'req_id': req['req_id'],
                    'fretish': text,
                    'ltl': None,
                    'error': 'Could not convert FRETish to LTL — manual entry needed',
                })

    # Infer I/O roles from the FRETish condition/satisfaction structure.
    io_roles = infer_io_roles(requirements)

    # Collect all variables declared in the SMV VAR sections.
    all_vars = set()
    for req in requirements:
        all_vars.update(req['variables'])

    # Add any variables found in formulas but not in VAR sections
    for entry in ltl_formulas:
        if entry['ltl']:
            all_vars.update(extract_variables_from_expr(entry['ltl']))

    # Build the config skeleton; 'variables' is filled in below.
    config = {
        '_comment': 'Generated by parse_smv.py. Review and edit variable roles before synthesis.',
        'spec_name': args.spec_dir.name,
        'variables': {},
        'requirements': ltl_formulas,
    }

    # Variables without any role evidence are marked 'unknown' for the
    # user to classify by hand.
    for var in sorted(all_vars):
        role_info = io_roles.get(var, {'role': 'unknown', 'input_reasons': [], 'output_reasons': []})
        config['variables'][var] = {
            'role': role_info['role'],
            'input_reasons': role_info['input_reasons'],
            'output_reasons': role_info['output_reasons'],
        }

    # Output
    output_path = args.output or (args.spec_dir / 'config.json')
    output_path.write_text(json.dumps(config, indent=2) + '\n')

    # Summary
    print(f'Parsed {len(smv_files)} SMV files')
    print(f'Found {len(ltl_formulas)} requirements')
    print(f'Found {len(all_vars)} variables:')
    for var in sorted(all_vars):
        role = io_roles.get(var, {}).get('role', 'unknown')
        marker = ' ⚠ CONFLICT' if role == 'conflict' else ''
        print(f' {var}: {role}{marker}')

    # Warn about requirements that need manual LTL entry.
    unconverted = [e for e in ltl_formulas if e['ltl'] is None]
    if unconverted:
        print(f'\n⚠ {len(unconverted)} requirements could not be auto-converted:')
        for e in unconverted:
            print(f' {e["req_id"]}: {e["fretish"]}')

    # Warn about variables with conflicting role evidence, showing the
    # per-requirement reasons so the user can decide which role is right.
    conflicts = [v for v, info in io_roles.items() if info['role'] == 'conflict']
    if conflicts:
        print(f'\n⚠ {len(conflicts)} variables have conflicting roles (appear as both input and output):')
        for v in conflicts:
            print(f' {v}')
            for r in io_roles[v]['input_reasons']:
                print(f' INPUT: {r}')
            for r in io_roles[v]['output_reasons']:
                print(f' OUTPUT: {r}')
        print('\nPlease resolve conflicts in the config file before synthesis.')

    print(f'\nConfig written to: {output_path}')
    print('Review the config, resolve any conflicts, then run synthesize.py')
|
|
|
|
|
|
# Script entry point: run the CLI only when executed directly.
if __name__ == '__main__':
    main()
|