diff --git a/CHANGELOG.md b/CHANGELOG.md index f809bc9..d40a7c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ * [v0.2.0 (2026-03-08)](#v020--2026-03-08-) * [v0.3.0 (2026-03-10)](#v030--2026-03-10-) * [v0.3.2 (2026-03-16)](#v032--2026-03-16-) + * [v0.3.3 (2026-03-18)](#v033--2026-03-18-) @@ -76,3 +77,18 @@ [`can_id_allocator.c`](can_id_allocator.c) `can_rx_can_id_allocator_ack()`. - Fix `can_rx_can_id_allocatee_advertise` renamed to `can_rx_can_id_allocator_advertise` to correctly reflect owning module. + +--- + +## [v0.3.3 (2026-03-18)](https://github.com/scalpelspace/can_driver/releases/tag/v0.3.3) + +- Add per-repo node ID support to `generate_merged_dbc.py`. + - Repo specs now accept `url[@branch][#node_id]` format. + - CAN IDs are patched at merge time using the ScalpelSpace ID scheme. + - Allocation protocol messages (message_id 56..63) are excluded from + patching. + - Transmitter node names in `BU_` and `BO_` lines are suffixed by node ID + (e.g. `MOMENTUM` -> `MOMENTUM_02`). Shared roles (`LISTENER`, `REQUESTER`, + `COMMANDER`) are not suffixed. Message names are always preserved as-is. + - Duplicate node ID assignments across repos produce a warning. + - Update related documentation. diff --git a/README.md b/README.md index 8ecdfaa..297572a 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,11 @@ Low level simplified CAN bus (classic) communication drivers. * [2 CAN ID ScalpelSpace Node Scheme](#2-can-id-scalpelspace-node-scheme) * [2.1 Node ID Allocation Protocol](#21-node-id-allocation-protocol) * [2.2 Implementer Notes](#22-implementer-notes) + * [3 Generate Merged DBC python Script](#3-generate-merged-dbc-python-script) + * [3.1 Usage](#31-usage) + * [3.2 Repo File Format](#32-repo-file-format) + * [3.3 Node ID Patching](#33-node-id-patching) + * [3.4 Notable Behaviour](#34-notable-behaviour) @@ -161,3 +166,61 @@ Reserved `message_id` values for the allocation protocol: nodes boot simultaneously. 
 If stable node ID mapping matters, implement a custom strategy via the
 `node_id_strategy` function in [`can_id_allocator.c`](can_id_allocator.c).
+
+## 3 Generate Merged DBC Python Script
+
+`generate_merged_dbc.py` clones multiple git repos and merges their root-level
+`.dbc` files into a single combined `.dbc`. Each repo represents one node on the
+ScalpelSpace CAN bus, and an optional node ID can be assigned per repo to patch
+all CAN IDs to match the ScalpelSpace ID scheme at merge time.
+
+### 3.1 Usage
+
+```shell
+python3 generate_merged_dbc.py --repos-file repos.txt --out project.dbc --workdir workspace
+```
+
+### 3.2 Repo File Format
+
+One entry per line: `url[@branch][#node_id]`
+
+```
+# Unassigned (node_id=0, CAN IDs left as-is).
+https://github.com/your_org/repo_a.git
+
+# Specific branch, unassigned.
+https://github.com/your_org/repo_b.git@main
+
+# Assigned node IDs.
+https://github.com/your_org/repo_c.git@main#1
+https://github.com/your_org/repo_d.git@main#2
+https://github.com/your_org/repo_d.git@main#3
+```
+
+Lines beginning with `#` are treated as comments.
+
+### 3.3 Node ID Patching
+
+When a node ID is specified, all CAN IDs in that repo's DBC are repacked using
+the ScalpelSpace scheme (`message_id << 5 | node_id`). If no node ID is given,
+CAN IDs are left unchanged (base DBC state, `node_id=0`).
+
+Device node names in `BU_` and transmitter fields are suffixed with the node ID
+(e.g. `MOMENTUM` → `MOMENTUM_02`) to uniquely identify each instance in the
+merged output. Message names and signal definitions are always preserved as-is
+from the source DBC.
+
+Shared role names (`LISTENER`, `REQUESTER`, `COMMANDER`) are never suffixed.
+
+### 3.4 Notable Behaviour
+
+- **Allocation protocol messages are never patched.** Messages with
+  `message_id >= 56` (CAN IDs 1792+) are reserved for the ScalpelSpace node ID
+  allocation protocol and are left unchanged regardless of the assigned node ID.
+- **Duplicate node IDs produce a warning.** Assigning the same node ID to
+  multiple repos will cause CAN ID collisions in the merged output.
+- **Conflicting CAN IDs keep the first occurrence.** If two repos define the
+  same CAN ID after patching, the first is kept and a warning is emitted.
+- **Same repo, multiple instances are supported.** The same repo URL can appear
+  multiple times with different node IDs to represent multiple identical devices
+  on the bus.
diff --git a/generate_merged_dbc.py b/generate_merged_dbc.py
index 53eafa9..4a81553 100644
--- a/generate_merged_dbc.py
+++ b/generate_merged_dbc.py
@@ -3,13 +3,32 @@
 Clone multiple git repos (each containing one or more .dbc files at the root
 directory) and merge them into a single merged .dbc file.
 
+Each repo entry can optionally specify a branch and a node ID using the format:
+    url[@branch][#node_id]
+
+Node ID must be in the assignable range (1..30). If omitted, defaults to 0
+(unassigned), and CAN IDs are left as-is (base DBC state).
+
 Example:
     ```shell
     # Unix.
     python3 generate_merged_dbc.py --repos-file repos.txt --out project.dbc --workdir workspace
     ```
-    Creates a merged DBC named "project.dbc" using repo URLs in "repos.txt"
-    (one repo URL per line). DBC merge work done in "workspace" directory.
+
+    repos.txt example:
+    ```
+    https://github.com/your_org/repo_a.git
+    https://github.com/your_org/repo_b.git@main
+    https://github.com/your_org/repo_c.git@main#1
+    https://github.com/your_org/repo_d.git@main#2
+    https://github.com/your_org/repo_d.git@main#3
+    ```
+
+    Creates a merged DBC named "project.dbc" with:
+    - repo_a messages (default branch) left unpatched (node_id=0).
+    - repo_b messages (main) left unpatched (node_id=0).
+    - repo_c messages (main) patched to node_id=1.
+    - repo_d messages (main) patched to node_id=2 and node_id=3 (2 instances).
""" from __future__ import annotations @@ -25,13 +44,61 @@ REPOS: list[str] = [ # "https://github.com/your_org/repo1.git", - # "https://github.com/your_org/repo2.git@main" + # "https://github.com/your_org/repo2.git@main#2" ] -MSG_START_RE = re.compile(r"^BO_\s+(\d+)\s+") +# ScalpelSpace CAN ID scheme constants. +NODE_ID_BITS = 5 +NODE_ID_MASK = (1 << NODE_ID_BITS) - 1 # 0x1F +MESSAGE_ID_SHIFT = NODE_ID_BITS # 5 +NODE_ID_UNASSIGNED = 0 +NODE_ID_BROADCAST = 31 +CAN_ID_MAX_ASSIGNABLE = 30 # 1..30 inclusive. + +# Node names that represent shared roles rather than specific devices, these are +# not suffixed. +SHARED_NODE_ROLES = {"LISTENER", "REQUESTER", "COMMANDER"} + +MSG_START_RE = re.compile(r"^BO_\s+(\d+)\s+(\w+)\s*:\s*(\d+)\s+(\S+)") NODE_LINE_RE = re.compile(r"^BU_:\s*(.*)$") +def patch_can_id(can_id: int, node_id: int) -> int: + """Repack a ScalpelSpace CAN ID with a new node_id. + + Extracts the message_id from bits 10..5 and repacks with the given node_id + in bits 4..0. + + Args: + can_id: Original 11-bit CAN ID (node_id assumed 0 in base DBC). + node_id: Assignable node ID (1..30). + + Returns: + Patched 11-bit CAN ID. + """ + message_id = (can_id >> MESSAGE_ID_SHIFT) & 0x3F # Extract bits 10..5. + return (message_id << MESSAGE_ID_SHIFT) | (node_id & NODE_ID_MASK) + + +def validate_node_id(node_id: int, spec: str) -> bool: + """Validate node_id is within the assignable range.""" + if node_id == NODE_ID_UNASSIGNED: + return True # 0 is valid (unassigned/base state). 
+ if node_id == NODE_ID_BROADCAST: + print( + f"[ERROR] node_id=31 is reserved for broadcast: {spec}", + file=sys.stderr, + ) + return False + if not (1 <= node_id <= CAN_ID_MAX_ASSIGNABLE): + print( + f"[ERROR] node_id={node_id} out of assignable range (1..30): {spec}", + file=sys.stderr, + ) + return False + return True + + @dataclass class OrderedSet: items: list[str] = field(default_factory=list) @@ -52,12 +119,39 @@ class DBCDoc: other_lines: OrderedSet = field(default_factory=OrderedSet) -def _split_repo_branch(spec: str) -> tuple[str, Optional[str]]: +@dataclass +class RepoSpec: + url: str + branch: Optional[str] + node_id: int # 0 = unassigned (base DBC, no patching). + + +def _split_repo_spec(spec: str) -> RepoSpec: + """Parse a repo spec of the form url[@branch][#node_id].""" + node_id = NODE_ID_UNASSIGNED + + # Extract node_id suffix (#N). + if "#" in spec: + spec, node_id_str = spec.rsplit("#", 1) + try: + node_id = int(node_id_str.strip()) + except ValueError: + print( + f"[WARN] Invalid node_id '{node_id_str}' in spec '{spec}', " + f"defaulting to 0.", + file=sys.stderr, + ) + node_id = NODE_ID_UNASSIGNED + + # Extract branch suffix (@branch). + branch = None if "@" in spec: url, branch = spec.rsplit("@", 1) branch = branch.strip() or None - return url.strip(), branch - return spec.strip(), None + else: + url = spec + + return RepoSpec(url=url.strip(), branch=branch, node_id=node_id) def run(cmd: list[str], cwd: Optional[Path] = None) -> None: @@ -162,7 +256,8 @@ def parse_dbc(path: Path) -> DBCDoc: else: if doc.messages[can_id] != block: print( - f"[WARN] {path.name}: conflicting BO_ {can_id}. Keeping first occurrence.", + f"[WARN] {path.name}: conflicting BO_ {can_id}. " + f"Keeping first occurrence.", file=sys.stderr, ) continue @@ -176,6 +271,86 @@ def parse_dbc(path: Path) -> DBCDoc: return doc +def apply_node_id(doc: DBCDoc, node_id: int, repo_name: str) -> DBCDoc: + """Return a new DBCDoc with all CAN IDs patched for the given node_id. 
+ + Allocation protocol messages (message_id >= 56) are never patched - + these are managed by the allocation protocol itself. + + Node names in BU_ are suffixed with the node_id (e.g. NODE_NAME -> + NODE_NAME_02). Message names are always preserved as-is from the source DBC. + Transmitter fields on BO_ lines are updated to match the suffixed node name. + + Args: + doc: Parsed DBC document. + node_id: Node ID to apply (1..30). 0 = no patching. + repo_name: Used for warning messages on collision. + + Returns: + New DBCDoc with patched CAN IDs, suffixed node names, original message names. + """ + if node_id == NODE_ID_UNASSIGNED: + return doc # No patching needed. + + patched = DBCDoc() + patched.other_lines = doc.other_lines + + # Build suffixed node name mapping: NODE_NAME -> NODE_NAME_02. + node_suffix = f"_{node_id:02d}" + patched.nodes = OrderedSet() + node_name_map: dict[str, str] = {} + for n in doc.nodes.items: + if n in SHARED_NODE_ROLES: + node_name_map[n] = n # No suffix. + patched.nodes.add(n) + else: + suffixed = f"{n}{node_suffix}" + node_name_map[n] = suffixed + patched.nodes.add(suffixed) + + for old_id in doc.message_order: + block = doc.messages[old_id] + + # Extract message_id from the CAN ID (bits 10..5). + message_id = (old_id >> MESSAGE_ID_SHIFT) & 0x3F + + # Skip allocation protocol messages (message_id 56..63). + # These are managed by the allocation protocol, not the node DBC. + if message_id >= 56: + patched.messages[old_id] = block + patched.message_order.append(old_id) + continue + + new_id = patch_can_id(old_id, node_id) + + # Patch BO_ line: update CAN ID and suffix the transmitter node name. + # Message name is preserved exactly as defined in the source DBC. 
+ new_block = [] + for line in block: + m = MSG_START_RE.match(line) + if m: + msg_name = m.group(2) + dlc = m.group(3) + transmitter = m.group(4) + suffixed_transmitter = node_name_map.get( + transmitter, transmitter + ) + line = f"BO_ {new_id} {msg_name}: {dlc} {suffixed_transmitter}" + new_block.append(line) + + if new_id in patched.messages: + print( + f"[WARN] Patched CAN ID {new_id} collision for node_id=" + f"{node_id} in {repo_name}. Keeping first occurrence.", + file=sys.stderr, + ) + else: + patched.messages[new_id] = new_block + patched.message_order.append(new_id) + + return patched + + def parse_base_header(path: Path) -> tuple[list[str], list[str]]: lines = path.read_text(errors="ignore").splitlines() header: list[str] = [] @@ -203,7 +378,8 @@ def merge_docs(base_header: list[str], docs: list[DBCDoc]) -> DBCDoc: else: if out.messages[can_id] != block: print( - f"[WARN] conflicting BO_ {can_id} across inputs. Keeping first occurrence.", + f"[WARN] conflicting BO_ {can_id} across inputs. 
" + f"Keeping first occurrence.", file=sys.stderr, ) @@ -268,14 +444,6 @@ def load_repos_from_file(path: Path) -> list[str]: return repos -def pick_root_dbc(repo_dirs: list[Path]) -> Optional[Path]: - for rd in repo_dirs: - dbcs = find_dbc_files_root_only(rd) - if dbcs: - return dbcs[0] - return None - - def main() -> int: ap = argparse.ArgumentParser() ap.add_argument( @@ -283,7 +451,7 @@ def main() -> int: ) ap.add_argument( "--repos-file", - help="Text file: one repo URL per line (optionally url@branch)", + help="Text file: one repo URL per line (url[@branch][#node_id])", ) ap.add_argument( "--out", default="merged.dbc", help="Output merged DBC path" @@ -293,11 +461,11 @@ def main() -> int: workdir = Path(args.workdir).resolve() workdir.mkdir(parents=True, exist_ok=True) - repos = REPOS + raw_repos = REPOS if args.repos_file: - repos = load_repos_from_file(Path(args.repos_file).resolve()) + raw_repos = load_repos_from_file(Path(args.repos_file).resolve()) - if not repos: + if not raw_repos: print( "[ERROR] No repos provided. " "Edit REPOS in the script or pass --repos-file.", @@ -305,25 +473,49 @@ def main() -> int: ) return 2 + # Parse and validate repo specs. + specs: list[RepoSpec] = [] + for raw in raw_repos: + spec = _split_repo_spec(raw) + if not validate_node_id(spec.node_id, raw): + return 2 + specs.append(spec) + + # Warn on duplicate node_id assignments (excluding 0). + seen_node_ids: dict[int, str] = {} + for spec in specs: + if spec.node_id != NODE_ID_UNASSIGNED: + if spec.node_id in seen_node_ids: + print( + f"[WARN] node_id={spec.node_id} assigned to multiple repos: " + f"'{seen_node_ids[spec.node_id]}' and '{spec.url}'. " + f"CAN ID collisions likely.", + file=sys.stderr, + ) + else: + seen_node_ids[spec.node_id] = spec.url + # Clone/update repos. 
- repo_dirs: list[Path] = [] - for idx, spec in enumerate(repos, start=1): - url, branch = _split_repo_branch(spec) + repo_dirs: list[tuple[Path, RepoSpec]] = [] + for idx, spec in enumerate(specs, start=1): safe = re.sub( r"[^A-Za-z0-9._-]+", "_", - url.strip().split("/")[-1].replace(".git", ""), + spec.url.strip().split("/")[-1].replace(".git", ""), ) - dst = workdir / f"{idx:02d}_{safe}" + # Include node_id in dir name to support multi-instance of same repo. + dst = workdir / f"{idx:02d}_{safe}_n{spec.node_id:02d}" print( - f"[INFO] Syncing {url}" + (f" (branch {branch})" if branch else "") + f"[INFO] Syncing {spec.url}" + + (f" (branch {spec.branch})" if spec.branch else "") + + f" -> node_id={spec.node_id}" ) - git_clone_or_update(url, branch, dst) - repo_dirs.append(dst) + git_clone_or_update(spec.url, spec.branch, dst) + repo_dirs.append((dst, spec)) - # Find root-level DBCs. + # Find and parse root-level DBCs, applying node ID patches. all_dbcs: list[Path] = [] - for rd in repo_dirs: + for rd, _ in repo_dirs: all_dbcs.extend(find_dbc_files_root_only(rd)) if not all_dbcs: @@ -333,13 +525,23 @@ def main() -> int: return 2 print(f"[INFO] Found {len(all_dbcs)} .dbc file(s)") - for p in all_dbcs: - print(f" - {p}") + + docs: list[DBCDoc] = [] + for rd, spec in repo_dirs: + dbcs = find_dbc_files_root_only(rd) + for dbc_path in dbcs: + print( + f" - {dbc_path}" + + (f" [node_id={spec.node_id}]" if spec.node_id else "") + ) + parsed = parse_dbc(dbc_path) + repo_name = dbc_path.stem + patched = apply_node_id(parsed, spec.node_id, repo_name) + docs.append(patched) # Use first DBC as header template. base_header, _ = parse_base_header(all_dbcs[0]) - docs: list[DBCDoc] = [parse_dbc(p) for p in all_dbcs] merged = merge_docs(base_header=base_header, docs=docs) out_text = render_merged(merged)