Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
160 changes: 2 additions & 158 deletions .github/workflows/sync-tag-definitions.yml
Original file line number Diff line number Diff line change
Expand Up @@ -29,167 +29,11 @@ jobs:

- name: Fetch tag type definitions from OpenEPaperLink
id: fetch
run: |
python3 << 'PYEOF'
import urllib.request
import json
import re

print("Fetching tag type files from OpenEPaperLink repository...")

# Fetch the directory listing
url = "https://github.com/OpenEPaperLink/OpenEPaperLink/tree/master/resources/tagtypes"
headers = {'User-Agent': 'Mozilla/5.0'}
req = urllib.request.Request(url, headers=headers)

try:
with urllib.request.urlopen(req, timeout=30) as response:
html = response.read().decode('utf-8')
json_files = re.findall(r'([0-9a-fA-F]+\.json)', html)
json_files = sorted(set(json_files))
print(f"Found {len(json_files)} tag type files")
except Exception as e:
print(f"Error fetching file list: {e}")
exit(1)

# Fetch all tag type definitions
tag_types = {}
errors = []

for filename in json_files:
url = f"https://raw.githubusercontent.com/OpenEPaperLink/OpenEPaperLink/master/resources/tagtypes/{filename}"
try:
with urllib.request.urlopen(url, timeout=10) as response:
data = json.loads(response.read().decode('utf-8'))
type_id = int(filename.replace('.json', ''), 16)

# Extract only required fields
tag_types[type_id] = {
'version': data.get('version'),
'name': data.get('name'),
'width': data.get('width'),
'height': data.get('height'),
}
except Exception as e:
errors.append(f"Error fetching {filename}: {e}")

if errors:
for error in errors:
print(error)

print(f"Successfully fetched {len(tag_types)} tag type definitions")

# Save to file for next step
with open('new_tag_types.json', 'w') as f:
json.dump(tag_types, f, indent=2)

print("Tag types saved to new_tag_types.json")
PYEOF
run: python3 scripts/fetch_tag_types.py new_tag_types.json

- name: Generate updated tag_types.py
id: generate
run: |
python3 << 'PYEOF'
import json
import re

# Load new tag types
with open('new_tag_types.json', 'r') as f:
new_tag_types = json.load(f)

# Read current tag_types.py
with open('custom_components/opendisplay/tag_types.py', 'r') as f:
content = f.read()

# Extract current fallback definitions
match = re.search(r'fallback_definitions = \{(.*?)\n \}', content, re.DOTALL)
if not match:
print("Error: Could not find fallback_definitions in tag_types.py")
exit(1)

current_definitions = match.group(1)

# Parse current definitions to dict
current_types = {}
for line in current_definitions.split('\n'):
match = re.match(r'\s+(\d+):', line)
if match:
type_id = int(match.group(1))
current_types[type_id] = line.strip()

print(f"Current definitions: {len(current_types)} types")
print(f"New definitions: {len(new_tag_types)} types")

# Check if there are differences
changed = False
added = []
removed = []
modified = []

# Find added and modified
for type_id in sorted(new_tag_types.keys()):
if type_id not in current_types:
added.append(type_id)
changed = True
else:
# Compare values
new_line = f'{type_id}: {json.dumps(new_tag_types[str(type_id)])}'
if new_line not in current_types[type_id]:
modified.append(type_id)
changed = True

# Find removed
for type_id in current_types:
if str(type_id) not in new_tag_types and type_id not in [int(k) for k in new_tag_types.keys()]:
removed.append(type_id)
changed = True

# Generate new fallback_definitions content
lines = []
for type_id in sorted([int(k) for k in new_tag_types.keys()]):
type_data = new_tag_types[str(type_id)]
line = f' {type_id}: {json.dumps(type_data)},'
lines.append(line)

new_fallback = '\n'.join(lines)

# Replace in content
new_content = re.sub(
r'(fallback_definitions = \{)\n.*?\n( \})',
r'\1\n' + new_fallback + '\n\2',
content,
flags=re.DOTALL
)

# Write updated file
with open('custom_components/opendisplay/tag_types.py', 'w') as f:
f.write(new_content)

# Create summary
summary = []
if added:
summary.append(f"Added: {len(added)} types ({', '.join(map(str, added[:5]))}{'...' if len(added) > 5 else ''})")
if removed:
summary.append(f"Removed: {len(removed)} types ({', '.join(map(str, removed[:5]))}{'...' if len(removed) > 5 else ''})")
if modified:
summary.append(f"Modified: {len(modified)} types ({', '.join(map(str, modified[:5]))}{'...' if len(modified) > 5 else ''})")

if changed:
print("CHANGED=true")
print(f"SUMMARY={'|'.join(summary)}")
with open('CHANGES_SUMMARY.txt', 'w') as f:
f.write('\n'.join(summary))
else:
print("CHANGED=false")
print("No changes detected")

# Set output for GitHub Actions
import os
with open(os.environ['GITHUB_OUTPUT'], 'a') as f:
f.write(f"changed={'true' if changed else 'false'}\n")
if summary:
f.write(f"summary={'|'.join(summary)}\n")
PYEOF
run: python3 scripts/generate_tag_types.py new_tag_types.json

- name: Create Pull Request
if: steps.generate.outputs.changed == 'true'
Expand Down
85 changes: 85 additions & 0 deletions scripts/fetch_tag_types.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
#!/usr/bin/env python3
"""Fetch tag type definitions from the OpenEPaperLink repository.

Downloads all tag type JSON files from the OpenEPaperLink GitHub repository
and saves them as a consolidated JSON file for further processing.
"""

import json
import re
import sys
import urllib.request


# HTML tree page for the tagtypes directory; scraped to discover filenames.
GITHUB_TREE_URL = (
    "https://github.com/OpenEPaperLink/OpenEPaperLink/tree/master/resources/tagtypes"
)
# Base URL for downloading the raw JSON content of each tag type file.
GITHUB_RAW_URL = (
    "https://raw.githubusercontent.com/OpenEPaperLink/OpenEPaperLink"
    "/master/resources/tagtypes"
)


def fetch_file_list():
    """Return the sorted list of tag type JSON filenames in the repository.

    Scrapes the GitHub HTML tree page for the ``tagtypes`` directory and
    collects every hex-named ``*.json`` filename mentioned on it.
    """
    print("Fetching tag type files from OpenEPaperLink repository...")
    request = urllib.request.Request(
        GITHUB_TREE_URL, headers={"User-Agent": "Mozilla/5.0"}
    )

    with urllib.request.urlopen(request, timeout=30) as response:
        page = response.read().decode("utf-8")

    # De-duplicate (a filename can appear several times in the page markup)
    # and sort for a stable, reproducible ordering.
    json_files = sorted(set(re.findall(r"([0-9a-fA-F]+\.json)", page)))
    print(f"Found {len(json_files)} tag type files")
    return json_files


def fetch_tag_types(json_files):
    """Fetch and parse the tag type definition for each file in *json_files*.

    Each filename is a hexadecimal tag-type id (e.g. ``"01.json"``); the
    parsed integer id becomes the dict key. Fetching is best-effort: a
    failure for one file is reported and skipped, not fatal.

    Args:
        json_files: Iterable of ``<hex>.json`` filenames to download.

    Returns:
        dict mapping int tag-type id to a dict with the ``version``,
        ``name``, ``width`` and ``height`` fields (missing fields are None).
    """
    tag_types = {}
    errors = []

    for filename in json_files:
        # Bug fix: the URL must include the filename being fetched; it
        # previously contained a literal "(unknown)" placeholder, so every
        # request went to the same non-existent path.
        url = f"{GITHUB_RAW_URL}/{filename}"
        try:
            with urllib.request.urlopen(url, timeout=10) as response:
                data = json.loads(response.read().decode("utf-8"))
            # The filename stem is the tag type id in hexadecimal.
            type_id = int(filename.replace(".json", ""), 16)

            # Keep only the fields the downstream generator needs.
            tag_types[type_id] = {
                "version": data.get("version"),
                "name": data.get("name"),
                "width": data.get("width"),
                "height": data.get("height"),
            }
        except Exception as e:  # best-effort: record and continue
            errors.append(f"Error fetching {filename}: {e}")

    if errors:
        for error in errors:
            print(error)

    print(f"Successfully fetched {len(tag_types)} tag type definitions")
    return tag_types


def main():
    """Entry point: download all tag type definitions and write them as JSON.

    An optional first CLI argument overrides the output path
    (default: ``new_tag_types.json``).
    """
    if len(sys.argv) > 1:
        output_file = sys.argv[1]
    else:
        output_file = "new_tag_types.json"

    # A failure to list the files is fatal; per-file fetch errors are not.
    try:
        json_files = fetch_file_list()
    except Exception as e:
        print(f"Error fetching file list: {e}")
        sys.exit(1)

    tag_types = fetch_tag_types(json_files)

    with open(output_file, "w") as handle:
        json.dump(tag_types, handle, indent=2)

    print(f"Tag types saved to {output_file}")


if __name__ == "__main__":
    main()
Loading
Loading