From 9e6babd55a044730f0a38351a98f6270a848c775 Mon Sep 17 00:00:00 2001 From: Srinivasan Parthasarathy Date: Tue, 21 Oct 2025 16:13:36 -0400 Subject: [PATCH 01/10] tektonc poc Signed-off-by: Srinivasan Parthasarathy --- tektonc/tektonc.py | 290 ++++++++++++++++++++++ tektoncsample/quickstart/pipeline.yaml.j2 | 52 ++++ tektoncsample/quickstart/values.yaml | 4 + 3 files changed, 346 insertions(+) create mode 100644 tektonc/tektonc.py create mode 100644 tektoncsample/quickstart/pipeline.yaml.j2 create mode 100644 tektoncsample/quickstart/values.yaml diff --git a/tektonc/tektonc.py b/tektonc/tektonc.py new file mode 100644 index 00000000..7789a474 --- /dev/null +++ b/tektonc/tektonc.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python3 +""" +tektonc — minimal render+expand for Tekton templates with loop nodes (Option A). + +Authoring grammar (one construct only): + Loop node := { loopName: str, foreach: { domain: { var: [..], ... } }, tasks: [ , ... ] } + Task node := any Tekton task map (name, taskRef/taskSpec, params, runAfter, workspaces, retries, when, timeout, ...) + +Semantics: + - Expansion is cartesian over foreach.domain (keys sorted for determinism). + - Loops can nest; variables from outer loops are in scope for inner loops. + - Dependencies/parallelism are expressed purely via native Tekton 'runAfter'. + - 'finally' supports the same loop nodes as 'tasks'. + - No validation yet (name uniqueness, runAfter targets, DAG acyclicity)—add later. 
+ +CLI: + tektonc -t pipeline.yaml.j2 -f values.yaml [-o build/pipeline.yaml] [--explain] +""" + +from __future__ import annotations + +import argparse +import copy +import itertools +import os +import sys +from typing import Any, Dict, Iterable, List, Mapping, MutableMapping + +import yaml +from jinja2 import Environment, StrictUndefined, TemplateError, Undefined +from jinja2.runtime import Undefined as RTUndefined + +# ────────────────────────────────────────────────────────────────────────────── +# Jinja helpers +# Two-pass render: +# - Outer env: preserves unknown loop vars (e.g., {{ modelRef|dns }} stays literal) +# - Inner env: strict; resolves loop vars during loop expansion +# ────────────────────────────────────────────────────────────────────────────── + +def _dns_inner(s: str) -> str: + """DNS-1123-ish: lowercase, alnum and dash, trim to 63 chars with hash fallback.""" + import re, hashlib + s2 = re.sub(r'[^a-z0-9-]+', '-', str(s).lower()).strip('-') + if len(s2) <= 63: + return s2 + h = hashlib.sha1(s2.encode()).hexdigest()[:8] + return (s2[:63-1-8] + '-' + h).strip('-') + +def _slug_inner(s: str) -> str: + """Looser slug for params: keep letters/numbers/._-; replace others with '-'.""" + import re + return re.sub(r'[^A-Za-z0-9_.-]+', '-', str(s)) + +# Outer filters: if value is undefined, round-trip original expression +def _dns_outer(val: object) -> str: + if isinstance(val, RTUndefined): + name = getattr(val, "_undefined_name", None) or "" + return "{{ " + name + "|dns }}" + return _dns_inner(val) # type: ignore[arg-type] + +def _slug_outer(val: object) -> str: + if isinstance(val, RTUndefined): + name = getattr(val, "_undefined_name", None) or "" + return "{{ " + name + "|slug }}" + return _slug_inner(val) # type: ignore[arg-type] + +class PassthroughUndefined(Undefined): + """In OUTER render, keep unknown variables as '{{ name }}' so inner pass can resolve them.""" + __slots__ = () + def __str__(self) -> str: # type: ignore[override] + name = 
getattr(self, "_undefined_name", None) + return "{{ " + name + " }}" if name else "{{ ?? }}" + def __iter__(self): # allows use in loops without crashing + return iter(()) + def __bool__(self) -> bool: # treat undefined as False + return False + +def _enum(seq): + """Return [{i, item}, ...] for easy serial chains in Jinja.""" + return [{"i": i, "item": v} for i, v in enumerate(seq)] + +def build_env_outer() -> Environment: + env = Environment(undefined=PassthroughUndefined, autoescape=False, trim_blocks=True, lstrip_blocks=True) + env.filters.update({"dns": _dns_outer, "slug": _slug_outer}) + env.globals.update({"enumerate_list": _enum}) + return env + +def build_env_inner() -> Environment: + env = Environment(undefined=StrictUndefined, autoescape=False, trim_blocks=True, lstrip_blocks=True) + env.filters.update({"dns": _dns_inner, "slug": _slug_inner}) + env.globals.update({"enumerate_list": _enum}) + return env + +# ────────────────────────────────────────────────────────────────────────────── +# Expander (no validation yet) +# ────────────────────────────────────────────────────────────────────────────── + +def expand_document(doc: MutableMapping[str, Any], + globals: Mapping[str, Any] | None = None, + jinja_env: Environment | None = None) -> Dict[str, Any]: + """ + Expand loops in a Pipeline document: + - Recursively expands spec.tasks (required) and spec.finally (optional) + - Returns a NEW dict; input is not mutated + """ + env = jinja_env or build_env_inner() + scope: Dict[str, Any] = dict(globals or {}) + + out: Dict[str, Any] = copy.deepcopy(doc) # type: ignore[assignment] + spec = out.get("spec") or {} + + spec["tasks"] = expand_list(spec.get("tasks", []), scope, env) + if "finally" in spec: + spec["finally"] = expand_list(spec.get("finally", []), scope, env) + + out["spec"] = spec + return out + +def expand_list(nodes: Iterable[Any], + scope: Mapping[str, Any], + env: Environment) -> List[Dict[str, Any]]: + """ + Core recursive expander. 
+ + If a node is a loop node (loopName + foreach.domain + tasks list): + * Enumerate cartesian product over the domain (keys sorted for determinism) + * For each binding, extend scope and recursively expand the child 'tasks' + * Concatenate all expansions + + Else (plain Tekton task): + * Deep-copy the map; render ALL scalar strings with current scope (via Jinja) + * Append as a single task in the flat list + """ + flat: List[Dict[str, Any]] = [] + for node in nodes or []: + if _is_loop_node(node): + domain = node["foreach"]["domain"] + child_nodes = node.get("tasks", []) + for binding in _cartesian_bindings(domain): + child_scope = dict(scope) + child_scope.update(binding) + flat.extend(expand_list(child_nodes, child_scope, env)) + else: + rendered = _render_scalars(copy.deepcopy(node), scope, env) + # After scalar render, node should be a mapping for Tekton; we pass it through + flat.append(rendered) # type: ignore[arg-type] + return flat + +# ────────────────────────────────────────────────────────────────────────────── +# Internals +# ────────────────────────────────────────────────────────────────────────────── + +def _is_loop_node(node: Any) -> bool: + """A loop node must be a mapping with loopName, foreach.domain, and tasks (list).""" + from collections.abc import Mapping as _Mapping + if not isinstance(node, _Mapping): + return False + if "loopName" not in node or "foreach" not in node or "tasks" not in node: + return False + f = node["foreach"] + if not isinstance(f, dict) or "domain" not in f: + return False + if not isinstance(node["tasks"], list): + return False + return True + +def _cartesian_bindings(domain: Mapping[str, Iterable[Any]]) -> Iterable[Dict[str, Any]]: + """ + Deterministic cartesian enumeration of a domain dict: {var: [v1, v2], ...} + - Sort domain keys to ensure stable order + - Preserve the order of each value list + - Yield dicts like {'var1': v1, 'var2': v2, ...} + """ + if not isinstance(domain, Mapping): + raise 
TypeError("foreach.domain must be a mapping of {var: list}") + + keys = sorted(domain.keys()) + lists: List[List[Any]] = [] + for k in keys: + vals = domain[k] + if isinstance(vals, (str, bytes)): + raise TypeError(f"foreach.domain['{k}'] must be an iterable of values (not string)") + lists.append(list(vals)) + + for combo in itertools.product(*lists): + yield dict(zip(keys, combo)) + +def _render_scalars(obj: Any, scope: Mapping[str, Any], env: Environment) -> Any: + """ + Recursively render scalar strings using Jinja with the given scope. + - Dict: render values + - List/Tuple: render each element + - String: env.from_string(s).render(scope) + - Other scalars: return as-is + + Note: We do NOT render dict keys — only values. + """ + from collections.abc import Mapping as _Mapping + if isinstance(obj, _Mapping): + return {k: _render_scalars(v, scope, env) for k, v in obj.items()} + if isinstance(obj, list): + return [_render_scalars(v, scope, env) for v in obj] + if isinstance(obj, tuple): + return tuple(_render_scalars(v, scope, env) for v in obj) + if isinstance(obj, str): + try: + return env.from_string(obj).render(**scope) + except TemplateError as e: + raise RuntimeError(f"Template render failed for: {obj!r} (scope keys={list(scope.keys())})") from e + return obj + +# ────────────────────────────────────────────────────────────────────────────── +# CLI +# ────────────────────────────────────────────────────────────────────────────── + +def parse_args(argv=None): + ap = argparse.ArgumentParser(description="Render + expand Tekton templates with loop nodes") + ap.add_argument("-t", "--template", required=True, help="Jinja template file (use - for stdin)") + ap.add_argument("-f", "--values", required=True, help="YAML/JSON values file (use - for stdin)") + ap.add_argument("-o", "--out", help="Output YAML file (default: stdout)") + ap.add_argument("--explain", action="store_true", help="Print name/runAfter table to stderr after expansion") + return 
ap.parse_args(argv) + +def _read_text(path: str) -> str: + return sys.stdin.read() if path == "-" else open(path, "r").read() + +def _load_values(path: str) -> Dict[str, Any]: + data = _read_text(path) + return yaml.safe_load(data) or {} + +def _explain(expanded: Mapping[str, Any]) -> None: + def print_section(title: str, items: List[Mapping[str, Any]]): + print(f"# {title}", file=sys.stderr) + print(f"{'TASK NAME':<60} RUNAFTER", file=sys.stderr) + print("-" * 90, file=sys.stderr) + for t in items: + name = t.get("name", "") # type: ignore[assignment] + ra = t.get("runAfter", []) + ra_str = ", ".join(ra) if isinstance(ra, list) else str(ra) + print(f"{name:<60} {ra_str}", file=sys.stderr) + print("", file=sys.stderr) + + spec = expanded.get("spec") or {} + tasks = spec.get("tasks", []) + print_section("spec.tasks", tasks) + if "finally" in spec: + print_section("spec.finally", spec.get("finally", [])) + +def main(argv=None) -> int: + args = parse_args(argv) + + try: + values = _load_values(args.values) + + # 1) OUTER render with globals; loop vars are preserved verbatim + env_outer = build_env_outer() + template_src = _read_text(args.template) + rendered = env_outer.from_string(template_src).render(**values) + + # 2) YAML parse + doc = yaml.safe_load(rendered) + if not isinstance(doc, dict): + print("Rendered template is not a YAML mapping (expected a Pipeline).", file=sys.stderr) + return 1 + + # 3) Loop expansion with INNER strict env (resolves loop vars) + env_inner = build_env_inner() + expanded: Dict[str, Any] = expand_document(doc, globals=values, jinja_env=env_inner) + + # 4) Optional explain + if args.explain: + _explain(expanded) + + # 5) Output + out_text = yaml.safe_dump(expanded, sort_keys=False) + if args.out: + with open(args.out, "w") as f: + f.write(out_text) + else: + sys.stdout.write(out_text) + return 0 + + except TemplateError as e: + print(f"Template render error: {e}", file=sys.stderr) + except Exception as e: + print(f"Error: {e}", 
file=sys.stderr) + return 1 + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/tektoncsample/quickstart/pipeline.yaml.j2 b/tektoncsample/quickstart/pipeline.yaml.j2 new file mode 100644 index 00000000..c362ca92 --- /dev/null +++ b/tektoncsample/quickstart/pipeline.yaml.j2 @@ -0,0 +1,52 @@ +# pipeline.yaml.j2 +apiVersion: tekton.dev/v1 +kind: Pipeline +metadata: + name: {{ pipeline_name }} +spec: + params: + - name: message + type: string + + tasks: + # Plain Tekton task — unchanged by the expander + - name: print-start + taskRef: { name: echo } + params: + - name: text + value: "Starting pipeline {{ pipeline_name }}" + + # Loop: one task per modelRef + - loopName: per-model + foreach: + domain: + modelRef: {{ models|tojson }} + tasks: + - name: "process-{{ modelRef|dns }}" + taskRef: { name: process-model } + runAfter: [ print-start ] + params: + - { name: model, value: "{{ modelRef }}" } + - { name: message, value: "$(params.message)" } + + # Aggregate after all per-model tasks finish. + # Use an inline list to avoid indentation issues with Jinja. + - name: aggregate-results + taskRef: { name: aggregate } + runAfter: [ {% for m in models %}process-{{ m|dns }}{% if not loop.last %}, {% endif %}{% endfor %} ] + params: + - name: note + value: "All models processed." 
+ + finally: + # Loop in 'finally': one cleanup per model, after aggregate-results + - loopName: cleanup + foreach: + domain: + modelRef: {{ models|tojson }} + tasks: + - name: "cleanup-{{ modelRef|dns }}" + taskRef: { name: cleanup-model } + runAfter: [ aggregate-results ] + params: + - { name: model, value: "{{ modelRef }}" } diff --git a/tektoncsample/quickstart/values.yaml b/tektoncsample/quickstart/values.yaml new file mode 100644 index 00000000..ae207fa8 --- /dev/null +++ b/tektoncsample/quickstart/values.yaml @@ -0,0 +1,4 @@ +pipeline_name: demo-pipeline +models: + - llama-7b + - qwen-2.5-7b From eb374f3f54249ee270f0bb49c34ab9c86ae4ee44 Mon Sep 17 00:00:00 2001 From: Srinivasan Parthasarathy Date: Tue, 21 Oct 2025 16:25:49 -0400 Subject: [PATCH 02/10] added README Signed-off-by: Srinivasan Parthasarathy --- tektonc/README.md | 182 +++++++++++++++++++++++++++++++++++++++ tektonc/requirements.txt | 5 ++ 2 files changed, 187 insertions(+) create mode 100644 tektonc/README.md create mode 100644 tektonc/requirements.txt diff --git a/tektonc/README.md b/tektonc/README.md new file mode 100644 index 00000000..0f881eb2 --- /dev/null +++ b/tektonc/README.md @@ -0,0 +1,182 @@ +# tektonc — A Minimal Template Expander for Tekton Pipelines + +`tektonc` is a lightweight command-line tool that helps authors write **reusable Tekton pipeline templates** using +a small extension to standard Tekton YAML. + +It is designed for the [`llm-d-benchmark`](https://llm-d.ai) repository, where multiple model, workload, inference-scheduler, and other platform configuration variants need to be expressed cleanly without duplicating boilerplate. + +--- + +## ✨ Purpose + +In many `llm-d` benchmarking workflows, you often have a base pipeline structure that needs to repeat the same sequence of tasks for several models, +prefixes, or configuration sweeps. + +Manually writing these combinations leads to large, hard-to-maintain YAML. 
+`tektonc` solves that by introducing a **single, minimal construct** for +compile-time expansion: + +```yaml +loopName: +foreach: + domain: + var1: [a, b, c] + var2: [x, y] +tasks: + - name: ... + runAfter: ... +``` + +Everything else remains **pure Tekton**. + +--- + +## 🧩 Overview + +### Input +1. A Jinja-based Tekton pipeline template (`pipeline.yaml.j2`) +2. A simple YAML file of template values (`values.yaml`) + +### Output +A **flat, valid Tekton pipeline YAML** ready for `kubectl apply` or `tkn pipeline start`. + +### Example + +**Template (`pipeline.yaml.j2`):** + +```yaml +apiVersion: tekton.dev/v1 +kind: Pipeline +metadata: + name: {{ pipeline_name }} +spec: + params: + - name: message + type: string + tasks: + - name: print-start + taskRef: { name: echo } + params: + - name: text + value: "Starting pipeline {{ pipeline_name }}" + + - loopName: per-model + foreach: + domain: + modelRef: {{ models|tojson }} + tasks: + - name: "process-{{ modelRef|dns }}" + taskRef: { name: process-model } + runAfter: [ print-start ] + params: + - { name: model, value: "{{ modelRef }}" } +``` + +**Values (`values.yaml`):** + +```yaml +pipeline_name: demo-pipeline +models: ["llama-7b", "qwen-2.5-7b"] +``` + +Run: + +```bash +tektonc -t pipeline.yaml.j2 -f values.yaml -o build/pipeline.yaml +``` + +Result (`build/pipeline.yaml`): + +```yaml +apiVersion: tekton.dev/v1 +kind: Pipeline +metadata: + name: demo-pipeline +spec: + params: + - name: message + type: string + tasks: + - name: print-start + taskRef: + name: echo + params: + - name: text + value: Starting pipeline demo-pipeline + - name: process-llama-7b + taskRef: + name: process-model + runAfter: + - print-start + params: + - { name: model, value: llama-7b } + - name: process-qwen-2-5-7b + taskRef: + name: process-model + runAfter: + - print-start + params: + - { name: model, value: qwen-2.5-7b } +``` + +--- + +## 🚀 Capabilities + +- **Single construct** — only `loopName + foreach + tasks` +- **Nested loops** — define 
inner/outer iterations naturally +- **Native Tekton** — all fields (`retries`, `when`, `workspaces`, etc.) pass through unchanged +- **Finally blocks** — support the same loop semantics +- **Deterministic expansion** — Cartesian product enumeration of domains +- **Safe** — Jinja variables (`{{ }}`) resolved at compile-time; Tekton params (`$(params.xxx)`) left untouched + +--- + +## 🧠 When to Use It + +Use `tektonc` when you need to: +- generate a Tekton pipeline for benchmarking llm-d configurations, +- run configuration sweeps or inference experiments, +- keep YAML human-readable while supporting complex graph expansions. + +--- + +## 🛠️ Installation + +```bash +pip install -r requirements.txt +``` + +Then test it: + +```bash +python3 tektonc.py -t tektoncsample/quickstart/pipeline.yaml.j2 -f tektoncsample/quickstart/values.yaml --explain +``` + +--- + +## 📘 Command Reference + +``` +tektonc -t TEMPLATE -f VALUES [-o OUTPUT] [--explain] +``` + +| Flag | Description | +|------|--------------| +| `-t, --template` | Path to Jinja template file (`pipeline.yaml.j2`) | +| `-f, --values` | Path to YAML/JSON file containing template variables | +| `-o, --out` | Output file (default: stdout) | +| `--explain` | Print an easy-to-read table of task names and dependencies | + +--- + +## 🤝 Contributing + +- Keep new features minimal and Tekton-native. +- Avoid adding new syntax unless absolutely necessary. +- Open PRs against the `llm-d-benchmark` repo with clear examples under `tektoncsample/`. + +--- + +**In short:** +`tektonc` makes Tekton authoring for llm-d-benchmarking scalable — without inventing a new DSL. It keeps templates clean, YAML valid, and expansion predictable. 
diff --git a/tektonc/requirements.txt b/tektonc/requirements.txt new file mode 100644 index 00000000..6f9f03f3 --- /dev/null +++ b/tektonc/requirements.txt @@ -0,0 +1,5 @@ +# tektonc — minimal Tekton pipeline template compiler +# (compatible with Python 3.9+) + +jinja2>=3.1 +PyYAML>=6.0 From dfe512ba3afdf5faa343c998dd37098fe9d58e0a Mon Sep 17 00:00:00 2001 From: Srinivasan Parthasarathy Date: Tue, 21 Oct 2025 16:34:31 -0400 Subject: [PATCH 03/10] updated readme Signed-off-by: Srinivasan Parthasarathy --- tektonc/README.md | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/tektonc/README.md b/tektonc/README.md index 0f881eb2..ec639c7b 100644 --- a/tektonc/README.md +++ b/tektonc/README.md @@ -9,12 +9,18 @@ It is designed for the [`llm-d-benchmark`](https://llm-d.ai) repository, where m ## ✨ Purpose -In many `llm-d` benchmarking workflows, you often have a base pipeline structure that needs to repeat the same sequence of tasks for several models, -prefixes, or configuration sweeps. +Tekton already provides a powerful foundation for modular and reproducible orchestration: +- **Modularity** — reusable `Task` definitions and `Step`-level composition. +- **Precedence & dependencies** — control flow through `runAfter` relationships. +- **Parallelism** — automatic execution of independent tasks. +- **Failure tolerance** — built-in retries and error handling. +- **Cleanup & teardown** — handled elegantly using `finally` blocks. -Manually writing these combinations leads to large, hard-to-maintain YAML. -`tektonc` solves that by introducing a **single, minimal construct** for -compile-time expansion: +However, in complex `llm-d` benchmarking workflows, you often have a base pipeline structure that needs to repeat the same sequence of tasks for several **models**, **workload variants**, or **inference configurations**. + +Manually authoring these combinations quickly leads to large, repetitive, and error-prone YAML. 
+ +`tektonc` solves this problem by introducing a **single, minimal construct** for compile-time expansion, enabling high-level loops and parameter sweeps while keeping everything 100% Tekton-compatible. ```yaml loopName: @@ -27,7 +33,7 @@ tasks: runAfter: ... ``` -Everything else remains **pure Tekton**. +Everything else remains **pure Tekton** — `tektonc` only handles structured expansion. --- @@ -116,7 +122,7 @@ spec: runAfter: - print-start params: - - { name: model, value: qwen-2.5-7b } + - { name: model, value: qwen-2-5-7b } ``` --- @@ -126,7 +132,7 @@ spec: - **Single construct** — only `loopName + foreach + tasks` - **Nested loops** — define inner/outer iterations naturally - **Native Tekton** — all fields (`retries`, `when`, `workspaces`, etc.) pass through unchanged -- **Finally blocks** — support the same loop semantics +- **Finally blocks** — support the same loop semantics for teardown/cleanup - **Deterministic expansion** — Cartesian product enumeration of domains - **Safe** — Jinja variables (`{{ }}`) resolved at compile-time; Tekton params (`$(params.xxx)`) left untouched @@ -135,7 +141,7 @@ spec: ## 🧠 When to Use It Use `tektonc` when you need to: -- generate a Tekton pipeline for benchmarking llm-d configurations, +- generate a Tekton pipeline for benchmarking `llm-d` configurations, - run configuration sweeps or inference experiments, - keep YAML human-readable while supporting complex graph expansions. @@ -179,4 +185,5 @@ tektonc -t TEMPLATE -f VALUES [-o OUTPUT] [--explain] --- **In short:** -`tektonc` makes Tekton authoring for llm-d-benchmarking scalable — without inventing a new DSL. It keeps templates clean, YAML valid, and expansion predictable. +`tektonc` makes Tekton authoring for `llm-d-benchmark` scalable — without inventing a new DSL. +It keeps templates clean, YAML valid, and expansion predictable. 
From 0bcd4d72d3a0a3deda4a00ed1a8b8e41c96a7080 Mon Sep 17 00:00:00 2001 From: Srinivasan Parthasarathy Date: Tue, 21 Oct 2025 23:41:39 -0400 Subject: [PATCH 04/10] no option A Signed-off-by: Srinivasan Parthasarathy --- tektonc/tektonc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tektonc/tektonc.py b/tektonc/tektonc.py index 7789a474..af6ebc9c 100644 --- a/tektonc/tektonc.py +++ b/tektonc/tektonc.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 """ -tektonc — minimal render+expand for Tekton templates with loop nodes (Option A). +tektonc — minimal render+expand for Tekton templates with loop nodes. Authoring grammar (one construct only): Loop node := { loopName: str, foreach: { domain: { var: [..], ... } }, tasks: [ , ... ] } From ebe2237203594ca32eecdf48c473db1ae83ffaee Mon Sep 17 00:00:00 2001 From: Srinivasan Parthasarathy Date: Tue, 21 Oct 2025 23:49:13 -0400 Subject: [PATCH 05/10] commented templated Signed-off-by: Srinivasan Parthasarathy --- tektonc/tektonc.py | 13 ++++++++----- tektoncsample/quickstart/pipeline.yaml.j2 | 19 +++++++++++-------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/tektonc/tektonc.py b/tektonc/tektonc.py index af6ebc9c..5b110d01 100644 --- a/tektonc/tektonc.py +++ b/tektonc/tektonc.py @@ -26,10 +26,12 @@ import sys from typing import Any, Dict, Iterable, List, Mapping, MutableMapping -import yaml +import json, yaml from jinja2 import Environment, StrictUndefined, TemplateError, Undefined from jinja2.runtime import Undefined as RTUndefined + + # ────────────────────────────────────────────────────────────────────────────── # Jinja helpers # Two-pass render: @@ -80,17 +82,18 @@ def _enum(seq): return [{"i": i, "item": v} for i, v in enumerate(seq)] def build_env_outer() -> Environment: - env = Environment(undefined=PassthroughUndefined, autoescape=False, trim_blocks=True, lstrip_blocks=True) - env.filters.update({"dns": _dns_outer, "slug": _slug_outer}) + env = 
Environment(undefined=PassthroughUndefined, autoescape=False, trim_blocks=True, lstrip_blocks=False) + env.filters.update({"dns": _dns_outer, "slug": _slug_outer, "tojson": json.dumps}) env.globals.update({"enumerate_list": _enum}) return env def build_env_inner() -> Environment: - env = Environment(undefined=StrictUndefined, autoescape=False, trim_blocks=True, lstrip_blocks=True) - env.filters.update({"dns": _dns_inner, "slug": _slug_inner}) + env = Environment(undefined=StrictUndefined, autoescape=False, trim_blocks=True, lstrip_blocks=False) + env.filters.update({"dns": _dns_inner, "slug": _slug_inner, "tojson": json.dumps}) env.globals.update({"enumerate_list": _enum}) return env + # ────────────────────────────────────────────────────────────────────────────── # Expander (no validation yet) # ────────────────────────────────────────────────────────────────────────────── diff --git a/tektoncsample/quickstart/pipeline.yaml.j2 b/tektoncsample/quickstart/pipeline.yaml.j2 index c362ca92..c91b8855 100644 --- a/tektoncsample/quickstart/pipeline.yaml.j2 +++ b/tektoncsample/quickstart/pipeline.yaml.j2 @@ -1,4 +1,6 @@ -# pipeline.yaml.j2 +# ============================================================================= +# pipeline.yaml.j2 — Minimal example for tektonc (no tojson) +# ============================================================================= apiVersion: tekton.dev/v1 kind: Pipeline metadata: @@ -9,28 +11,29 @@ spec: type: string tasks: - # Plain Tekton task — unchanged by the expander + # 1) Plain Tekton task — unchanged by the expander - name: print-start taskRef: { name: echo } params: - name: text value: "Starting pipeline {{ pipeline_name }}" - # Loop: one task per modelRef + # 2) Loop: one task per modelRef (compile-time fan-out) - loopName: per-model foreach: domain: - modelRef: {{ models|tojson }} + # Render the list directly; YAML accepts it (e.g., ['llama-7b','qwen-2.5-7b']) + modelRef: {{ models }} tasks: - name: "process-{{ modelRef|dns }}" 
taskRef: { name: process-model } runAfter: [ print-start ] params: - { name: model, value: "{{ modelRef }}" } + # Tekton param — resolved at runtime by Tekton, not by Jinja - { name: message, value: "$(params.message)" } - # Aggregate after all per-model tasks finish. - # Use an inline list to avoid indentation issues with Jinja. + # 3) Aggregate after all per-model tasks finish (inline list to avoid indent issues) - name: aggregate-results taskRef: { name: aggregate } runAfter: [ {% for m in models %}process-{{ m|dns }}{% if not loop.last %}, {% endif %}{% endfor %} ] @@ -39,11 +42,11 @@ spec: value: "All models processed." finally: - # Loop in 'finally': one cleanup per model, after aggregate-results + # 4) Finally loop: one cleanup per model, after aggregate-results - loopName: cleanup foreach: domain: - modelRef: {{ models|tojson }} + modelRef: {{ models }} tasks: - name: "cleanup-{{ modelRef|dns }}" taskRef: { name: cleanup-model } From b7d1d181090fb52b31bc305863e11206653359b3 Mon Sep 17 00:00:00 2001 From: Srinivasan Parthasarathy Date: Wed, 22 Oct 2025 16:04:46 -0400 Subject: [PATCH 06/10] nested loops Signed-off-by: Srinivasan Parthasarathy --- tektoncsample/nested-loops/pipeline.yaml.j2 | 60 +++++++++++++++++++++ tektoncsample/nested-loops/values.yaml | 3 ++ 2 files changed, 63 insertions(+) create mode 100644 tektoncsample/nested-loops/pipeline.yaml.j2 create mode 100644 tektoncsample/nested-loops/values.yaml diff --git a/tektoncsample/nested-loops/pipeline.yaml.j2 b/tektoncsample/nested-loops/pipeline.yaml.j2 new file mode 100644 index 00000000..89d0dd8d --- /dev/null +++ b/tektoncsample/nested-loops/pipeline.yaml.j2 @@ -0,0 +1,60 @@ +apiVersion: tekton.dev/v1 +kind: Pipeline +metadata: + # Quote + default so YAML stays valid even if the value is missing + name: "{{ pipeline_name|default('nested-loops-demo') }}" +spec: + tasks: + - name: prep + taskRef: { name: prep-env } + + # OUTER loop: per model + - loopName: per-model + foreach: + domain: + # Safe 
default list if 'models' missing in values.yaml + modelRef: {{ models|default(['llama-7b','qwen-2.5-7b']) }} + tasks: + - name: "dl-{{ modelRef|dns }}" + taskRef: { name: download-model } + runAfter: [ prep ] + params: + - { name: modelRef, value: "{{ modelRef }}" } + + # INNER loop: per prefix for this model + - loopName: per-prefix + foreach: + domain: + # Safe default list if 'prefixes' missing in values.yaml + prefix: {{ prefixes|default(['A','B']) }} + tasks: + - name: "svc-{{ modelRef|dns }}-{{ prefix|slug }}" + taskRef: { name: start-service } + runAfter: [ "dl-{{ modelRef|dns }}" ] + params: + - { name: modelRef, value: "{{ modelRef }}" } + - { name: prefix, value: "{{ prefix }}" } + + - name: "job-{{ modelRef|dns }}-{{ prefix|slug }}" + taskRef: { name: run-job } + runAfter: [ "svc-{{ modelRef|dns }}-{{ prefix|slug }}" ] + params: + - { name: modelRef, value: "{{ modelRef }}" } + - { name: prefix, value: "{{ prefix }}" } + + # Per-model fan-in: wait for all prefixes to finish their job step + - name: "agg-{{ modelRef|dns }}" + taskRef: { name: aggregate-results } + runAfter: + {% for p in prefixes|default(['A','B']) %} + - "job-{{ modelRef|dns }}-{{ p|slug }}" + {% endfor %} + + finally: + # Optional: a global summary after all per-model aggregates + - name: global-summary + taskRef: { name: global-summarize } + runAfter: + {% for m in models|default(['llama-7b','qwen-2.5-7b']) %} + - "agg-{{ m|dns }}" + {% endfor %} diff --git a/tektoncsample/nested-loops/values.yaml b/tektoncsample/nested-loops/values.yaml new file mode 100644 index 00000000..61c7bbe2 --- /dev/null +++ b/tektoncsample/nested-loops/values.yaml @@ -0,0 +1,3 @@ +pipeline_name: nested-loops-demo +models: ["llama-7b", "qwen-2.5-7b"] +prefixes: ["A", "B"] From 8628b56eb0f5bc0be6a20dcdf029591a5a2e10d4 Mon Sep 17 00:00:00 2001 From: Michael Kalantar Date: Thu, 23 Oct 2025 15:24:54 -0400 Subject: [PATCH 07/10] document operations Signed-off-by: Michael Kalantar --- tektonc/operations.md | 158 
++++++++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) create mode 100644 tektonc/operations.md diff --git a/tektonc/operations.md b/tektonc/operations.md new file mode 100644 index 00000000..94f8e223 --- /dev/null +++ b/tektonc/operations.md @@ -0,0 +1,158 @@ +Based on experiments with Tekton, some basic composable operations might include: + +## Tooling + +Operations to install tooling and to configure the environment. This might include installing and configuring the cluster; for example, a gateway provider (istio, kgateway, gke), LWS, Tekton, etc. +It might also include installing runtime tooling such as llmdbench, helm, yq, git, kubectl, oc, etc. + +## Stack Creation + +Operations to create elements of the model stack -- gateway, GAIE, and model servers. + +To deploy each stack, a unique DNS-compatible identifier (`model_label`) is required. It serves two purposes: + +(a) For each model service, a GAIE deployment is created. The `InferencePool` identifies the pods of the model service using a set of match labels. Typically, the `llm-d.ai/model` label is used for this. Its value must be unique across all model services in the namespace. The `model_label` can be used for this. + +(b) At the level of the Gateway, there must be a means to distinguish requests for one model service vs. another. For most workload generators, the simplest mechanism is to modify the request path by inserting a model specific prefix in the path. This prefix must be unique to the instance of the deployed model. Again, the `model_label` can be used for this (in an `HTTPRoute`). + +### Operation: `deploy_gateway` + +**Description:** + +Installs a gateway pod into a namespace. + +Notes: A gateway pod can be used for multiple namespaces. This requires additional configuration and is ignored for now. It is assumed that if a model is deployed, its namespace has its own gateway (to be confirmed). + +**Inputs**: + +- *namespace* +- *release_name* - Helm release name +- *helm_chart_values* - (default: ?)
+- *helm_chart* - (default: `llm-d-infra/llm-d-infra`) +- *helm_chart_version* - (default: `none` (latest)) +- *helm_chart_repository_url* - (default: `https://llm-d-incubation.github.io/llm-d-infra/`) + +**Outputs**: + +_ _name_ - name of gateway created +- _serviceUrl_ - endpoint (incl. port) to be used by requests + +### Operation: `deploy_gaie` + +**Inputs**: + +- *namespace* +- *model_label* - used to configure `InferencePool` match labels. +- *release_name* - Helm release name +- *helm_chart_values* - [samples](https://github.com/llm-d/llm-d/tree/main/guides/prereq/gateway-provider/common-configurations) +- *helm_chart* - (default: `oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool`) +- *helm_chart_version* (default: `v1.0.1`) +- *helm_chart_repository_url* (default: `none`) +- *helm_overrides* - list of fields to set? values file to apply? *model_label* used here? + +**Outputs**: + +### Operation: `deploy_model` + +**Inputs**: + +**Outputs**: + +### Operation: `create_httproute` + +**Description:** + +Create an `HTTPRoute` object to match requests to a Gateway to the GAIE `InferencePool` (and hence to the model service Pods). One HTTPRoute can be created per stack. Alternatively, a single `HTTPRoute` can configure multiple mappings (currently required for Istio). + +**Inputs**: + +- *namespace* +- *manifest* - Requires Gateway *name* and InferencePool *name* and a *model_label* + +**Outputs**: + +### Operation: `download_model` + +**Description:** + +Downloads model from HF to a locally mounted disk. + +**Inputs**: + +- *model* +- *HF_TOKEN* +- *path* - location to which the model should be downloaded + +**Outputs**: + +- *endpoint* - url for sending requests from within the cluster + +## Run Workloads + +### Operation: `create_workload_profile` + +**Description**: + +Modify a workload profile template for a particular execution. The profile format is specific to workload generator (harness) type. 
Should this be part of **run_workload**? + +**Inputs**: + +- **harness_type** +- **workload_profile_template** - workload profile (yaml) or location of profile template +- **changes** - name/path/value information to modify template; In addition to the workload parameters, this includes: + + - **stack_endpoint** - endpoint to be used to send requests + - **model** - HF model name + +**Outputs**: + +- **workload_profile** - yaml string or url to location + +### Operation: `run_workload` + +**Description**: + +Configure and run a workload generator (harness). On completion, results are saved to a locally mounted filesystem and are converted to a universal format. Should conversion be a separate operation? + +**Inputs**: + +- **harness_type** +- **workoad_profile** - workload profile (yaml) +- **HF_TOKEN** - required by some generators (`vllm-benchmark`) +- **path** - path to where results should be saved + +**Outputs**: + +### Operation: `record` + +**Description**: + +Record configuration of stack and workload. Should this be part of **run_workload**? + +**Inputs**: + +- All inputs from `deploy_gaie`, `deploy_model`, `create_httproute`, and `run_workflow` + +**Outputs**: + +- list of paths? + +### Operation: `upload` + +**Description**: + +Copy results from a locally mounted files to remote location. Should there be one operation per target type? 
+ +**Inputs**: + +- list of paths to upload +- target_details + + - this is specific to the target type, for example for s3 compatible bucket: + - *AWS_ACCESS_KEY_ID* + - *AWS_SECRET_ACCESS_KEY* + - *s3_endpoint* + - *s3_bucket* + - *target_object_name* + +**Outputs**: \ No newline at end of file From 2044d84149c07483f72ed82667e13f08ed84215d Mon Sep 17 00:00:00 2001 From: Michael Kalantar Date: Thu, 23 Oct 2025 15:27:25 -0400 Subject: [PATCH 08/10] rename Signed-off-by: Michael Kalantar --- tektonc/{operations.md => tasks.md} | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) rename tektonc/{operations.md => tasks.md} (85%) diff --git a/tektonc/operations.md b/tektonc/tasks.md similarity index 85% rename from tektonc/operations.md rename to tektonc/tasks.md index 94f8e223..9b71fe8e 100644 --- a/tektonc/operations.md +++ b/tektonc/tasks.md @@ -1,13 +1,13 @@ -Based on experiments with Tekton, some basic composable operations might include: +Based on experiments with Tekton, some basic composable tasks might include: ## Tooling -Operations to install tooling and to configure the environment. This might include installing and configuring the cluster; for example, a gateway provider (istio, kgateway, gke), LWS, Tekton, etc. +Tasks to install tooling and to configure the environment. This might include installing and configuring the cluster; for example, a gateway provider (istio, kgateway, gke), LWS, Tekton, etc. It might also include installing runtime tooling such as llmdbench, helm, yq, git, kubectl, oc, etc. ## Stack Creation -Operations to create elements of the model stack -- gateway, GAIE, and model servers. +Tasks to create elements of the model stack -- gateway, GAIE, and model servers. To delpoy each stack, a unique DNS compatible identifier (`model_label`) is required. 
It serves two purposes: @@ -15,7 +15,7 @@ To delpoy each stack, a unique DNS compatible identifier (`model_label`) is requ (b) At the level of the Gateway, there must be a means to distinguish requests for one model service vs. another. For most workload generators, the simplest mechanism is to modify the request path by inserting a model specific prefix in the path. This prefix must be unique to the instance of the deployed model. Again, the `model_label` can be used for this (in an `HTTPRoute`). -### Operation: `deploy_gateway` +### Task: `deploy_gateway` **Description:** @@ -37,7 +37,7 @@ Notes: A gateway pod can be used for multiple namespaces. This requires addition _ _name_ - name of gateway created - _serviceUrl_ - endpoint (incl. port) to be used by requests -### Operation: `deploy_gaie` +### Task: `deploy_gaie` **Inputs**: @@ -52,13 +52,13 @@ _ _name_ - name of gateway created **Outputs**: -### Operation: `deploy_model` +### Task: `deploy_model` **Inputs**: **Outputs**: -### Operation: `create_httproute` +### Task: `create_httproute` **Description:** @@ -71,7 +71,7 @@ Create an `HTTPRoute` object to match requests to a Gateway to the GAIE `Inferen **Outputs**: -### Operation: `download_model` +### Task: `download_model` **Description:** @@ -89,7 +89,7 @@ Downloads model from HF to a locally mounted disk. ## Run Workloads -### Operation: `create_workload_profile` +### Task: `create_workload_profile` **Description**: @@ -108,11 +108,11 @@ Modify a workload profile template for a particular execution. The profile forma - **workload_profile** - yaml string or url to location -### Operation: `run_workload` +### Task: `run_workload` **Description**: -Configure and run a workload generator (harness). On completion, results are saved to a locally mounted filesystem and are converted to a universal format. Should conversion be a separate operation? +Configure and run a workload generator (harness). 
On completion, results are saved to a locally mounted filesystem and are converted to a universal format. Should conversion be a separate task? **Inputs**: @@ -123,7 +123,7 @@ Configure and run a workload generator (harness). On completion, results are sav **Outputs**: -### Operation: `record` +### Task: `record` **Description**: @@ -137,11 +137,11 @@ Record configuration of stack and workload. Should this be part of **run_workloa - list of paths? -### Operation: `upload` +### Task: `upload` **Description**: -Copy results from a locally mounted files to remote location. Should there be one operation per target type? +Copy results from a locally mounted files to remote location. Should there be one task per target type? **Inputs**: From 6a3479dfe458183a821fe4e03131053b35fd7ef2 Mon Sep 17 00:00:00 2001 From: Michael Kalantar Date: Thu, 23 Oct 2025 16:09:01 -0400 Subject: [PATCH 09/10] split to specific types Signed-off-by: Michael Kalantar --- tektonc/tasks.md | 86 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 79 insertions(+), 7 deletions(-) diff --git a/tektonc/tasks.md b/tektonc/tasks.md index 9b71fe8e..db6d1dab 100644 --- a/tektonc/tasks.md +++ b/tektonc/tasks.md @@ -1,4 +1,4 @@ -Based on experiments with Tekton, some basic composable tasks might include: +Based on experiments with Tekton, some basic composable tasks might include the following. ## Tooling @@ -38,12 +38,16 @@ _ _name_ - name of gateway created - _serviceUrl_ - endpoint (incl. port) to be used by requests ### Task: `deploy_gaie` + +**Description**: + +Installs Kubernetes Gateway Inference Extension objects: an endpoint picker and inference pool. **Inputs**: - *namespace* - *model_label* - used to configure `InferencePool` match labels. 
-- *release_name* - Helm release name +- *release_name* - Helm release name; unique to stack - *helm_chart_values* - [samples](https://github.com/llm-d/llm-d/tree/main/guides/prereq/gateway-provider/common-configurations) - *helm_chart* - (default: `oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool`) - *helm_chart_version* (default: `v1.0.1`) @@ -54,8 +58,21 @@ _ _name_ - name of gateway created ### Task: `deploy_model` +**Description**: + +Installs vllm engines. + **Inputs**: +- *namespace* +- *model_label* - used to configure labels. +- *release_name* - Helm release name; unique to stack +- *helm_chart_values* - [samples](https://github.com/llm-d/llm-d/tree/main/guides/prereq/gateway-provider/common-configurations) +- *helm_chart* - (default: `llm-d-modelservice/llm-d-modelservice`) +- *helm_chart_version* (default: `none` (latest)) +- *helm_chart_repository_url* (default: `https://llm-d-incubation.github.io/llm-d-modelservice/`) +- *helm_overrides* - list of fields to set? values file to apply? + **Outputs**: ### Task: `create_httproute` @@ -108,26 +125,81 @@ Modify a workload profile template for a particular execution. The profile forma - **workload_profile** - yaml string or url to location -### Task: `run_workload` +### Task: `run_workload_inference-perf` **Description**: -Configure and run a workload generator (harness). On completion, results are saved to a locally mounted filesystem and are converted to a universal format. Should conversion be a separate task? +Generate workload using _inference perf_. On completion, results are saved to a locally mounted filesystem. **Inputs**: -- **harness_type** - **workoad_profile** - workload profile (yaml) -- **HF_TOKEN** - required by some generators (`vllm-benchmark`) - **path** - path to where results should be saved **Outputs**: +### Task: `transform_results_inference-perf` + +**Description**: + +Convert results from execution of _inference perf_ to a universal format. 
+ +**Inputs**: + +- **source_path** - path to where results are saved +- **target_path** - location where converted results should be saved + +**Outputs**: + +### Task: `run_workload_vllm_benchmark` + +**Description**: + +Generate workload using _vllm benchmark_. On completion, results are saved to a locally mounted filesystem. + +Details as above for `run_workload_inference_perf` with addition of input `HF_TOKEN`. + +### Task: `transform_results_vllm_benchmark` + +**Description**: + +Convert results from execution of _vllm benchmark_ to a universal format. Details are as above for `convert_profile_inference-perf. + +### Task: `run_workload_guidellm` + +**Description**: + +Generate workload using _guidellm_. On completion, results are saved to a locally mounted filesystem. + +Details as above for `run_workload_inference_perf`. + +### Task: `transform_results_guidellm` + +**Description**: + +Convert results from execution of _guidellm_ to a universal format. Details are as above for `convert_profile_inference-perf. + +### Task: `run_workload_fmperf` + +**Description**: + +Generate workload using _fmperf_. On completion, results are saved to a locally mounted filesystem. + +Details as above for `run_workload_inference_perf`. + +### Task: `transform_results_fmperf` + +**Description**: + +Convert results from execution of _fmperf_ to a universal format. Details are as above for `convert_profile_inference-perf. + +## Document + ### Task: `record` **Description**: -Record configuration of stack and workload. Should this be part of **run_workload**? +Record configuration of one stack and one or more workload executions. 
**Inputs**: From 7a7a5de71c1f9c667fa866f23a64cdf2c212e505 Mon Sep 17 00:00:00 2001 From: Srinivasan Parthasarathy Date: Fri, 24 Oct 2025 10:42:44 -0400 Subject: [PATCH 10/10] object loops Signed-off-by: Srinivasan Parthasarathy --- tektonc/tektonc.py | 40 ++++++++++++++-- tektoncsample/object-loops/pipeline.yaml.j2 | 53 +++++++++++++++++++++ tektoncsample/object-loops/values.yaml | 9 ++++ 3 files changed, 98 insertions(+), 4 deletions(-) create mode 100644 tektoncsample/object-loops/pipeline.yaml.j2 create mode 100644 tektoncsample/object-loops/values.yaml diff --git a/tektonc/tektonc.py b/tektonc/tektonc.py index 5b110d01..b658381e 100644 --- a/tektonc/tektonc.py +++ b/tektonc/tektonc.py @@ -39,6 +39,7 @@ # - Inner env: strict; resolves loop vars during loop expansion # ────────────────────────────────────────────────────────────────────────────── + def _dns_inner(s: str) -> str: """DNS-1123-ish: lowercase, alnum and dash, trim to 63 chars with hash fallback.""" import re, hashlib @@ -67,16 +68,47 @@ def _slug_outer(val: object) -> str: return _slug_inner(val) # type: ignore[arg-type] class PassthroughUndefined(Undefined): - """In OUTER render, keep unknown variables as '{{ name }}' so inner pass can resolve them.""" + """ + OUTER render: keep unknown variables as their original Jinja expression, + including dotted attributes and item access, so the INNER pass can resolve them. + - {{ model }} -> "{{ model }}" + - {{ model.name }} -> "{{ model.name }}" + - {{ model['port'] }} -> "{{ model['port'] }}" + - {{ model.name|dns }} -> dns_outer will see an Undefined and reconstruct "{{ model.name|dns }}" + """ __slots__ = () + + # Compose a new Undefined that remembers the full Jinja expression text. + def _compose(self, suffix: str) -> "PassthroughUndefined": + base = getattr(self, "_undefined_name", None) or "?" 
+ expr = f"{base}{suffix}" + # Undefined signature: (hint=None, obj=None, name=None, exc=None) + return PassthroughUndefined(name=expr) + + # Attribute access: {{ x.y }} + def __getattr__(self, name: str) -> "PassthroughUndefined": # type: ignore[override] + return self._compose(f".{name}") + + # Item access: {{ x['k'] }} / {{ x[0] }} + def __getitem__(self, key) -> "PassthroughUndefined": # type: ignore[override] + # Use repr to round-trip quotes correctly + return self._compose(f"[{repr(key)}]") + + # Function call: {{ f(x) }} -> best-effort string form + def __call__(self, *args, **kwargs) -> "PassthroughUndefined": # type: ignore[override] + return self._compose("(...)") + + # Stringification -> the literal Jinja expression def __str__(self) -> str: # type: ignore[override] name = getattr(self, "_undefined_name", None) return "{{ " + name + " }}" if name else "{{ ?? }}" - def __iter__(self): # allows use in loops without crashing + + def __iter__(self): return iter(()) - def __bool__(self) -> bool: # treat undefined as False - return False + def __bool__(self): + return False + def _enum(seq): """Return [{i, item}, ...] 
for easy serial chains in Jinja.""" return [{"i": i, "item": v} for i, v in enumerate(seq)] diff --git a/tektoncsample/object-loops/pipeline.yaml.j2 b/tektoncsample/object-loops/pipeline.yaml.j2 new file mode 100644 index 00000000..c9dc0dd3 --- /dev/null +++ b/tektoncsample/object-loops/pipeline.yaml.j2 @@ -0,0 +1,53 @@ +# pipeline.yaml.j2 — minimal example: iterate over objects +apiVersion: tekton.dev/v1 +kind: Pipeline +metadata: + name: "{{ pipeline_name }}" +spec: + params: + - name: message + type: string + + tasks: + # A setup task that runs first + - name: print-start + taskRef: { name: echo } + params: + - name: text + value: "Starting pipeline {{ pipeline_name }}" + + # Loop over a list of model objects + - loopName: per-model + foreach: + domain: + model: {{ models }} + tasks: + # Each 'model' is a dict with fields: name, port, quant + - name: "serve-{{ model.name|dns }}" + taskRef: { name: start-service } + runAfter: [ print-start ] + params: + - { name: modelRef, value: "{{ model.name }}" } + - { name: port, value: "{{ model.port }}" } + - { name: quant, value: "{{ model.quant }}" } + + - name: "test-{{ model.name|dns }}" + taskRef: { name: run-test } + runAfter: [ "serve-{{ model.name|dns }}" ] + params: + - { name: modelRef, value: "{{ model.name }}" } + - { name: port, value: "{{ model.port }}" } + - { name: message, value: "$(params.message)" } + + finally: + # Cleanup task per model + - loopName: cleanup + foreach: + domain: + model: {{ models }} + tasks: + - name: "cleanup-{{ model.name|dns }}" + taskRef: { name: stop-service } + runAfter: [ "test-{{ model.name|dns }}" ] + params: + - { name: modelRef, value: "{{ model.name }}" } diff --git a/tektoncsample/object-loops/values.yaml b/tektoncsample/object-loops/values.yaml new file mode 100644 index 00000000..0a96f25f --- /dev/null +++ b/tektoncsample/object-loops/values.yaml @@ -0,0 +1,9 @@ +pipeline_name: object-loop-demo + +models: + - name: llama-7b + port: "8080" + quant: "fp16" + - name: 
qwen-2.5-7b + port: "9090" + quant: "int4"